content
stringlengths 0
14.9M
| filename
stringlengths 44
136
|
---|---|
#' Between-Within functions to obtain Denominator degrees of freedom
#'
#' @keywords internal
#'
#' @importFrom stringr str_detect
ddf_BW <- function(object, L){
parameters <- names(object$coefficients)
est_param <- parameters[which(L==1)]
n_obs <- object$dims$N
n_param <- object$dims$p
for (i in 1:length(sys.parents())){
if (exists("transformed_data", envir = parent.frame(n=i), inherits=FALSE)){
# formul <- get("myformul", envir = parent.frame(n=i), inherits=FALSE)
data.obj <- get("transformed_data", envir = parent.frame(n=i), inherits=FALSE)
}
if (exists("input", envir = parent.frame(n=i), inherits=FALSE)){
input.obj <- get("input", envir = parent.frame(n=i), inherits=FALSE)
}
}
n_indiv <- length(unique(data.obj$Subject))
armRef <- input.obj$selectRefArmInter
n_obs_armRef <- nrow(data.obj[which(data.obj$arm==armRef),])
n_param_interac <- length(stringr::str_detect(parameters, ":")[stringr::str_detect(parameters, ":")==TRUE])
n_param_bkg <- length(stringr::str_detect(parameters, "bkg")[stringr::str_detect(parameters, "bkg")==TRUE])
# for inter-arm
if(input.obj$selectModel == "1"){
if(stringr::str_detect(est_param, "bkg")){
#ddf_between
ddf <- n_obs - n_indiv - n_param
} else if(stringr::str_detect(est_param, ":")){
#ddf_within for stim
ddf <- n_obs - n_obs_armRef - n_param_interac
} else{
#ddf_within for stim and arm
ddf <- n_obs_armRef - n_indiv - (n_param - n_param_interac - n_param_bkg)
}
#for intra-arm
} else if(input.obj$selectModel == "2"){
ddf <- n_obs - n_indiv - n_param
}
ddf
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/BW_functions.R
|
#'Toy data to upload in the app.
#'
#'@docType data
#'
#'@keywords data
#'
#'@name ICS_ex
#'
#'@format A tab-separated .txt file
#'
#'@usage data(ICS_ex)
#'
#'@examples
#'if(interactive()){
#' set.seed(1382019)
#' nsubj <- 20
#' ntp <- 3
#' nstim <- 3
#' narm <- 3
#' subj <- rep(rep(rep(1:nsubj, each = ntp), times = nstim), times = narm)
#' stim <- rep(rep(c("NS", "S1", "S2"), each = nsubj*ntp), times = narm)
#' tp <- rep(rep(c("D0", "D1", "D3"), times=nsubj*nstim), times = narm)
#' a <- rep(c("Placebo", "A2", "A3"), each = nsubj*nstim*ntp)
#' y1 <- round(abs(rnorm(n=nsubj*nstim*ntp*narm,m = 0.03, sd=0.06)) +
#' (stim=="S2" & a == "A2" & tp == "D1")*abs(rnorm(n=nsubj*nstim*ntp*narm, m = 0.05, sd=0.01)), 4)
#' y2 <- round(abs(rnorm(n=nsubj*nstim*ntp*narm,m = 0.03, sd=0.06)) +
#' (stim=="S1" & a =="A3" & tp == "D3")*abs(rnorm(n=nsubj*nstim*ntp*narm, m = 0.1, sd=0.02)), 4)
#' ICS_ex <- cbind.data.frame("Subject" = subj, "StimulationPool" = stim, "TimePoint" = tp,
#' "Arm" = a, "Response1" = y1, "Response2" = y2)
#' #View(ICS_ex)
#' write.table(ICS_ex, file="Documents/GitHub/vici/data/ICS_ex.txt", sep="\t",
#' row.names = FALSE, quote = FALSE)
#'}
#'
#'
NULL
|
/scratch/gouwar.j/cran-all/cranData/vici/R/ICS_ex.R
|
#' @import shiny
#' @import ggpubr
#' @importFrom nlme gls varIdent
#' @importFrom utils read.csv write.table
#' @importFrom stats coef relevel as.formula model.matrix
#' @importFrom tidyr spread
#' @importFrom cowplot plot_grid
app_server <- function(input, output, session) {
# initialize everything ----
output$mod <- reactive(NULL)
output$mod_display <- reactive(FALSE)
output$res_sentence <- reactive(NULL)
output$res_error <- reactive(NULL)
output$res_lik <- reactive(NULL)
output$heatmap <- reactive(NULL)
output$boxplot <- reactive(NULL)
output$downloadHM <- reactive(NULL)
output$downloadBP <- reactive(NULL)
output$res_var <- reactive(NULL)
output$armisfactor <- reactive(TRUE)
output$arm2isfactor <- reactive(TRUE)
output$timeisfactor <- reactive(TRUE)
output$time2isfactor <- reactive(TRUE)
output$stimisfactor <- reactive(TRUE)
output$warningarmisfactor <- reactive(NULL)
output$warningarm2isfactor <- reactive(NULL)
output$warningstimisfactor <- reactive(NULL)
output$warningtimeisfactor <- reactive(NULL)
output$warningtime2isfactor <- reactive(NULL)
outputOptions(output, "mod_display", suspendWhenHidden = FALSE)
outputOptions(output, "warningarmisfactor", suspendWhenHidden = FALSE)
outputOptions(output, "warningarm2isfactor", suspendWhenHidden = FALSE)
outputOptions(output, "warningstimisfactor", suspendWhenHidden = FALSE)
outputOptions(output, "armisfactor", suspendWhenHidden = FALSE)
outputOptions(output, "arm2isfactor", suspendWhenHidden = FALSE)
outputOptions(output, "stimisfactor", suspendWhenHidden = FALSE)
outputOptions(output, "res_error", suspendWhenHidden = FALSE)
#outputOptions(output, "res_tab", suspendWhenHidden = FALSE)
outputOptions(output, "res_lik", suspendWhenHidden = FALSE)
outputOptions(output, "res_var", suspendWhenHidden = FALSE)
outputOptions(output, "heatmap", suspendWhenHidden = FALSE)
outputOptions(output, "downloadHM", suspendWhenHidden = FALSE)
outputOptions(output, "downloadBP", suspendWhenHidden = FALSE)
data <- reactiveValues()
data$fact_stim_OK <- TRUE
data$fact_arm_OK <- TRUE
data$fact_time_OK <- TRUE
data$fact_time2_OK <- TRUE
session$userData$res_data <- NULL
output$downloadRes <- downloadHandler(
filename = "ResVICI.txt",
content = function(file){
utils::write.table(session$userData$res_data,file,row.names = TRUE, sep = "\t", quote = FALSE)
}
)
#Module return input so sub module can access it
inpt <- callModule(module = mod_settings_pan_server, id = "settings_pan_ui_1",data = data,parent = session)
callModule(module = mod_modelfit_server, id = "modelfit_ui_1",datas = data,parent = inpt,origin = session)
observeEvent({
input$selectModel;
input$selectStim;
input$selectRefStim;
input$selectArmInter;
input$selectArmIntra;
input$selectRefArmInter ;
input$selectRefArmIntra;
input$selectTimeInter ;
input$selectTimeIntra;
input$selectRefTimeInter ;
input$selectRefTimeIntra}, {
#appelé data load
# write LaTeX model ----
if(input$selectModel == 1 & input$selectRefStim != '' & input$selectRefArmInter != '' & input$selectStim !='' &
input$selectArmInter %in% colnames(data$df) & input$selectStim %in% colnames(data$df)){
output$mod_display <- reactive(TRUE)
arm_coefs <- NULL
for(a in levels(data$df[, input$selectArmInter])){
if(a != input$selectRefArmInter){
arm_coefs <- paste0(arm_coefs, '+ \\beta_{', a,'}^{', input$selectRefStim, '}', a,
'_i')
}
}
statmodel <- paste0('$$y_i^{', input$selectRefStim, '} = \\beta_0^{', input$selectRefStim,
'}', arm_coefs, '+ \\varepsilon_i^{', input$selectRefStim, '}$$')
for(s in levels(data$df[, input$selectStim])){
if(s != input$selectRefStim){
arm_coefs <- NULL
for(a in levels(data$df[, input$selectArmInter])){
if(a != input$selectRefArmInter){
arm_coefs <- paste0(arm_coefs, '+ \\beta_{', a,'}^{', s, '}', a,
'_i')
}
}
statmodel <- paste0(statmodel, '$$y_i^{', s, '} = \\beta_0^{', s, '} ',
arm_coefs, '+ \\beta_{', input$selectRefStim, '}^{', s, '} y^{',
input$selectRefStim, '}_i + \\varepsilon_i^{', s, '}$$'
)
}
}
output$mod <- renderUI({
withMathJax(statmodel)
})
}else if(input$selectModel == 2 & input$selectRefStim != '' & input$selectRefTimeIntra != '' & input$selectStim !='' &
input$selectTimeIntra %in% colnames(data$df) & input$selectStim %in% colnames(data$df)) {
output$mod_display <- reactive(TRUE)
statmodel <- NULL
for(t in levels(data$df[, input$selectTimeIntra])){
if(t != input$selectRefTimeIntra){
statmodel <- paste0(statmodel, '$$y_{diff\\,',t ,'\\, _i}^{', input$selectRefStim, '} = \\beta_{0\\,',t ,'}^{', input$selectRefStim,
'} ', '+ \\varepsilon_{',t ,'\\, _i}^{', input$selectRefStim, '}$$')
}
}
for(s in levels(data$df[, input$selectStim])){
if(s != input$selectRefStim){
for(t in levels(data$df[, input$selectTimeIntra])){
if(t != input$selectRefTimeIntra){
statmodel <- paste0(statmodel, '$$y_{diff\\,',t ,'\\, _i}^{', s, '} = \\beta_{0\\,',t ,'}^{', s,
'} + \\beta_{', input$selectRefStim, '\\,',t ,'}^{', s, '} \\,y^{',
input$selectRefStim, '}_{diff\\,',t ,'\\, _i} + \\varepsilon_{',t ,'\\, _i}^{', s, '}$$'
)
}
}
}
}
diffdef <- paste0('where \\(y_{diff\\,\\{\\textsf{t}\\}\\, _i}^{\\{\\textsf{s}\\}} = y_i^{\\{\\textsf{s}\\}}(\\{\\textsf{t}\\}) - y_i^{\\{\\textsf{s}\\}}(',
input$selectRefTimeIntra, ')\\)'
)
output$mod <- renderUI({
tagList(
withMathJax(statmodel),
div(""),
div(diffdef)
)
})
}else{
output$mod <- reactive(NULL)
output$mod_display <- reactive(FALSE)
}
clean_output(output)
})
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/app_server.R
|
#' @import shiny
app_ui <- function() {
fluidPage(
tags$head(tags$link(rel="shortcut icon", href="www/favicon.ico")),
titlePanel(div(img(src='www/logo.svg', align="right", height="120"), "VICI: accurate estimation of Vaccine Induced Cellular Immunogenicity with bivariate modeling"),
windowTitle = "VICI"),
h6("v0.7.3"),
h5(),
shiny::actionLink(inputId='github_code', label="Source code",
icon = icon("github"),
onclick ="window.open('https://github.com/sistm/vici', '_blank')"),
shiny::actionLink(inputId='github_code', label="CRAN package",
icon = icon("r-project"),
onclick ="window.open('https://CRAN.R-project.org/package=vici', '_blank')",
style='padding:10px;'),
h5(),
sidebarLayout(
mod_settings_pan_ui("settings_pan_ui_1"),
mainPanel(tabsetPanel(type = "tabs", id="inTabset",
tabPanel("Results", value="resTab",
conditionalPanel(
condition = "output.heatmap != null | output.res_error != null",
tags$hr(),
h3("Analysis results"),
conditionalPanel(
condition = "output.res_error != null",
verbatimTextOutput("res_error")
),
conditionalPanel(
condition = "output.res_lik != null",
wellPanel(
fluidRow(
plotOutput("heatmap"),
h6(""),
downloadButton("downloadHM", label = "Download heatmap [PNG]",
class = "btn-primary")
)
),
h2(""),
uiOutput('boxplotsAndTabs'),
downloadButton("downloadRes", label = "Download Results", class = "btn-primary")
)
)
),
# Output: Data file ----
tabPanel("Data view", value="dataTab",
conditionalPanel(
condition = "output.table2render == null",
helpText("Please input some data")
),
h5(""),
DT::dataTableOutput("table2render")
),
tabPanel("Additional Information", value="infoTab",
conditionalPanel(
condition = "output.mod_display",
withMathJax(),
h3("Statistical model fitted for each ICS response:"),
uiOutput('mod')
),
h2(),
tags$hr(),
conditionalPanel(
condition = "output.res_lik != null",
h3("Additional estimates:"),
wellPanel(htmlOutput("res_nparam")),
h5(""),
wellPanel(tableOutput("res_lik")),
h5(""),
wellPanel(h4("Estimated variances"),
tableOutput("res_var"))
)
)
)
)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/app_ui.R
|
#'Plotting function for displaying boxplots and associated p-values
#'
#'Internal function for displaying significance boxplots
#'
#'@param data_df a \code{data.frame}.
#'@param pval_2plot a \code{data.frame} with the p-values to display.
#'@param response_name a character string indicating the name of the response.
#'@param input internal input from UI.
#'@param inter a logical flag indicating whether we are in the interarm setting or not.
#'Default is \code{TRUE}.
#'@param baseline baseline value used in title when \code{inter} is \code{FALSE}.
#'Default is \code{NULL}.
#'@param fill a logical flag indicating if the boxplot is filled
#'Default if \code{FALSE}
#'
#'@return a \code{ggpubr} plot object
#'
#'@author Boris Hejblum
#'
#'@keywords internal
#'
#'@import ggplot2
#'@import ggpubr
#'@import RColorBrewer
boxplot_VICI <- function(data_df, pval_2plot, response_name, input, inter=TRUE, baseline=NULL,fill=FALSE){
if(!is.numeric(data_df$response)){
data_df$response <- as.numeric(data_df$response)
}
p <- NULL
if(inter){
data_df$arm <- relevel(data_df$arm, ref=input$selectRefArmInter)
suppressWarnings(
if(input$jiter == "None"){
p <- ggboxplot(na.omit(data_df), x="stim", y="response", color= "arm",
palette = "RdGy", fill = "stim", alpha=0.3)
}else{
p <- ggboxplot(na.omit(data_df), x="stim", y="response", color= "arm", palette = "RdGy",fill = "stim",
alpha=0.3, add="jitter", shape = as.numeric(input$jiter))
}
)
}else{
data_df$time <- relevel(data_df$time, ref=input$selectRefTimeIntra)
if(input$jiter == "None"){
p <- ggboxplot(na.omit(data_df), x="stim", y="response", color= "time", palette = "RdGy", alpha=0.3)
}else{
p <- ggboxplot(na.omit(data_df), x="stim", y="response", color= "time", palette = "RdGy",
alpha=0.3, add="jitter", shape = as.numeric(input$jiter))
}
}
p <- p + theme_grey() +
theme(panel.grid.major.x = element_blank()) +
scale_color_brewer(palette = input$color) +
stat_pvalue_manual(data = pval_2plot, label = "pvalue_format", tip.length = 0.025) +
ylab(paste0("Response ", response_name)) +
xlab("Stimulation") +
ggtitle(paste0("Arm effect on ", response_name),
subtitle = "p-values taking into account background response levels through bivariate modeling") +
labs(caption = "made with VICI")
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/boxplot_VICI.R
|
clean_output <- function(output){
output$res_tab <- reactive(NULL)
output$res_lik <- reactive(NULL)
output$heatmap <- reactive(NULL)
output$boxplot <- reactive(NULL)
output$downloadHM <- reactive(NULL)
output$downloadBP <- reactive(NULL)
output$res_var <- reactive(NULL)
output$res_error <- reactive(NULL)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/clean_output.R
|
#' Functions to obtain coefficient, degree of freedom, p-value
#'
#' This function allows to calculate the different approximations of degrees of
#' freedom and returns the table of results in the app.
#'
#' @param model a \code{gls} model.
#' @param ddf degrees of freedom approximation.
#'
#' @return a matrix containing coefficient, degrees of freedom and p-value
#' @keywords internal
#'
#' @importFrom stats vcov sigma pt
#' @importFrom nlme glsEstimate coef<-
#' @importFrom numDeriv hessian jacobian
get_coefmat_gls <- function (model, ddf = c("Satterthwaite", "Kenward-Roger", "Between-Within")) {
ddf <- match.arg(ddf)
p <- length(model$coefficients)
if (p < 1){
tab <- as.matrix(contest1D(model, L = numeric(0L), ddf = ddf))
}else{
Lmat <- diag(p)
tab <- rbindall(lapply(1:p, function(i) contest1D(model, L = Lmat[i, ], ddf = ddf)))
rownames(tab) <- names(model$coefficients)
as.matrix(tab)
}
return(tab)
}
#from lmerTest:::contest1D.lmerModLmerTest
contest1D <- function (model, L, rhs = 0, ddf = c("Satterthwaite", "Kenward-Roger", "Between-Within"),
confint = FALSE, level = 0.95, ...){
mk_ttable <- function(estimate, se, ddf) {
tstat <- (estimate - rhs)/se
pvalue <- 2 * pt(abs(tstat), df = ddf, lower.tail = FALSE)
if (confint) {
ci <- waldCI(estimate, se, ddf, level = level)
data.frame(Estimate = estimate, `Std. Error` = se,
df = ddf, `t value` = tstat, lower = unname(ci[,
"lower"]), upper = unname(ci[, "upper"]), `Pr(>|t|)` = pvalue,
check.names = FALSE)
}
else data.frame(Estimate = estimate, `Std. Error` = se,
df = ddf, `t value` = tstat, `Pr(>|t|)` = pvalue,
check.names = FALSE)
}
method <- match.arg(ddf)
if (is.matrix(L))
L <- drop(L)
stopifnot(is.numeric(L), length(L) == length(model$coefficients),
is.numeric(rhs), length(rhs) == 1L)
if (length(L) == 0L) {
o <- numeric(0L)
return(mk_ttable(o, o, o))
}
if (any(is.na(L)))
return(mk_ttable(NA_real_, NA_real_, NA_real_))
estimate <- sum(L * model$coefficients)
var_con <- sum(L * (model$varBeta %*% L))
if(method == "Between-Within"){
return(mk_ttable(estimate = estimate, se = sqrt(var_con),
ddf = ddf_BW(model, L)))
}
#To have objects of compute_jaclist function
jaclist <- compute_jaclist(object=model, tol=1e-14)
grad_var_con <- vapply(jaclist$jacobian_list, function(x) qform(L, x), numeric(1L))
satt_denom <- qform(grad_var_con, jaclist$vcov_varpar)
ddf <- drop(2 * var_con^2/satt_denom)
mk_ttable(estimate = estimate, se = sqrt(var_con), ddf = ddf)
}
qform <- function (L, V){
sum(L * (V %*% L))
}
# use glsEstimate and to compute the FULL deviance
# adapted from pbkrtest:::devfun_vp
devfun_gls <- function(varpar, gls_obj){
nvarpar <- length(varpar)
coef(gls_obj$modelStruct) <- varpar[-nvarpar]
attr(gls_obj$modelStruct, "conLin")$sigma <- varpar[nvarpar]
contr <- gls_obj$call$control
if(is.null(contr)){
contr <- list(singular.ok = FALSE)
}
#
est <- glsEstimate(object = gls_obj$modelStruct, control = contr)
return(as.numeric(-2*est$logLik))
}
# mix above with pbkrtest:::get_covbeta
varBetafun_gls <- function(varpar, gls_obj){
REML <- gls_obj$dims$REML
nvarpar <- length(varpar)
coef(gls_obj$modelStruct) <- varpar[-nvarpar]
N <- gls_obj$dims$N
p <- gls_obj$dims$p
attr(gls_obj$modelStruct, "conLin")$sigma <- varpar[nvarpar]
contr <- gls_obj$call$control
if(is.null(contr)){
contr <- list(singular.ok = FALSE)
}
est <- glsEstimate(object = gls_obj$modelStruct, control = contr)
varBeta <- crossprod(est$sigma * est$varBeta * sqrt((N - REML * p)/(N - p)))
return(varBeta)
}
#from pbkrtest:::compute_auxillary
compute_jaclist <- function (object, tol = 1e-06){
if (!inherits(object, "gls"))
stop("'model' not an 'gls'")
out <- list(sigma = NULL, vcov_beta = NULL, vcov_varpar = NULL,
jacobian_list = NULL)
out$sigma <- sigma(object)
out$vcov_beta <- as.matrix(vcov(object))
varpar_opt <- c(coef(object$modelStruct), "sigma" = sigma(object))
h <- hessian(func = devfun_gls, x = varpar_opt,
gls_obj = object)
eig_h <- eigen(h, symmetric = TRUE)
evals <- eig_h$values
neg <- evals < -tol
pos <- evals > tol
zero <- evals > -tol & evals < tol
if (sum(neg) > 0) {
evals_num <- paste(sprintf("%1.1e", evals[neg]), collapse = " ")
warning(sprintf("Model failed to converge with %d negative eigenvalue(s): %s",
sum(neg), evals_num), call. = FALSE)
}
if (sum(zero) > 0) {
evals_num <- paste(sprintf("%1.1e", evals[zero]), collapse = " ")
warning(sprintf("Model may not have converged with %d eigenvalue(s) close to zero: %s",
sum(zero), evals_num))
}
pos <- eig_h$values > tol
q <- sum(pos)
h_inv <- with(eig_h, {
vectors[, pos, drop = FALSE] %*% diag(1/values[pos],
nrow = q) %*% t(vectors[, pos, drop = FALSE])
})
out$vcov_varpar <- 2 * h_inv
jac <- jacobian(func = varBetafun_gls, x = varpar_opt,
gls_obj = object)
out$jacobian_list <- lapply(1:ncol(jac), function(i){array(jac[, i], dim = rep(length(coef(object)), 2))})
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/gls_ownfunctions.R
|
#'A heatmap function for displaying
#'
#'Internal function for displaying significance heatmap when multiple conditions are tested
#'
#'@param res_2plot a \code{data.frame}
#'
#'@param inter a logical flag indicating whether we are in the interarm setting or not.
#'Default is \code{TRUE}.
#'
#'@return a \code{ggplot2} plot object
#'
#'@author Boris Hejblum
#'
#'@keywords internal
#'
#'@import ggplot2
heatmap_vici <- function(res_2plot, inter=TRUE, baseline=NULL){
if(inter){
p <- ggplot(data = res_2plot) +
geom_tile(aes_string(x="Stimulation", y="response", fill="pvalue"), color="white") +
theme_minimal() +
theme(panel.grid = element_blank()) +
ggtitle("Arm effect on ICS response",
subtitle = "taking into account background response levels") +
labs(caption = "made with VICI") +
ylab("ICS response") +
scale_fill_manual(values = c("red4", "red3", "coral2", "grey80", "grey90", "grey95"),
breaks = c("[0,0.001)", "[0.001,0.01)", "[0.01,0.05)", "[0.05,0.1)", "[0.1,0.5)", "[0.5,1)"),
labels = c("[0,0.001[", "[0.001,0.01[", "[0.01,0.05[", "[0.05,0.1[", "[0.1,0.5[", "[0.5,1]"),
name="P-value",
limits = c("[0,0.001)", "[0.001,0.01)", "[0.01,0.05)", "[0.05,0.1)", "[0.1,0.5)", "[0.5,1)")
) + facet_wrap(c("Arm"), labeller = "label_both")
}else{
p <- ggplot(data = res_2plot) +
geom_tile(aes_string(x="Stimulation", y="response", fill="pvalue"), color="white") +
theme_minimal() +
theme(panel.grid = element_blank()) +
ggtitle(paste0("Intra-arm vaccine effect on ICS responses compared to baseline ", baseline),
subtitle = "taking into account background response levels") +
labs(caption = "made with VICI") +
ylab("ICS response") +
scale_fill_manual(values = c("red4", "red3", "coral2", "grey80", "grey90", "grey95"),
breaks = c("[0,0.001)", "[0.001,0.01)", "[0.01,0.05)", "[0.05,0.1)", "[0.1,0.5)", "[0.5,1)"),
labels = c("[0,0.001[", "[0.001,0.01[", "[0.01,0.05[", "[0.05,0.1[", "[0.1,0.5[", "[0.5,1]"),
name="P-value",
limits = c("[0,0.001)", "[0.001,0.01)", "[0.01,0.05)", "[0.05,0.1)", "[0.1,0.5)", "[0.5,1)")
) + facet_wrap(c("Timepoint"), labeller = "label_both")
}
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/heatmap_VICI.R
|
#'Plotting function for displaying histograms and associated p-values
#'
#'Internal function for displaying significance histograms
#'
#'@param data_df a \code{data.frame}.
#'@param pval_2plot a \code{data.frame} with the p-values to display.
#'@param response_name a character string indicating the name of the response.
#'@param input internal input from UI.
#'@param inter a logical flag indicating whether we are in the interarm setting or not.
#'Default is \code{TRUE}.
#'@param baseline baseline value used in title when \code{inter} is \code{FALSE}.
#'Default is \code{NULL}.
#'
#'@return a \code{ggpubr} plot object
#'
#'@author Clément NERESTAN
#'
#'@keywords internal
#'
#'@import ggplot2
#'@import ggpubr
#'@import RColorBrewer
histogram_VICI <- function(data_df, pval_2plot, response_name, input, inter=TRUE, baseline=NULL){
#browser()
p <- NULL
if(inter){
#browser()
data_df$arm <- relevel(data_df$arm, ref=input$selectRefArmInter)
#browser()
#suppressWarnings(
#browser(),
p <-
ggbarplot(na.omit(data_df), x="stim",y="response",
color= "arm", palette = "RdGy",#c("Red","Blue","Black"),#"RdBu",
#fill="white",#"arm",
alpha=0.3,
sort.by.groups = TRUE) +
#theme_bw() +
# theme_grey() +
# #theme(panel.grid.major.x = element_blank()) +
# #scale_colour_manual(values = CPCOLS) +
scale_color_brewer(palette = input$color) +#"RdGy") +
# #scale_fill_viridis_d("Arm: ") +
# #scale_color_viridis_d("Arm: ") +
stat_pvalue_manual(data = pval_2plot, label = "pvalue_format",
tip.length = 0.025) +
#ylab(paste0("Response ", response_name)) +
xlab("Stimulation") +
ggtitle(paste0("Arm effect on ", response_name),
subtitle = "p-values taking into account background response levels through bivariate modeling") +
labs(caption = "made with VICI")
#)
}else{
data_df$time <- relevel(data_df$time, ref=input$selectRefTimeIntra)
#browser()
suppressWarnings(
p <-
ggbarplot(na.omit(data_df), x="stim", y="response",
color="time",# palette = "RdGy",#c("Red","Blue","Black"),#"RdBu",
#fill="white",#"time",
alpha=0.3,
sort.by.groups = TRUE) +
# #theme_bw() +
# theme_grey() +
# #scale_colour_manual(values = CPCOLS) +
scale_color_brewer(palette = input$color)+#"RdGy") +
# #scale_fill_viridis_d("Time-point: ") +
# #scale_color_viridis_d("Time-point: ") +
stat_pvalue_manual(data = pval_2plot, label = "pvalue_format",
tip.length = 0.025) +
# ylab(paste0("Response ", response_name)) +
xlab("Stimulation") +
ggtitle(paste0("Intra-arm vaccine effect on ", response_name, " compared to baseline ", baseline),
subtitle = "p-values taking into account background response levels through bivariate modeling") +
labs(caption = "made with VICI")
)
}
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/histogram_VICI.R
|
#'Fitting GLS For Inter-Arm Setting
#'
#' @keywords internal
#' @importFrom stats na.omit
#' @importFrom nlme varIdent
interarm_fit <- function(transformed_data, input,resp){
res_tab <- NULL
res_lik <- NULL
res_error <- NULL
bkg_inter_mat <- model.matrix(data = stats::na.omit(transformed_data), ~ -1 + stim:bkg)[, -1, drop=FALSE]
colnames(bkg_inter_mat) <- gsub(":", "_", colnames(bkg_inter_mat), fixed = TRUE)
transformed_data <- cbind.data.frame(stats::na.omit(transformed_data), bkg_inter_mat)
myformul <- as.formula(paste0("response ~ -1 + stim + stim:arm", "+", paste(colnames(bkg_inter_mat), collapse = " + ")))
mgls <- mygls(myformul,
data = transformed_data,
weights = varIdent(value = c("1" = 1), form = ~ 1 | stim),
method="REML", na.action = stats::na.omit)
if(!inherits(mgls, "try-error")){
# getting coef
s_mgls <- summary(mgls)
res_lik <- mgls$logLik
if(input$ddf == "By default"){
df_residual <- mgls$dims$N - mgls$dims$p
res_tab <- data.frame(cbind(s_mgls$tTable[, 1:2], rep(df_residual, nrow(s_mgls$tTable)), s_mgls$tTable[, 4]))
}else res_tab <- get_coefmat_gls(mgls, ddf=input$ddf)[, c(1,2,3,5)]
colnames(res_tab) <- c("Estimate", "Standard error", "ddf", "p-value")
sigmas <- stats::coef(mgls$modelStruct$varStruct, uncons = FALSE, allCoef = TRUE) * mgls$sigma
res_nparam <- renderText({paste0("<b>Number of estimated model parameters:</b> ", nrow(res_tab) + length(sigmas))})
# pretty coef names
rownames(res_tab)[1] <- paste0(as.character(resp), " : Average response in reference stimulation ", input$selectRefStim,
" in reference arm ", input$selectRefArmInter)
nstim <- nlevels(transformed_data$stim)
for(i in 1:(nstim-1)){
rownames(res_tab)[1 + i] <- paste0(as.character(resp), " : Average response in stimulation ", levels(transformed_data$stim)[1 + i],
" in reference arm ", input$selectRefArmInter)
}
for(i in 1:(nstim-1)){
rownames(res_tab)[nstim + i] <- paste0(as.character(resp), " : Effect of reference stimulation ", input$selectRefStim, " on response in stimulation ",
levels(transformed_data$stim)[i + 1])
}
narm <- nlevels(transformed_data$arm)
for(i in 1:(narm-1)){
rownames(res_tab)[nstim*2-1 + 1 + (i-1)*nstim] <- paste0(as.character(resp), " : Effect of arm ", levels(transformed_data$arm)[1 + i],
" on response in reference stimulation ", input$selectRefStim)
for(j in 1:(nstim-1)){
rownames(res_tab)[nstim*2-1 + nstim*(i-1) + j + 1] <- paste0(as.character(resp), " : Effect of arm ", levels(transformed_data$arm)[1 + i],
" on response in stimulation ", levels(transformed_data$stim)[1 + j])
}
}
}else{
res_error <- paste0("Model was not able to run with the following error message:\n\n", mgls[1],
"\nMake sure analysis parameters are correct")
}
return(list("mgls" = mgls,
"res_error" = res_error,
"res_tab" = res_tab,
"res_lik" = res_lik))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/interarm_fit.R
|
interarm_postprocessres <- function(data_df, fit_res){
m2resloglik <- -2*fit_res$mgls$logLik
s_mgls <- summary(fit_res$mgls)
aic <- s_mgls$AIC
var_res <- fit_res$mgls$sigma^2
sigmas <- stats::coef(fit_res$mgls$modelStruct$varStruct, uncons = FALSE, allCoef = TRUE) * fit_res$mgls$sigma
vars <- t(cbind(sigmas^2))
colnames(vars) <- levels(data_df$stim)
rownames(vars) <- c("Variance")
# model output ----
res_lik <- t(c("AIC" = aic, "-2 Res. logLikelihood" = m2resloglik))
res_2plot <- fit_res$res_tab[grep("Effect of arm", rownames(fit_res$res_tab)), ]
rnames<-rownames(res_2plot)
metainfo_2plot <- do.call(rbind, strsplit(gsub(" reference", "", gsub(" stimulation", "",
sapply(strsplit(rownames(res_2plot), "Effect of arm "), "[", 2))),
" on response in "))
res_2plot <- cbind.data.frame("Arm" = metainfo_2plot[, 1],
"Stimulation" = metainfo_2plot[, 2],
"pvalue" = res_2plot[, "p-value"])
rownames(res_2plot)<-rnames
pval_2plot <- make_nice_pvals(res_2plot, data_df, auxvar = "arm")
return(list(
"vars" = vars,
"pval_2plot" = pval_2plot,
"res_lik" = res_lik,
"res_2plot" = res_2plot,
"pval_2plot" = pval_2plot
))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/interarm_postprocessres.R
|
#'Fitting GLS For Intra-Arm Setting
#'
#' @keywords internal
#' @importFrom stats na.omit
#' @importFrom nlme varIdent
intraarm_fit <- function(transformed_data, tested_time, input,resp){
res_tab <- NULL
res_lik <- NULL
res_error <- NULL
bkg_inter_mat <- model.matrix(data = stats::na.omit(transformed_data), ~ -1 + stim:bkg)[, -1, drop=FALSE]
colnames(bkg_inter_mat) <- gsub(":", "_", colnames(bkg_inter_mat), fixed = TRUE)
colnames(bkg_inter_mat) <- gsub(" ","",colnames(bkg_inter_mat), fixed = TRUE)
transformed_data <- cbind.data.frame(stats::na.omit(transformed_data), bkg_inter_mat)
myformul <- as.formula(paste0("response ~ -1 + stim", "+", paste(colnames(bkg_inter_mat), collapse = " + ")))
mgls <- mygls(myformul,
data = transformed_data,
# correlation = nlme::corCompSymm(form= ~ 1 | stim),
weights = varIdent(value = c("1" = 1), form = ~ 1 | stim),
method="REML", na.action = stats::na.omit
)
if(!inherits(mgls, "try-error")){
# getting coef
s_mgls <- summary(mgls)
res_lik <- mgls$logLik
if(input$ddf == "By default"){
df_residual <- mgls$dims$N - mgls$dims$p
res_tab <- data.frame(cbind(s_mgls$tTable[, 1:2], rep(df_residual, nrow(s_mgls$tTable)), s_mgls$tTable[, 4]))
}else res_tab <- get_coefmat_gls(mgls, ddf=input$ddf)[, c(1,2,3,5)]
colnames(res_tab) <- c("Estimate", "Standard error", "ddf", "p-value")
sigmas <- stats::coef(mgls$modelStruct$varStruct, uncons = FALSE, allCoef = TRUE) * mgls$sigma
res_nparam <- renderText({paste0("<b>Number of estimated model parameters:</b> ", nrow(res_tab) + length(sigmas))})
# pretty coef names
rownames(res_tab)[1] <- paste0(as.character(resp), " : Vaccine effect on response in reference stimulation ", input$selectRefStim,
" at ", tested_time, " compared to baseline ", input$selectRefTimeIntra)
nstim <- nlevels(transformed_data$stim)
for(i in 1:(nstim-1)){
rownames(res_tab)[1 + i] <- paste0(as.character(resp), " : Vaccine effect on response in stimulation ", levels(transformed_data$stim)[1 + i],
" at ", tested_time, " compared to baseline ", input$selectRefTimeIntra)
rownames(res_tab)[nstim + i] <- paste0(as.character(resp), " : Effect of reference stimulation ", input$selectRefStim,
" on response in stimulation ", levels(transformed_data$stim)[1 + i],
" at ", tested_time, " compared to baseline ", input$selectRefTimeIntra)
}
}else{
res_error <- paste0("Model was not able to run with the following error message:\n\n", mgls[1],
"\nMake sure analysis parameters are correct")
}
return(list("mgls" = mgls,
"res_error" = res_error,
"res_tab" = res_tab,
"res_lik" = res_lik))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/intraarm_fit.R
|
intraarm_postprocessres <- function(data_df, fit_res){
m2resloglik <- sapply(fit_res, function(x){-2*x$mgls$logLik})
s_mgls <- lapply(fit_res, function(x){summary(x$mgls)})
aic <- sapply(s_mgls, "[[", "AIC")
var_res <- sapply(fit_res, function(x){x$mgls$sigma^2})
sigmas <- sapply(fit_res, function(x){stats::coef(x$mgls$modelStruct$varStruct, uncons = FALSE, allCoef = TRUE) * x$mgls$sigma})
vars <- t(cbind(sigmas^2))
colnames(vars) <- levels(data_df$stim)
# model output ----
res_lik <- cbind("AIC" = aic, "-2 Res. logLikelihood" = m2resloglik)
res_2plot <- lapply(fit_res, function(x){x$res_tab[grep("Vaccine effect", rownames(x$res_tab)), ]})
res_2plot <- lapply(res_2plot, function(y){
temp <- cbind.data.frame(do.call(rbind, lapply(strsplit(gsub("on response in stimulation ", "",
sapply(strsplit(rownames(y), "Vaccine effect "), "[", 2)),
" at "),
function(x){c(x[1], strsplit(x[2], " compared to baseline ")[[1]][1])})),
"pvalue" = y[, "p-value"])
rownames(temp) <- NULL
colnames(temp)[1:2] <- c("Stimulation", "Timepoint")
return(temp)
})
pval_2plot <- make_nice_pvals(do.call(rbind.data.frame, res_2plot), data_df, auxvar = "time")
#maybe TODO compute group2 correctly when more than 2 Timepoints
return(list(
"vars" = vars,
"pval_2plot" = pval_2plot,
"res_lik" = res_lik,
"res_2plot" = res_2plot,
"pval_2plot" = pval_2plot
))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/intraarm_postprocessres.R
|
make_nice_pvals <- function(res_2plot, data_df, auxvar = "arm"){
data_df$stim <- as.factor(as.numeric(data_df$stim))
pval_2plot <- res_2plot
naux <- nlevels(data_df[, auxvar])
nstim <- nlevels(data_df$stim)
if(!is.numeric(data_df$response)){
data_df$response <- as.numeric(data_df$response)
}
pval_2plot$y.position <- as.vector(by(data_df$response, INDICES = data_df$stim, FUN = max, na.rm=TRUE)) +
0.05*max(data_df$response, na.rm = TRUE)
pval_2plot$group1 <- 1:nlevels(data_df$stim) - (naux-1)*0.4/naux
pval_2plot$group2 <- pval_2plot$group1
for(i in 2:(naux)){
pval_2plot$group2[(i-2)*(nstim) +1:nstim] <- pval_2plot$group1[(i-2)*(nstim) +1:nstim] + 2*(i-1)*0.4/naux
if(i>2){
pval_2plot$y.position[(i-2)*(nstim) +1:nstim] <- pval_2plot$y.position[(i-2)*(nstim) +1:nstim] + (i-2)*0.1*max(data_df$response, na.rm = TRUE)
}
}
pval_2plot$pvalue_format <- formatC(pval_2plot$pvalue, format = "e", digits=2)
if(length(pval_2plot$pvalue > 0.01) > 0){
pval_2plot$pvalue_format[pval_2plot$pvalue > 0.01] <- formatC(pval_2plot$pvalue[pval_2plot$pvalue > 0.01], digits=3)
}
if(length(pval_2plot$pvalue > 0.05) > 0){
pval_2plot$pvalue_format[pval_2plot$pvalue > 0.05] <- paste0("NS (p-value = ", formatC(pval_2plot$pvalue[pval_2plot$pvalue > 0.05], digits=3), ")")
}
if(length(pval_2plot$pvalue < 0.05) > 0){
pval_2plot$pvalue_format[pval_2plot$pvalue < 0.05] <- paste0("* (p-value = ", pval_2plot$pvalue_format[pval_2plot$pvalue < 0.05], ")")
}
if(length(pval_2plot$pvalue < 0.01) > 0){
pval_2plot$pvalue_format[pval_2plot$pvalue < 0.01] <- paste0("*", pval_2plot$pvalue_format[pval_2plot$pvalue < 0.01])
}
if(length(pval_2plot$pvalue < 0.001) > 0){
pval_2plot$pvalue_format[pval_2plot$pvalue < 0.001] <- paste0("*", pval_2plot$pvalue_format[pval_2plot$pvalue < 0.001])
}
return(pval_2plot)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/make_nice_pvals.R
|
# Module UI
#' @title mod_modelfit_ui and mod_modelfit_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#' @param datas internal
#' @param parent internal
#' @param origin internal
#'
#' @rdname mod_modelfit
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_modelfit_ui <- function(id){
ns <- NS(id)
tagList(
actionButton(ns("fit"), label = "Fit model",
class = "btn-primary")
)
}
# Module Server
#' @rdname mod_modelfit
#' @export
#' @keywords internal
mod_modelfit_server <- function(input, output, session, datas,parent,origin){
ns <- session$ns
# Run whenever fit button is pressed
observeEvent(input$fit, {
origin$output$res_error <- reactive("Please select adequate analysis parameters...")
responses_res <- list()
boxplot_print <- list()
heatmap_data2plot <- list()
toomuchdata <- FALSE
for(response in parent$selectResponse){
if(!is.null(datas$df) & parent$selectSubject %in% colnames(datas$df) &
parent$selectStim %in% colnames(datas$df) & datas$fact_stim_OK &
(parent$selectArmInter %in% colnames(datas$df) & datas$fact_arm_OK) |
(parent$selectTimeIntra %in% colnames(datas$df) & datas$fact_time_OK))
{
if(parent$selectModel == 1){
# data tansformation
if(parent$selectTimeInter != ''){
data_df <- datas$df[datas$df[, parent$selectTimeInter] == parent$selectRefTimeInter,
c(parent$selectSubject, response, parent$selectStim, parent$selectArmInter)]
}else{
data_df <- datas$df[, c(parent$selectSubject, response, parent$selectStim, parent$selectArmInter)]
}
colnames(data_df) <- c("Subject", "response", "stim", "arm")
transformed_data <- data_df
transformed_data$bkg <- 0
transformed_data <- transformed_data[order(transformed_data$stim, transformed_data$Subject), ] # align stimulations so that subject order matches in the following loop
if(!is.factor(transformed_data$stim)){
transformed_data$stim <- as.factor(transformed_data$stim)
}
for(l in levels(transformed_data$stim)){
if(l != parent$selectRefStim){
transformed_data[transformed_data$stim == l, "bkg"] <- transformed_data[transformed_data$stim == parent$selectRefStim, "response"]
}
}
transformed_data$arm <- stats::relevel(factor(transformed_data$arm), ref=parent$selectRefArmInter)
transformed_data$stim <- stats::relevel(factor(transformed_data$stim), ref=parent$selectRefStim)
data_df$stim <- relevel(factor(data_df$stim), ref=parent$selectRefStim)
if(!is.factor(data_df$arm)){
data_df$arm <- as.factor(data_df$arm)
}
# model fit ----
fit_res <- interarm_fit(transformed_data, parent, response)
if(!inherits(fit_res$mgls, "try-error")){
responses_res[[response]]$res_error <- NULL
responses_res[[response]]$postprocess_res <- interarm_postprocessres(data_df, fit_res)
if(parent$plot == "boxplot"){
boxplot_print[[response]] <- boxplot_VICI(data_df, responses_res[[response]]$postprocess_res$pval_2plot,
response_name = response, input = parent)
}
if(parent$plot == "histogram"){
boxplot_print[[response]] <- histogram_VICI(data_df, responses_res[[response]]$postprocess_res$pval_2plot,
response_name = response, input = parent)
}
heatmap_data2plot[[response]] <- responses_res[[response]]$postprocess_res$res_2plot
heatmap_data2plot[[response]]$response <- response
heatmap_data2plot[[response]]$pvalue <- cut(heatmap_data2plot[[response]]$pvalue,
breaks = c(0, 0.001, 0.01, 0.05, 0.1, 0.5, 1),
right = FALSE)
responses_res[[response]]$res_tab <- fit_res$res_tab
}
}else if(parent$selectModel == 2){
# data tansformation
if(parent$selectArmIntra != ''){
data_df <- datas$df[datas$df[, parent$selectArmIntra] == parent$selectRefArmIntra,
c(parent$selectSubject, response, parent$selectStim, parent$selectTimeIntra)]
}else{
data_df <- datas$df[, c(parent$selectSubject, response, parent$selectStim, parent$selectTimeIntra)]
}
colnames(data_df) <- c("Subject", "response", "stim", "time")
data_df$stim <- stats::relevel(factor(data_df$stim), ref=parent$selectRefStim)
transformed_data <- data_df
transformed_data$time <- stats::relevel(transformed_data$time, ref=parent$selectRefTimeIntra)
transformed_data <- try(tidyr::spread(transformed_data, key = "time", value = "response"),
silent = TRUE)
if(inherits(transformed_data, "try-error")){
clean_output(output)
toomuchdata <- TRUE
origin$output$res_error <- reactive(paste0("Too many observation in time point ", parent$selectRefTimeIntra,
"... Perhaps the Arm to analyzed was not specified"))
}else{
for(i in ncol(transformed_data):3){
if(!is.numeric(transformed_data[, 3])){
transformed_data[, 3] <- as.numeric(transformed_data[, 3])
}
if(!is.numeric(transformed_data[, i])){
transformed_data[, i] <- as.numeric(transformed_data[, i])
}
transformed_data[, i] <- (transformed_data[, i] - transformed_data[, 3])
}
fit_res <- list()
for(t in 4:ncol(transformed_data)){
tp <- colnames(transformed_data)[t]
transformed_data_temp <- transformed_data[, c(1:2, t), drop=FALSE]
colnames(transformed_data_temp)[3] <- "response"
transformed_data_temp$bkg <- 0.000 # intialize bkg ground
transformed_data_temp <- transformed_data_temp[order(transformed_data_temp$stim,
transformed_data_temp$Subject), ] # align stimulations so that subject order matches in the following loop
for(l in levels(transformed_data_temp$stim)){
if(l != parent$selectRefStim){
transformed_data_temp[transformed_data_temp$stim == l, "bkg"] <- transformed_data_temp[transformed_data_temp$stim ==
parent$selectRefStim, "response"]
}
}
transformed_data_temp$stim <- stats::relevel(factor(transformed_data_temp$stim), ref=parent$selectRefStim)
# model fit ----
fit_res[[tp]] <- intraarm_fit(transformed_data = transformed_data_temp,
tested_time = tp, input = parent, resp = response)
}
if(!prod(sapply(fit_res, function(x){inherits(x$mgls, "try-error")}))){
responses_res[[response]]$res_error <- NULL
responses_res[[response]]$postprocess_res <- intraarm_postprocessres(data_df, fit_res)
if(parent$plot == "boxplot"){
boxplot_print[[response]] <- boxplot_VICI(data_df, responses_res[[response]]$postprocess_res$pval_2plot,
response_name = response,
input = parent,
inter = FALSE,
baseline = parent$selectRefTimeIntra)
}
if(parent$plot == "histogram"){
boxplot_print[[response]] <- histogram_VICI(data_df, responses_res[[response]]$postprocess_res$pval_2plot,
response_name = response,
input = parent,
inter = FALSE,
baseline = parent$selectRefTimeIntra)
}
responses_res[[response]]$res_tab <- do.call(rbind, lapply(fit_res, "[[", "res_tab"))
heatmap_data2plot[[response]] <- responses_res[[response]]$postprocess_res$res_2plot
for(l in 1:length(heatmap_data2plot[[response]])){
heatmap_data2plot[[response]][[l]]$response <- response
heatmap_data2plot[[response]][[l]]$pvalue <- cut(heatmap_data2plot[[response]][[l]]$pvalue,
breaks = c(0, 0.001, 0.01, 0.05, 0.1, 0.5, 1),
right = FALSE)
}
heatmap_data2plot[[response]] <- do.call(rbind.data.frame,
heatmap_data2plot[[response]])
}
}
}
}
}
if(!toomuchdata){
if(length(responses_res)<1){
clean_output(output)
origin$output$res_error <- reactive("Please select adequate analysis parameters before trying to fit the model...")
}else{
if(parent$ddf=="Kenward-Roger"){
myTabs <- tabPanel(title = "WARNING - Kenward-Roger not implemented, please use SAS to obtain results of this approximation.")
}else{
myTabs <- lapply(parent$selectResponse, function(resp) {
if(is.null(session$userData$res_data)){
session$userData$res_data<<- responses_res[[resp]]$res_tab
}else{
session$userData$res_data<<- rbind(session$userData$res_data,responses_res[[resp]]$res_tab)
}
tabPanel(title = resp, value = resp,
wellPanel(
fluidRow(
renderPlot(boxplot_print[[resp]])),
h6(""),
myDownloadHandlerForPlots(name = "VICIboxplot.png", plot_obj = boxplot_print[[resp]],
outputArgs = list(label = "Download boxplot [PNG]", class = "btn-primary")),
h3(""),
h4(paste("Numerical results for", resp)),
renderTable(
{
responses_res[[resp]]$res_tab[,1] <- formatC(responses_res[[resp]]$res_tab[,1], format="f", digits = 5)
responses_res[[resp]]$res_tab[,2] <- formatC(responses_res[[resp]]$res_tab[,2], format="f", digits = 5)
if(parent$ddf=="By default"){
responses_res[[resp]]$res_tab[,3] <- formatC(as.numeric(substr(responses_res[[resp]]$res_tab[,3], 1,5)), format="f", digits = 0)
} else responses_res[[resp]]$res_tab[,3] <- formatC(as.numeric(substr(responses_res[[resp]]$res_tab[,3], 1,4)), format="f", digits = 1)
responses_res[[resp]]$res_tab[,4] <- formatC(responses_res[[resp]]$res_tab[,4], format="f", digits = 5)
responses_res[[resp]]$res_tab
}, rownames=TRUE
)
)
)
})}
origin$output$boxplotsAndTabs <- renderUI({
do.call(tabsetPanel, myTabs)
})
if(parent$selectModel == 1){
origin$output$res_nparam <- renderText({paste0("<b>Number of estimated model parameters for each response:</b> ",
nrow(responses_res[[1]]$res_tab) + ncol(responses_res[[1]]$postprocess_res$vars))})
heatmap_data2plot_noref <- lapply(heatmap_data2plot, function(x){x[-grep("reference", rownames(x)), ]})
}else if(parent$selectModel == 2){
origin$output$res_nparam <- renderText({paste0("<b>Number of estimated model parameters for each response:</b> ",
nrow(responses_res[[1]]$res_tab) + length(responses_res[[1]]$postprocess_res$vars))})
heatmap_data2plot_noref <- lapply(heatmap_data2plot, function(x){x[-grep("reference", x$Stimulation), ]})
}
origin$output$res_error <- reactive(NULL)
res_lik_all <- lapply(lapply(responses_res, "[[", "postprocess_res"), "[[", "res_lik")
res_lik_all <- do.call(rbind.data.frame, res_lik_all)
origin$output$res_lik <- renderTable(res_lik_all,
rownames = TRUE, digits = 4)
all_vars <- lapply(lapply(responses_res, "[[", "postprocess_res"), "[[", "vars")
all_vars <- do.call(rbind.data.frame, all_vars)
origin$output$res_var <- renderTable(all_vars, rownames = TRUE, digits=6)
hm_data2plot_all <- do.call(rbind.data.frame, heatmap_data2plot_noref)
hm_data2plot_all$response <- factor(hm_data2plot_all$response, ordered = TRUE,
levels = rev(parent$selectResponse))
#TODO if model 2: cross response x time points on y-axis
if(parent$selectModel == 2)
heatmap_print <- heatmap_vici(hm_data2plot_all, inter = FALSE,
baseline = parent$selectRefTimeIntra)
else{
heatmap_print <- heatmap_vici(hm_data2plot_all, inter = TRUE)
}
origin$output$heatmap <- renderPlot(heatmap_print)
origin$output$downloadHM <- myDownloadHandlerForPlots(name = "VICIheatmap.png", plot_obj = heatmap_print)
}
}
updateTabsetPanel(origin, "inTabset", selected = "resTab")
})
}
## To be copied in the UI
# mod_modelfit_ui("modelfit_ui_1")
## To be copied in the server
# callModule(mod_modelfit_server, "modelfit_ui_1")
|
/scratch/gouwar.j/cran-all/cranData/vici/R/mod_modelfit.R
|
# Module UI
#' @title mod_settings_pan_ui and mod_settings_pan_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#' @param datas internal
#' @param parent
#'
#' @rdname mod_settings_pan
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
#' @import shinyWidgets
#' @import scales
mod_settings_pan_ui <- function(id){
ns <- NS(id)
listPal <- list("Blues","BuGn","BuPu","GnBu","Greens","Greys","Oranges","OrRd","PuBu",
"PuBuGn","PuRd","Purples","RdPu","Reds","YlGn","YlGnBu","YlOrBr","YlOrRd",
"BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral",
"Set3","Set2","Set1","Pastel2","Pastel1","Paired","Dark2","Accent")
tagList(
sidebarPanel(
# Input: Select a file ----
h3("Data input"),
fileInput(ns("datafile"), label = "Choose a CSV/TXT file to import",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Input: Checkbox if file has header ----
checkboxInput(ns("header"), "Header", TRUE),
# Input: Select separator ----
radioButtons(ns("sep"), "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = "\t"),
# Input: Select quotes ----
radioButtons("quote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
# Horizontal line ----
tags$hr(),
h3("Input parameters"),
selectInput(ns("selectModel"), label = "Model choice",
choices = list("inter-arm (ARM Compare)" = 1, "intra-arm (TimePoint Compare)" = 2),
selected = 2),
h4("Variable specification"),
selectizeInput(ns("selectSubject"), label = "Select the column that identifies the subject ID",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below')
),
selectizeInput(ns("selectResponse"), label = "Select the column(s) that identify ICS response",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below'),
multiple = TRUE
),
selectizeInput(ns("selectStim"), label = "Select the column that identifies the stimulation",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below')
),
conditionalPanel(
condition = sprintf("input['%s']!= '' & !output.stimisfactor",ns("selectStim")),#"input.selectStim != '' & !output.stimisfactor",
verbatimTextOutput(ns("warningstimisfactor"))
),
conditionalPanel(
condition = sprintf("input['%s'] != '' & output.stimisfactor",ns("selectStim")),#"input.selectStim != '' & output.stimisfactor",
selectizeInput(ns("selectRefStim"), label = "Select the value that identifies background samples",
choices =c(Choose = "", NULL))
),
conditionalPanel(
condition = sprintf("input['%s'] == 1",ns("selectModel")),#"input.selectModel == 1",
selectizeInput(ns("selectArmInter"), label = "Select the column that identifies the arm",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below')
)
),
conditionalPanel(
condition = sprintf("input['%s'] == 1 & input['%s'] != '' & !output.armisfactor & output.warningarmisfactor != null",ns("selectModel"),ns("selectArmInter")),#"input.selectModel == 1 & input.selectArmInter != '' & !output.armisfactor & output.warningarmisfactor != null",
verbatimTextOutput(ns("warningarmisfactor"))
),
conditionalPanel(
condition = sprintf("input['%s'] == 1 & input['%s'] != '' & output.armisfactor",ns("selectModel"),ns("selectArmInter")),#"input.selectModel == 1 & input.selectArmInter != '' & output.armisfactor",
selectizeInput(ns("selectRefArmInter"), label = "Select the value that identifies the reference arm",
choices =c(Choose = "", NULL))
),
conditionalPanel(
condition = sprintf("input['%s'] == 1",ns("selectModel")),#"input.selectModel == 1",
selectizeInput(ns("selectTimeInter"), label = "If several time-points (optional), please select the column that identifies the observation's time-point",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below')
)
),
conditionalPanel(
condition = sprintf("input['%s'] == 1 & input['%s'] != '' ",ns("selectModel"),ns("selectTimeInter")) ,#"input.selectModel == 1 & input.selectTimeInter != '' ",
selectizeInput(ns("selectRefTimeInter"), label = "Select the time-point to analyze",
choices =c(Choose = "", NULL))
),
conditionalPanel(
condition = sprintf("input['%s'] == 2",ns("selectModel")),#"input.selectModel == 2",
selectizeInput(ns("selectTimeIntra"), label = "Select the column that identifies the time-points",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below')
)
),
conditionalPanel(
condition = sprintf("input['%s'] == 2 & input['%s'] != ''",ns("selectModel"),ns("selectTimeIntra")),#"input.selectModel == 2 & input.selectTimeIntra != ''",
selectizeInput(ns("selectRefTimeIntra"), label = "Select the value that identifies the reference time-point",
choices =c(Choose = "", NULL))
),
conditionalPanel(
condition = sprintf("input['%s'] == 2",ns("selectModel")),#"input.selectModel == 2",
selectizeInput(ns("selectArmIntra"), label = "If several arms (optional) please select the column that identifies the observation's arm",
choices = c(Choose = "", NULL),
options = list(placeholder = 'Please select a column name below')
)
),
conditionalPanel(
condition = sprintf("input['%s'] == 2 & input['%s'] != '' & !output.armisfactor2 & output.warningarm2isfactor != null",ns("selectModel"),ns("selectArmIntra")) ,#"input.selectModel == 2 & input.selectArmIntra != '' & !output.armisfactor2 & output.warningarm2isfactor != null",
verbatimTextOutput(ns("warningarm2isfactor"))
),
conditionalPanel(
condition = sprintf("input['%s'] == 2 & input['%s'] != '' & output.arm2isfactor",ns("selectModel"),ns("selectArmIntra")) ,#"input.selectModel == 2 & input.selectArmIntra != '' & output.arm2isfactor",
selectizeInput(ns("selectRefArmIntra"), label = "Select the arm to analyze",
choices =c(Choose = "", NULL))
),
h4("Denominator degrees of freedom approximations"),
radioButtons(ns("ddf"), NULL,
choices = c("By default",
"Between-Within",
"Satterthwaite",
"Kenward-Roger"),
selected = "Between-Within"),
radioButtons(ns("plot"), "Choose the type of plot",
choices = c(Boxplot = "boxplot"),
#Histogram = "histogram"),
selected = "boxplot"),
pickerInput(inputId = ns("color"),
label = "Color palette",
choices = listPal <- list("Blues","BuGn","BuPu","GnBu","Greens","Greys","Oranges","OrRd","PuBu",
"PuBuGn","PuRd","Purples","RdPu","Reds","YlGn","YlGnBu","YlOrBr","YlOrRd",
"BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral",
"Set3","Set2","Set1","Pastel2","Pastel1","Paired","Dark2","Accent"),#c("pal1","pal2", "pal3", "pal4"),#df$val,
selected = "Set1",
choicesOpt = list(content = c(sprintf("<img src='www/palettes/Blues.png' width=30px><div class='jhr'>%s</div></img>", "Blues"),
sprintf("<img src='www/palettes/BuGn.png' width=30px><div class='jhr'>%s</div></img>", "BuGn"),
sprintf("<img src='www/palettes/BuPu.png' width=30px><div class='jhr'>%s</div></img>", "BuPu"),
sprintf("<img src='www/palettes/GnBu.png' width=30px><div class='jhr'>%s</div></img>", "GnBu"),
sprintf("<img src='www/palettes/Greens.png' width=30px><div class='jhr'>%s</div></img>", "Greens"),
sprintf("<img src='www/palettes/Greys.png' width=30px><div class='jhr'>%s</div></img>", "Greys"),
sprintf("<img src='www/palettes/Oranges.png' width=30px><div class='jhr'>%s</div></img>", "Oranges"),
sprintf("<img src='www/palettes/OrRd.png' width=30px><div class='jhr'>%s</div></img>", "OrRd"),
sprintf("<img src='www/palettes/PuBu.png' width=30px><div class='jhr'>%s</div></img>", "PuBu"),
sprintf("<img src='www/palettes/PuBuGn.png' width=30px><div class='jhr'>%s</div></img>", "PuBuGn"),
sprintf("<img src='www/palettes/PuRd.png' width=30px><div class='jhr'>%s</div></img>", "PuRd"),
sprintf("<img src='www/palettes/Purples.png' width=30px><div class='jhr'>%s</div></img>", "Purples"),
sprintf("<img src='www/palettes/RdPu.png' width=30px><div class='jhr'>%s</div></img>", "RdPu"),
sprintf("<img src='www/palettes/Reds.png' width=30px><div class='jhr'>%s</div></img>", "Reds"),
sprintf("<img src='www/palettes/YlGn.png' width=30px><div class='jhr'>%s</div></img>", "YlGn"),
sprintf("<img src='www/palettes/YlGnBu.png' width=30px><div class='jhr'>%s</div></img>", "YlGnBu"),
sprintf("<img src='www/palettes/YlOrBr.png' width=30px><div class='jhr'>%s</div></img>", "YlOrBr"),
sprintf("<img src='www/palettes/YlOrRd.png' width=30px><div class='jhr'>%s</div></img>", "YlOrRd"),
sprintf("<img src='www/palettes/BrBG.png' width=30px><div class='jhr'>%s</div></img>", "BrBG"),
sprintf("<img src='www/palettes/PiYG.png' width=30px><div class='jhr'>%s</div></img>", "PiYG"),
sprintf("<img src='www/palettes/PRGn.png' width=30px><div class='jhr'>%s</div></img>", "PRGn"),
sprintf("<img src='www/palettes/PuOr.png' width=30px><div class='jhr'>%s</div></img>", "PuOr"),
sprintf("<img src='www/palettes/RdBu.png' width=30px><div class='jhr'>%s</div></img>", "RdBu"),
sprintf("<img src='www/palettes/RdGy.png' width=30px><div class='jhr'>%s</div></img>", "RdGy"),
sprintf("<img src='www/palettes/RdYlBu.png' width=30px><div class='jhr'>%s</div></img>", "RdYlBu"),
sprintf("<img src='www/palettes/RdYlGn.png' width=30px><div class='jhr'>%s</div></img>", "RdYlGn"),
sprintf("<img src='www/palettes/Spectral.png' width=30px><div class='jhr'>%s</div></img>", "Spectral"),
sprintf("<img src='www/palettes/Set3.png' width=30px><div class='jhr'>%s</div></img>", "Set3"),
sprintf("<img src='www/palettes/Set2.png' width=30px><div class='jhr'>%s</div></img>", "Set2"),
sprintf("<img src='www/palettes/Set1.png' width=30px><div class='jhr'>%s</div></img>", "Set1"),
sprintf("<img src='www/palettes/Pastel2.png' width=30px><div class='jhr'>%s</div></img>", "Pastel2"),
sprintf("<img src='www/palettes/Pastel1.png' width=30px><div class='jhr'>%s</div></img>", "Pastel1"),
sprintf("<img src='www/palettes/Paired.png' width=30px><div class='jhr'>%s</div></img>", "Paired"),
sprintf("<img src='www/palettes/Dark2.png' width=30px><div class='jhr'>%s</div></img>", "Dark2"),
sprintf("<img src='www/palettes/Accent.png' width=30px><div class='jhr'>%s</div></img>", "Accent")
))),
radioButtons(ns("jiter"), "Dot of Boxplot",
choices = c(Filed = "19",
Empty = "1",
None = "None"),
selected = "19"),
tags$hr(),
h3("Run analysis"),
mod_modelfit_ui("modelfit_ui_1"),
h3(),
tags$hr(),
h3("Example data"),
fluidRow(
actionButton(ns("loadExample"), label = "load example data")
),
fluidRow(
downloadButton(ns("downloadExData"), label = "download example data")
)
)
)
}
# Module Server
#' @rdname mod_settings_pan
#' @export
#' @keywords internal
mod_settings_pan_server <- function(input, output, session,datas,parent){
ns <- session$ns
# example data
output$downloadExData <- downloadHandler( #Fait appel à une lib externe donc pas besoin de tester
filename = "exampleICSdata.txt",
content = function(file) {
utils::write.table(vici::ICS_ex, file, row.names = FALSE, sep="\t", quote = FALSE)
}
)
observeEvent(input$loadExample,{
datas$df <<- vici::ICS_ex
clean_output(output)
parent$output$table2render <- DT::renderDataTable(datas$df,
options = list(pageLength = 10, lengthMenu = list(c(5, 10, -1), c('5', '10', 'All')))
)
output$mod <- reactive(NULL)
output$mod_display <- reactive(FALSE)
updateRadioButtons(session, inputId = "sep", selected = "\t")
updateCheckboxInput(session, inputId = "header", value = TRUE)
observeEvent(input$selectModel, {
if (input$selectModel==1){
available_vars_init <- colnames(datas$df)
updateSelectizeInput(session, "selectSubject",
selected = 'Subject',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectResponse",
selected = c('Response1', 'Response2'),
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectStim",
selected = 'StimulationPool',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefStim",
selected = 'NS',
choices = c(levels(datas$df$StimulationPool))
)
updateSelectizeInput(session, "selectArmInter",
selected = 'Arm',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefArmInter",
selected = 'Placebo',
choices = c(levels(datas$df$Arm))
)
updateSelectizeInput(session, "selectTimeInter",
selected = 'TimePoint',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefTimeInter",
selected = 'D1',
choices = c(levels(datas$df$TimePoint))
)
updateTabsetPanel(parent, "inTabset", selected = "dataTab")
}else if (input$selectModel==2){
available_vars_init <- colnames(datas$df)
updateSelectizeInput(session, "selectSubject",
selected = 'Subject',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectResponse",
selected = c('Response1', 'Response2'),
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectStim",
selected = 'StimulationPool',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefStim",
selected = 'NS',
choices = c(levels(datas$df$StimulationPool))
)
updateSelectizeInput(session, "selectTimeIntra",
selected = 'TimePoint',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefTimeIntra",
selected = 'D0',
choices = c(levels(datas$df$TimePoint))
)
updateSelectizeInput(session, "selectArmIntra",
selected = 'Arm',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefArmIntra",
selected = 'A2',
choices = c(levels(datas$df$Arm))
)
updateTabsetPanel(parent, "inTabset", selected = "dataTab")
}
})
})
observeEvent({input$datafile; input$header; input$sep}, {
req(input$datafile)
datas$df <- {
# when reading semicolon separated files,
# having a comma separator causes `read.csv` to error
tryCatch(
{
df <- utils::read.csv(input$datafile$datapath,
header = input$header,
sep = input$sep)
},
error = function(e){ stop(safeError(e)) } # return a safeError if a parsing error occurs
)
#Setters
clean_output(parent$output)
parent$output$mod <- reactive(NULL)
parent$output$mod_display <- reactive(FALSE)
df}
available_vars_init <- colnames(datas$df)
updateSelectizeInput(session, "selectSubject",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectResponse",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectStim",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectArmInter",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectArmIntra",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectTimeIntra",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectTimeInter",
selected = '',
choices = c('', available_vars_init),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectRefTimeIntra",
selected = ''
)
updateSelectizeInput(session, "selectRefTimeInter",
selected = ''
)
updateSelectizeInput(session, "selectRefArmInter",
selected = ''
)
updateSelectizeInput(session, "selectRefArmIntra",
selected = ''
)
updateSelectizeInput(session, "selectRefStim",
selected = ''
)
updateTabsetPanel(parent, "inTabset", selected = "dataTab")
})
parent$output$table2render <- DT::renderDataTable(
{
datas$df
},
options = list(pageLength = 10, lengthMenu = list(c(5, 10, -1), c('5', '10', 'All')))
)
# update available variables for selection ----
# observeEvent available_vars ----
observeEvent(datas$available_vars, {
updateSelectizeInput(session, "selectSubject",
choices = c(input$selectSubject, datas$available_vars, intToUtf8(160)),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectResponse",
selected = input$selectResponse,
choices = as.list(c(input$selectResponse, datas$available_vars, intToUtf8(160))),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectStim",
choices = c(input$selectStim, datas$available_vars, intToUtf8(160)),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectArmInter",
choices = c(input$selectArmInter, datas$available_vars, intToUtf8(160)),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectArmIntra",
choices = c(input$selectArmIntra, datas$available_vars, intToUtf8(160)),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectTimeIntra",
choices = c(input$selectTimeIntra, datas$available_vars, intToUtf8(160)),
options = list(placeholder = 'Please select a variable below')
)
updateSelectizeInput(session, "selectTimeInter",
choices = c(input$selectTimeInter, datas$available_vars, intToUtf8(160)),
options = list(placeholder = 'Please select a variable below')
)}
)
observeEvent(input$selectSubject, {
if (input$selectSubject != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
clean_output(output)
})
observeEvent(input$selectStim, {
if (input$selectStim != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
if (input$selectStim %in% colnames(datas$df)){
selected_stim_var <- factor(datas$df[, input$selectStim])
if(is.factor(selected_stim_var)){
output$stimisfactor <- reactive(TRUE)
possible_stims <- levels(selected_stim_var)
output$warningstimisfactor <- reactive(NULL)
datas$fact_stim_OK <- TRUE
}else{
output$stimisfactor <- reactive(FALSE)
output$warningstimisfactor <- reactive(paste0("WARNING: '", input$selectStim, "' is not a factor"))
datas$fact_stim_OK <- FALSE
possible_stims <- paste0("Error: '", input$selectStim, "' is not a factor\nPlease select a different variable")
}
updateSelectizeInput(session, "selectRefStim",
choices = c(possible_stims[1], possible_stims),
selected = ifelse(input$selectRefStim=='', possible_stims[1], input$selectRefStim)
)
}else{
output$stimisfactor <- reactive(FALSE)
output$warningstimisfactor <- reactive(paste0("WARNING: '", input$selectStim, "' is not a column in the input data"))
datas$fact_stim_OK <- FALSE
}
}
clean_output(output)
})
observeEvent(input$selectResponse, {
if (length(input$selectResponse) >= 1){
if (input$selectResponse[1] != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
clean_output(output)
}
}
})
observeEvent(input$selectArmInter, {
if (input$selectArmInter != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
if (input$selectArmInter %in% colnames(datas$df)){
selected_arm_var <- factor(datas$df[, input$selectArmInter])
if(is.factor(selected_arm_var)){
output$armisfactor <- reactive(TRUE)
possible_arms <- levels(selected_arm_var)
output$warningarmisfactor <- reactive(NULL)
datas$fact_arm_OK <- TRUE
}else{
output$armisfactor <- reactive(FALSE)
output$warningarmisfactor <- reactive(paste0("WARNING: '", input$selectArmInter,
"' is not a factor"))
datas$fact_arm_OK <- FALSE
possible_arms <- paste0("Error: '", input$selectArmInter,
"' is not a factor\nPlease select a different variable")
}
updateSelectizeInput(session, "selectRefArmInter",
choices = c(possible_arms[1], possible_arms),
selected = ifelse(is.null(input$selectRefArmInter) | (length(input$selectRefArmInter)>0 && input$selectRefArmInter==''),
possible_arms[1], input$selectRefArmInter)
)
}else if(input$selectArmInter != intToUtf8(160)){
output$armisfactor <- reactive(FALSE)
output$warningarmisfactor <- reactive(paste0("WARNING: '", input$selectArmInter,
"' is not a column in the input data"))
datas$fact_arm_OK <- FALSE
}else{
output$armisfactor <- reactive(FALSE)
output$warningarmisfactor <- reactive(NULL)
datas$fact_arm_OK <- FALSE
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df)) #A tester
}
}else{
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
clean_output(output)
})
observeEvent(input$selectArmIntra, {
if (input$selectArmIntra != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
if (input$selectArmIntra %in% colnames(datas$df)){
selected_arm2_var <- factor(datas$df[, input$selectArmIntra])
if(is.factor(selected_arm2_var)){
output$arm2isfactor <- reactive(TRUE)
possible_arm2s <- levels(selected_arm2_var)
output$warningarm2isfactor <- reactive(NULL)
datas$fact_arm2_OK <- TRUE
}else{
output$arm2isfactor <- reactive(FALSE)
output$warningarm2isfactor <- reactive(paste0("WARNING: '", input$selectArmIntra,
"' is not a factor"))
datas$fact_arm2_OK <- FALSE
possible_arm2s <- paste0("Error: '", input$selectArmIntra,
"' is not a factor\nPlease select a different variable")
}
updateSelectizeInput(session, "selectRefArmIntra",
choices = c(possible_arm2s[1], possible_arm2s),
selected = ifelse(is.null(input$selectRefArmIntra) | (length(input$selectRefArmIntra)>0 && input$selectRefArmIntra==''),
possible_arm2s[1], input$selectRefArmIntra)
)
}else if(input$selectArmIntra != intToUtf8(160)){
output$arm2isfactor <- reactive(FALSE)
output$warningarm2isfactor <- reactive(paste0("WARNING: '", input$selectArmIntra,
"' is not a column in the input data"))
datas$fact_arm2_OK <- FALSE
}else{
output$arm2isfactor <- reactive(FALSE)
output$warningarm2isfactor <- reactive(NULL)
datas$fact_arm2_OK <- FALSE
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
}else{
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
clean_output(output)
})
observeEvent(input$selectModel, {
if(!is.null(datas$available_vars)){
updateSelectizeInput(session, "selectArmInter",
choices = union(c('', datas$available_vars),
union(union(input$selectArmInter, input$selectArmIntra), input$selectTimeInter))
)
updateSelectizeInput(session, "selectArmIntra",
choices = union(c('', datas$available_vars),
union(union(input$selectArmInter, input$selectArmIntra), input$selectTimeIntra))
)
updateSelectizeInput(session, "selectTimeIntra",
choices = union(c('', datas$available_vars),
union(union(input$selectArmInter, input$selectArmIntra), input$selectTimeIntra))
)
updateSelectizeInput(session, "selectTimeInter",
choices = union(c('', datas$available_vars),
union(union(input$selectArmInter, input$selectArmIntra), input$selectTimeInter))
)
updateSelectizeInput(session, "selectRefTimeIntra",
selected = ''
)
updateSelectizeInput(session, "selectRefTimeInter",
selected = ''
)
updateSelectizeInput(session, "selectRefArmInter",
selected = ''
)
updateSelectizeInput(session, "selectRefArmIntra",
selected = ''
)
}
clean_output(output)
}
)
# observe time ----
observeEvent(input$selectTimeIntra, {
if (input$selectTimeIntra != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
if(input$selectTimeIntra %in% colnames(datas$df)){
datas$df[, input$selectTimeIntra] <- as.factor(as.character(datas$df[, input$selectTimeIntra]))
selected_time_var <- datas$df[, input$selectTimeIntra]
output$timeisfactor <- reactive(TRUE)
possible_times <- levels(selected_time_var)
output$warnintimeisfactor <- reactive(NULL)
datas$fact_time_OK <- TRUE
updateSelectizeInput(session, "selectRefTimeIntra",
choices = c(possible_times[1], possible_times),
selected = ifelse(is.null(input$selectRefTimeIntra) | (length(input$selectRefTimeIntra)>0 && input$selectRefTimeIntra==''),
possible_times[1], input$selectRefTimeIntra)
)
}else if(input$selectTimeIntra != intToUtf8(160)){
output$timeisfactor <- reactive(FALSE)
output$warnintimeisfactor <- reactive(paste0("WARNING: '", input$selectTimeIntra,
"' is not a column in the input datas"))
datas$fact_time_OK <- FALSE
}else{
output$timeisfactor <- reactive(FALSE)
output$warnintimeisfactor <- reactive(NULL)
datas$fact_time_OK <- FALSE
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
}else{
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
clean_output(output)
})
# observe time ----
observeEvent(input$selectTimeInter, {
if (input$selectTimeInter != ''){
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
if(input$selectTimeInter %in% colnames(datas$df)){
datas$df[, input$selectTimeInter] <- as.factor(as.character(datas$df[, input$selectTimeInter]))
selected_time2_var <- datas$df[, input$selectTimeInter]
output$time2isfactor <- reactive(TRUE)
possible_times2 <- levels(selected_time2_var)
output$warnintime2isfactor <- reactive(NULL)
datas$fact_time2_OK <- TRUE
updateSelectizeInput(session, "selectRefTimeInter",
choices = c(possible_times2[1], possible_times2),
selected = ifelse(is.null(input$selectRefTimeInter) | (length(input$selectRefTimeInter)>0 && input$selectRefTimeInter==''),
possible_times2[1], input$selectRefTimeInter)
)
}else if(input$selectTimeInter != intToUtf8(160)){
output$time2isfactor <- reactive(FALSE)
output$warnintime2isfactor <- reactive(paste0("WARNING: '", input$selectTimeInter,
"' is not a column in the input data"))
datas$fact_time2_OK <- FALSE
}else{
output$time2isfactor <- reactive(FALSE)
output$warnintime2isfactor <- reactive(NULL)
datas$fact_time2_OK <- FALSE
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
}else{
datas$available_vars <- update_vars(input, possibilities = colnames(datas$df))
}
clean_output(output)
})
observeEvent(input$ddf, {
clean_output(parent$output)
})
observeEvent({input$selectRefArmInter; input$selectRefArmIntra; input$selectRefStim; input$selectRefTimeIntra; input$selectRefTimeInter}, {
clean_output(output)
})
return(input)
}
## To be copied in the UI
# mod_settings_pan_ui("settings_pan_ui_1")
## To be copied in the server
# callModule(mod_settings_pan_server, "settings_pan_ui_1")
## Code to generate the png files:
# library(ggplot2)
# listPal <- list("Blues","BuGn","BuPu","GnBu","Greens","Greys","Oranges","OrRd","PuBu",
# "PuBuGn","PuRd","Purples","RdPu","Reds","YlGn","YlGnBu","YlOrBr","YlOrRd",
# "BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral",
# "Set3","Set2","Set1","Pastel2","Pastel1","Paired","Dark2","Accent")
# n <- 5
# for(pal in listPal){
# ggplot(data.frame(x=as.character(1:5), y=1)) +
# geom_tile(aes(x=x, y=y, fill=x)) +
# scale_fill_manual(values = RColorBrewer::brewer.pal(n = n, name = pal)) +
# theme_void() +
# guides(fill="none") +
# scale_x_discrete(expand=c(0,0)) +
# scale_y_continuous(expand=c(0,0))
# }
|
/scratch/gouwar.j/cran-all/cranData/vici/R/mod_settings_pan.R
|
#'Custom download handler for plots
#'
#'@param name output file name
#'@param plot_obj a plot object to be downloaded
#'
#'
#'@return a \code{ggpubr} plot object
#'
#'@author Boris Hejblum
#'
#'@keywords internal
#'
#'@import ggplot2
#'@import ggpubr
#'@importFrom grDevices dev.off png
myDownloadHandlerForPlots <- function(name, plot_obj, outputArgs = list()){
downloadHandler(
filename = name,
content = function(file){
png(file, height = 10,width = 12,res = 300,units = "in")
print(plot_obj)
dev.off()
},
outputArgs = outputArgs
)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/myDownloadHandlerForPlots.R
|
#'Our generalized least squares ls function
#'
#'Internal function to adapt generalized least squares (\code{gls}) model with more details in output.
#'
#'@param model a \code{formula}.
#'@param data a \code{data.frame} containing variables called in \code{model}, \code{correlation}, \code{weights}, \code{subset}.
#'@param correlation a \code{corCompSymm} object. Default is \code{NULL}.
#'@param weights a \code{varIdent} object. Default is \code{NULL}.
#'@param subset an optional expression indicating which subset of the rows of \code{data} should be used in the fit. By default, all observations are included.
#'@param method a character string to choose the maximization method. Default is "\code{REML}".
#'@param na.action a function that indicates what should happen when the data contain NAs. Default is \code{na.fail}.
#'@param control a list of control values. Default is an empty list.
#'@param verbose an optional logical value. If TRUE information on the evolution of the iterative algorithm is printed. Default is FALSE.
#'
#'@return a \code{gls} object
#'
#'@keywords internal
#'
#' @importFrom stats na.fail terms formula asOneSidedFormula contrasts nlminb logLik optim fitted coef model.frame update
#' @importFrom nlme glsControl getGroupsFormula glsStruct varFunc asOneFormula getGroups Initialize needUpdate varWeights glsApVar coef<-
mygls <- function (model, data = sys.frame(sys.parent()), correlation = NULL,
weights = NULL, subset, method = c("REML", "ML"), na.action = na.fail,
control = list(), verbose = FALSE)
{
Call <- match.call()
controlvals <- glsControl()
if (!missing(control))
controlvals[names(control)] <- control
if (!inherits(model, "formula") || length(model) != 3L) {
stop("\nmodel must be a formula of the form \"resp ~ pred\"")
}
method <- match.arg(method)
REML <- method == "REML"
groups <- if (!is.null(correlation))
getGroupsFormula(correlation)
glsSt <- glsStruct(corStruct = correlation, varStruct = varFunc(weights))
model <- terms(model, data = data)
mfArgs <- list(formula = asOneFormula(formula(glsSt), model,
groups), data = data, na.action = na.action)
if (!missing(subset)) {
mfArgs[["subset"]] <- asOneSidedFormula(Call[["subset"]])[[2L]]
}
mfArgs$drop.unused.levels <- TRUE
dataMod <- do.call(model.frame, mfArgs)
origOrder <- row.names(dataMod)
if (!is.null(groups)) {
groups <- eval(substitute(~1 | GR, list(GR = groups[[2L]])))
grps <- getGroups(dataMod, groups, level = length(getGroupsFormula(groups,
asList = TRUE)))
ord <- order(grps)
grps <- grps[ord]
dataMod <- dataMod[ord, , drop = FALSE]
revOrder <- match(origOrder, row.names(dataMod))
}
else grps <- NULL
X <- model.frame(model, dataMod)
contr <- lapply(X, function(el) if (inherits(el, "factor"))
contrasts(el))
contr <- contr[!unlist(lapply(contr, is.null))]
X <- model.matrix(model, X)
if (ncol(X) == 0L)
stop("no coefficients to fit")
y <- eval(model[[2L]], dataMod)
N <- nrow(X)
p <- ncol(X)
parAssign <- attr(X, "assign")
fTerms <- terms(as.formula(model), data = data)
namTerms <- attr(fTerms, "term.labels")
if (attr(fTerms, "intercept") > 0) {
namTerms <- c("(Intercept)", namTerms)
}
namTerms <- factor(parAssign, labels = namTerms)
parAssign <- split(order(parAssign), namTerms)
fixedSigma <- (controlvals$sigma > 0)
attr(glsSt, "conLin") <- list(Xy = array(c(X, y), c(N, ncol(X) +
1L), list(row.names(dataMod), c(colnames(X), deparse(model[[2]])))),
dims = list(N = N, p = p, REML = as.integer(REML)), logLik = 0,
sigma = controlvals$sigma, fixedSigma = fixedSigma)
glsEstControl <- controlvals["singular.ok"]
glsSt <- Initialize(glsSt, dataMod, glsEstControl)
parMap <- attr(glsSt, "pmap")
numIter <- numIter0 <- 0L
repeat {
oldPars <- c(attr(glsSt, "glsFit")[["beta"]], coef(glsSt))
if (length(coef(glsSt))) {
optRes <- if (controlvals$opt == "nlminb") {
nlminb(c(coef(glsSt)), function(glsPars) -logLik(glsSt,
glsPars), control = list(trace = controlvals$msVerbose,
iter.max = controlvals$msMaxIter))
}
else {
optim(c(coef(glsSt)), function(glsPars) -logLik(glsSt,
glsPars), method = controlvals$optimMethod,
control = list(trace = controlvals$msVerbose,
maxit = controlvals$msMaxIter, reltol = if (numIter ==
0L) controlvals$msTol else 100 * .Machine$double.eps))
}
coef(glsSt) <- optRes$par
}
else {
optRes <- list(convergence = 0)
}
attr(glsSt, "glsFit") <- glsEstimate(glsSt, control = glsEstControl)
if (!needUpdate(glsSt)) {
if (optRes$convergence)
stop(optRes$message)
break
}
numIter <- numIter + 1L
glsSt <- update(glsSt, dataMod)
aConv <- c(attr(glsSt, "glsFit")[["beta"]], coef(glsSt))
conv <- abs((oldPars - aConv)/ifelse(aConv == 0, 1, aConv))
aConv <- c(beta = max(conv[1:p]))
conv <- conv[-(1:p)]
for (i in names(glsSt)) {
if (any(parMap[, i])) {
aConv <- c(aConv, max(conv[parMap[, i]]))
names(aConv)[length(aConv)] <- i
}
}
if (verbose) {
cat("\nIteration:", numIter)
cat("\nObjective:", format(optRes$value), "\n")
print(glsSt)
cat("\nConvergence:\n")
print(aConv)
}
if (max(aConv) <= controlvals$tolerance) {
break
}
if (numIter > controlvals$maxIter) {
stop("maximum number of iterations reached without convergence")
}
}
glsFit <- attr(glsSt, "glsFit")
namBeta <- names(glsFit$beta)
attr(glsSt, "fixedSigma") <- fixedSigma
attr(parAssign, "varBetaFact") <- varBeta <- glsFit$sigma *
glsFit$varBeta * sqrt((N - REML * p)/(N - p))
varBeta <- crossprod(varBeta)
dimnames(varBeta) <- list(namBeta, namBeta)
Fitted <- fitted(glsSt)
if (!is.null(grps)) {
grps <- grps[revOrder]
Fitted <- Fitted[revOrder]
Resid <- y[revOrder] - Fitted
attr(Resid, "std") <- glsFit$sigma/varWeights(glsSt)[revOrder]
}
else {
Resid <- y - Fitted
attr(Resid, "std") <- glsFit$sigma/varWeights(glsSt)
}
names(Resid) <- names(Fitted) <- origOrder
apVar <- if (controlvals$apVar)
glsApVar(glsSt, glsFit$sigma, .relStep = controlvals[[".relStep"]],
minAbsPar = controlvals[["minAbsParApVar"]], natural = controlvals[["natural"]])
else "Approximate variance-covariance matrix not available"
dims <- attr(glsSt, "conLin")[["dims"]]
dims[["p"]] <- p
attr(glsSt, "glsFit") <- NULL
attr(glsSt, "fixedSigma") <- fixedSigma
grpDta <- inherits(data, "groupedData")
structure(class = "gls", list(modelStruct = glsSt, dims = dims,
contrasts = contr, coefficients = glsFit[["beta"]], varBeta = varBeta,
sigma = if (fixedSigma) controlvals$sigma else glsFit$sigma,
apVar = apVar, logLik = glsFit$logLik, numIter = if (needUpdate(glsSt)) numIter else numIter0,
groups = grps, call = Call, method = method, fitted = Fitted,
residuals = Resid, parAssign = parAssign, na.action = attr(dataMod,
"na.action")), namBetaFull = colnames(X), units = if (grpDta)
attr(data, "units"), labels = if (grpDta)
attr(data, "labels"))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/mygls.R
|
#' Adds the content of www to www/ from this package
#'
#' @importFrom shiny addResourcePath
#'
#' @noRd
.onLoad <- function(...) {
shiny::addResourcePath('www', system.file('app/www', package = 'vici'))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/onload.R
|
#' Launch VICI Shiny App
#'
#'@param host Default is "127.0.0.1", see \link[shiny]{runApp} for details.
#'@param port Default is 3838, see \link[shiny]{runApp} for details.
#'@param ... additional arguments to be passed to the \link[shiny]{runApp} function.
#'
#'@examples
#'if(interactive()){
#' vici::run_app()
#'}
#'
#' @export
#' @importFrom shiny runApp
run_app <- function(host="127.0.0.1", port=3838, ...) {
shiny::runApp(system.file("app", package = "vici"), port=port, host=host, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/run_app.R
|
update_vars <- function(input, possibilities){
setdiff(possibilities, union(union(union(union(union(union(input$selectSubject,
input$selectStim),
input$selectResponse),
input$selectArmInter),
input$selectArmIntra),
input$selectTimeIntra),
input$selectTimeInter))
}
|
/scratch/gouwar.j/cran-all/cranData/vici/R/update_vars.R
|
# Utility Functions for VICI Package
#
# ----- contained functions : ------
# qform
# devfun_gls
# varBetafun_gls
# compute_jaclist
# waldCI
# rbindall
##############################################
# ------------------- qform ------------------
##############################################
#' Compute Quadratic Form
#'
#' @param L a numeric vector.
#' @param V a symmetric numeric matrix.
#'
#' @return a numerical scalar.
#' @keywords internal
qform <- function (L, V){
sum(L * (V %*% L))
}
##############################################
# ---------------- devfun_gls ----------------
##############################################
#' Compute Full Deviance
#'
#' @param varpar variance parameters.
#' @param gls_obj a \code{gls} object.
#'
#' @return the full deviance, a numerical scalar.
#' @keywords internal
#'
#' @details This code is adapted from code in \code{devfun_vp} internal function of
#' \pkg{pbkrtest} package.
#'
# use glsEstimate and to compute the FULL deviance
# adapted from pbkrtest:::devfun_vp
devfun_gls <- function(varpar, gls_obj){
nvarpar <- length(varpar)
coef(gls_obj$modelStruct) <- varpar[-nvarpar]
attr(gls_obj$modelStruct, "conLin")$sigma <- varpar[nvarpar]
contr <- gls_obj$call$control
if(is.null(contr)){
contr <- list(singular.ok = FALSE)
}
#
est <- glsEstimate(object = gls_obj$modelStruct, control = contr)
return(as.numeric(-2*est$logLik))
}
##############################################
# -------------- varBetafun_gls --------------
##############################################
#' Compute covariance of Beta for a Generalized Least Squares (\code{GLS}) Model
#'
#' @param varpar variance parameters.
#' @param gls_obj a \code{gls} object.
#'
#' @return covariance of Beta, a numerical scalar.
#' @keywords internal
#'
#' @details This code is adapted from code in \code{get_covbeta} internal function of
#' \pkg{pbkrtest} package.
#'
# mix above with pbkrtest:::get_covbeta
varBetafun_gls <- function(varpar, gls_obj){
REML <- gls_obj$dims$REML
nvarpar <- length(varpar)
coef(gls_obj$modelStruct) <- varpar[-nvarpar]
N <- gls_obj$dims$N
p <- gls_obj$dims$p
attr(gls_obj$modelStruct, "conLin")$sigma <- varpar[nvarpar]
contr <- gls_obj$call$control
if(is.null(contr)){
contr <- list(singular.ok = FALSE)
}
est <- glsEstimate(object = gls_obj$modelStruct, control = contr)
varBeta <- crossprod(est$sigma * est$varBeta * sqrt((N - REML * p)/(N - p)))
return(varBeta)
}
##############################################
# -------------- compute_jaclist -------------
##############################################
#' Compute_jaclist quantities needed for the Satterthwaite
#' approximation.
#'
#' Computes vcov of variance parameters (theta, sigma), jacobian of
#' each variance parameter etc.
#'
#' @param object a \code{gls} object.
#' @param tol a tolerance
#'
#' @return a list.
#' @keywords internal
#'
#' @details This code is adapted from code in \code{compute_auxillary} internal
#' function of \pkg{pbkrtest} package.
#'
#from pbkrtest:::compute_auxillary
compute_jaclist <- function (object, tol = 1e-06){
if (!inherits(object, "gls"))
stop("'model' not an 'gls'")
out <- list(sigma = NULL, vcov_beta = NULL, vcov_varpar = NULL,
jacobian_list = NULL)
out$sigma <- sigma(object)
out$vcov_beta <- as.matrix(vcov(object))
varpar_opt <- c(coef(object$modelStruct), "sigma" = sigma(object))
h <- hessian(func = devfun_gls, x = varpar_opt,
gls_obj = object)
eig_h <- eigen(h, symmetric = TRUE)
evals <- eig_h$values
neg <- evals < -tol
pos <- evals > tol
zero <- evals > -tol & evals < tol
if (sum(neg) > 0) {
evals_num <- paste(sprintf("%1.1e", evals[neg]), collapse = " ")
warning(sprintf("Model failed to converge with %d negative eigenvalue(s): %s",
sum(neg), evals_num), call. = FALSE)
}
if (sum(zero) > 0) {
evals_num <- paste(sprintf("%1.1e", evals[zero]), collapse = " ")
warning(sprintf("Model may not have converged with %d eigenvalue(s) close to zero: %s",
sum(zero), evals_num))
}
pos <- eig_h$values > tol
q <- sum(pos)
h_inv <- with(eig_h, {
vectors[, pos, drop = FALSE] %*% diag(1/values[pos],
nrow = q) %*% t(vectors[, pos, drop = FALSE])
})
out$vcov_varpar <- 2 * h_inv
jac <- jacobian(func = varBetafun_gls, x = varpar_opt,
gls_obj = object)
out$jacobian_list <- lapply(1:ncol(jac), function(i){array(jac[, i], dim = rep(length(coef(object)), 2))})
return(out)
}
##############################################
# ------------------ waldCI -----------------
##############################################
#' Compute Wald Confidence Interval
#'
#' @param estimate an estimated coefficient.
#' @param se standard error of \code{estimate}.
#' @param df degrees of freedom associate to \code{estimate}. \code{df = Inf} is
#' allowed.
#' @param level level of confidence interval.
#'
#' @return a matrix of lower and upper confidence interval.
#' @keywords internal
#' @importFrom stats qt
#'
#' @details This code is greatly inspired by code from the \pkg{lmerTest}
#' package.
#'
waldCI <- function(estimate, se, df = Inf, level = 0.95) {
stopifnot(length(level) == 1,
is.numeric(level),
level > 0, level < 1)
alpha <- (1 - level)/2
fac <- qt(alpha, df = df, lower.tail = FALSE)
res <- cbind(lower = estimate - se * fac,
upper = estimate + se * fac)
if(!is.null(names(estimate))) rownames(res) <- names(estimate)
res
}
##############################################
# ----------------- rbindall -----------------
##############################################
#' \code{rbind} Multiple Objects
#'
#' @param ... objects to be \code{rbind}'ed - typically matrices or vectors
#'
#' @keywords internal
rbindall <- function(...) do.call(rbind, ...)
|
/scratch/gouwar.j/cran-all/cranData/vici/R/utils.R
|
vici:::app_server
|
/scratch/gouwar.j/cran-all/cranData/vici/inst/app/server.R
|
vici:::app_ui()
|
/scratch/gouwar.j/cran-all/cranData/vici/inst/app/ui.R
|
#' Run \code{\{video\}} Example Applications
#'
#' @param example Name of the example to load. Current examples include:
#' \describe{
#' \item{basic}{Basic example of \code{video} in use}
#' \item{full}{Basic example of using all buttons available in \code{video}}
#' \item{server}{Example showing server-side functionality}
#' }
#' @param display.mode The mode in which to display the application. By default set to \code{"showcase"} to show
#' code behind the example.
#' @param ... Optional arguments to send to \code{shiny::runApp}
#'
#' @return
#' This function does not return a value; interrupt R to stop the application (usually by pressing Ctrl+C or Esc).
#'
#' @examples
#' availableVideoExamples()
#'
#' if (interactive()) {
#' library(shiny)
#' library(video)
#'
#' runVideoExample("server")
#' }
#'
#' @rdname examples
#' @export
runVideoExample <- function(example = "basic", display.mode = "showcase", ...) {
available_examples <- findVideoExamples()
if (!example %in% available_examples) {
stop("Example not available. Choose from: '", paste(available_examples, collapse = "', '"), "'")
}
shiny::runApp(
file.path(system.file("examples", package = "video"), example),
display.mode = display.mode,
...
)
}
#' @rdname examples
#' @export
availableVideoExamples <- function() {
available_examples <- findVideoExamples()
cat("'", paste(available_examples, collapse = "', '"), "'\n", sep = "")
invisible(available_examples)
}
findVideoExamples <- function() {
example_dir <- system.file("examples", package = "video")
list.files(example_dir)
}
|
/scratch/gouwar.j/cran-all/cranData/video/R/example.R
|
#' Guess Video Format Type
#'
#' @description
#' If no type is provided when generating a video.js video, then the format needs to
#' be guessed. Included in the package is a dataset of the default type of each
#' video. This will give the default type of each file provided.
#'
#' @param files A vector of URL paths (relative or absolute) to videos
#'
#' @return
#' A vector the same length as \code{files} of the video types.
#'
#' @examples
#' guessVideoFormat("video.mp4")
#'
#' @export
guessVideoFormat <- function(files) {
extensions <- tolower(tools::file_ext(files))
types <- video_formats[match(extensions, tolower(video_formats$extension)), "type"]
if (any(is.na(types))) {
message(
"Unable to guess file format types for the following files:\n",
paste(" -", files[is.na(types)], "\n"),
" Use `video_formats` to find the relevant type"
)
}
types
}
|
/scratch/gouwar.j/cran-all/cranData/video/R/format.R
|
#' Add Language Support
#'
#' @description
#' Enabling languages (other than English) to appear as tooltips and other buttons in video.js widgets.
#'
#' @param video A \code{\link{video}}
#' @param languages A character vector of languages to support in the video.
#' See \code{availableVideoLanguages()} for a full list
#'
#' @return
#' An updated \code{video} with extra language support
#'
#' @details
#' If any languages are missing, you can add a separate script in the head of the application
#' that will apply the language to all videos. See \url{https://videojs.com/guides/languages/}
#' for more details
#'
#' @examples
#' video <- video("https://vjs.zencdn.net/v/oceans.mp4")
#' video <- addVideoLanguages(video, c("es", "fr", "de"))
#'
#' if (interactive()) {
#' library(shiny)
#'
#' ui <- fluidPage(lang = "fr", video)
#' server <- function(input, output) {}
#' shinyApp(ui, server)
#' }
#'
#' @importFrom stats setNames
#'
#' @rdname video-languages
#' @export
addVideoLanguages <- function(video, languages) {
missing_languages <- setdiff(languages, availableLanguages())
if (length(missing_languages)) {
stop(
"The following languages are currently unavailable for video.js: ",
paste(missing_languages, collapse = ", ")
)
}
video$x$options$languages <- setNames(lapply(languages, getLanguageText), languages)
video
}
#' @rdname video-languages
#' @export
availableLanguages <- function() {
lang_dir <- system.file("htmlwidgets/video/lang", package = "video")
unique(sub("\\..*", "", list.files(lang_dir)))
}
getLanguageText <- function(language) {
jsonlite::fromJSON(
system.file(file.path("htmlwidgets/video/lang", paste0(language, ".json")), package = "video")
)
}
|
/scratch/gouwar.j/cran-all/cranData/video/R/language.R
|
#' Update video.js Server-Side
#'
#' @description
#' Change the state of the video player from the server.
#'
#' \code{playVideo}, \code{pauseVideo} and \code{stopVideo} will all be applied to the current video.
#'
#' \code{changeVideo} will update the track to the URL or file specified.
#'
#' \code{updatePlaybackRate} will change how fast the video is playing.
#'
#' @param session Shiny session
#' @param id ID of the \code{video} to update
#'
#' @return
#' Updates the the state of the specified \code{video} in the shiny application.
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#'
#' ui <- fluidPage(
#' title = "howler.js Player",
#' video(
#' "https://vjs.zencdn.net/v/oceans.mp4",
#' elementId = "video"
#' ),
#' actionButton("pause", "Pause Video")
#' )
#'
#' server <- function(input, output) {
#' observeEvent(input$pause, pauseVideo("video"))
#' }
#'
#' runShiny(ui, server)
#' }
#'
#' @name video-server
#' @rdname video-server
#' @export
playVideo <- function(id, session = getDefaultReactiveDomain()) {
session$sendCustomMessage("playVideo", id)
}
#' @rdname video-server
#' @export
pauseVideo <- function(id, session = getDefaultReactiveDomain()) {
session$sendCustomMessage("pauseVideo", id)
}
#' @rdname video-server
#' @export
stopVideo <- function(id, session = getDefaultReactiveDomain()) {
session$sendCustomMessage("stopVideo", id)
}
#' @param seek Time (in seconds) to set the position of the track
#' @rdname video-server
#' @export
seekVideo <- function(id, seek, session = getDefaultReactiveDomain()) {
session$sendCustomMessage("seekVideo", list(id = id, seek = seek))
}
#' @param files A vector of file paths or URLs pointing
#' @param format An optional list of formats of \code{video}
#' @rdname video-server
#' @export
changeVideo <- function(id, files, format = NULL, session = getDefaultReactiveDomain()) {
if (is.null(format)) {
format <- guessVideoFormat(files)
} else if (length(format) != length(files)) {
stop("Files is not the same length as format")
}
sources <- lapply(seq(files), function(x) list(src = files[x], type = format[x]))
session$sendCustomMessage("changeVideo", list(id = id, src = sources))
}
#' @param playrate Speed of playback of the video. Default is set to 1 (normal speed)
#' @rdname video-server
#' @export
updatePlaybackRate <- function(id, playrate = 1, session = getDefaultReactiveDomain()) {
session$sendCustomMessage("setVideoPlayrate", list(id = id, playrate = playrate))
}
|
/scratch/gouwar.j/cran-all/cranData/video/R/server-side.R
|
#' Add Text Tracks to Video
#'
#' @description
#' video.js contains the ability to include tracks with the video, including subtitles,
#' captions and descriptions. \code{includeTextTracks} will make sure that they are
#' included on load, and find the defaults to embed with the video.
#'
#' @param video A \code{\link{video}()}
#' @param files A vector of WebVTT files that contain "cues" of when text should appear,
#' hide and what text to display
#' @param language The valid BCP 47 code for the language of the text track,
#' e.g. "en" for English or "es" for Spanish.
#' @param label Short descriptive text for the track that will used in the user interface.
#' For example, in a menu for selecting a captions language.
#' @param kind An optional vector to match the type of text tracks in \code{files}:
#' \describe{
#' \item{subtitles}{(default): Translations of the dialogue in the video for when audio is
#' available but not understood. Subtitles are shown over the video.}
#' \item{captions}{Transcription of the dialogue, sound effects, musical cues, and other
#' audio information for viewer who are deaf/hard of hearing, or the video is muted.
#' Captions are also shown over the video.}
#' \item{chapters}{Chapter titles that are used to create navigation within the video.
#' Typically, these are in the form of a list of chapters that the viewer can use to
#' navigate the video.}
#' \item{descriptions}{Text descriptions of the action in the content for when the video portion
#' isn't available or because the viewer is blind or not using a screen. Descriptions are
#' read by a screen reader or turned into a separate audio track.}
#' \item{metadata}{Tracks that have data meant for JavaScript to parse and do something with.
#' These aren't shown to the user.}
#' }
#' @param default The boolean \code{default} attribute can be used to indicate that a track's
#' mode should start as "showing". Otherwise, the viewer would need to select their language
#' from a captions or subtitles menu.
#'
#' @details
#' All vectors must either be the same length as \code{files} or of length 1. In the latter,
#' they will be applied to all \code{files} supplied.
#'
#' @return
#' An updated \code{video} with text tracks included
#'
#' @examples
#' vid <- video("https://vjs.zencdn.net/v/oceans.mp4")
#' includeTextTracks(vid, "url/to/subtitles.vtt")
#'
#' @export
includeTextTracks <- function(video, files, language = "en", label = "English",
kind = "subtitles", default = FALSE) {
if (length(files) == 0) {
stop("No text tracks provided to add")
}
n_files <- length(files)
language <- checkLength(language, n_files, "language")
label <- checkLength(label, n_files, "label")
kind <- checkLength(kind, n_files, "kind")
default <- checkLength(default, n_files, "default")
remote_tracks <- lapply(seq(files), function(x) {
track_info <- list(
src = files[x],
kind = kind[x],
srclang = language[x],
label = label[x]
)
if (isTRUE(default[x])) {
track_info$default <- NA
}
track_info
})
video$x$options$tracks <- remote_tracks
video
}
checkLength <- function(x, n, label) {
if (length(x) == 1) {
rep(x, n)
} else if (length(x) != n) {
stop("The size of `", label, "` must either be 1 or number of files (", n, ")")
} else {
x
}
}
|
/scratch/gouwar.j/cran-all/cranData/video/R/text_track.R
|
#' Video Player
#'
#' @description
#' A video player that can be embedded in HTML pages.
#'
#' @param files A vector of file paths or URLs pointing
#' @param format An optional list of formats of \code{video}
#' @param options A named list of options to apply to the video. List of available options
#' available in Details
#' @param seek_ping_rate Number of milliseconds between each update of `input$\{id\}_seek` while playing. Default is
#' set to 1000. If set to 0, then `input$\{id\}_seek` will not exist.
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended. Use \code{NA} for it to use
#' the original video width/height.
#' @param elementId HTML id tag to be given to the video player element
#'
#' @return
#' A shiny.tag containing all of the required options for a \code{videojs} JS object to be initialised in a shiny application.
#'
#' On the server side there will be up to four additional objects available as inputs:
#' \describe{
#' \item{\code{\{id\}_playing}}{A logical value as to whether or not the \code{video} is playing audio}
#' \item{\code{\{id\}_seek}}{(If \code{seek_ping_rate > 0}) the current time of the track loaded}
#' \item{\code{\{id\}_duration}}{The duration of the track loaded}
#' }
#'
#' @details
#' Here are some more common options to implement:
#' \describe{
#' \item{autoplay}{
#' Whether or not the video will autoplay on load. NOTE: There is not a guarantee autoplay will work in the browser.
#' \describe{
#' \item{\code{FALSE}}{Default: Video won't autoplay}
#' \item{\code{TRUE}}{Video will use browser's autoplay}
#' \item{\code{"muted"}}{Will mute the video and then manually call \code{play()} on \code{loadstart()}. Likely to work on browsers}
#' \item{\code{"play"}}{Will call \code{play()} on \code{loadstart()}, similar to browser autoplay}
#' }
#' }
#' \item{controls}{
#' Determines whether or not the player has controls that the user can interact with. By default
#' \code{video} will include controls even if not specified in the options.
#' }
#' \item{poster}{
#' A URL to an image that displays before the video begins playing.
#' This is often a frame of the video or a custom title screen.
#' }
#' }
#'
#' For a full list of available options check out \url{https://videojs.com/guides/options/}
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#'
#' ui <- fluidPage(
#' title = "howler.js Player",
#' video("https://vjs.zencdn.net/v/oceans.mp4")
#' )
#'
#' server <- function(input, output) {
#' }
#'
#' runShiny(ui, server)
#' }
#'
#' @import shiny
#' @import htmlwidgets
#'
#' @export
video <- function(files, format = NULL, options = list(), seek_ping_rate = 1000,
width = NULL, height = NULL, elementId = NULL) {
if (is.null(format)) {
format <- guessVideoFormat(files)
} else if (length(format) != length(files)) {
stop("Files is not the same length as format")
}
sources <- lapply(seq(files), function(x) list(src = files[x], type = format[x]))
options <- append(
options, list(sources = sources)
)
if (!"controls" %in% names(options)) {
options$controls <- TRUE
}
settings <- list(
options = options,
seek_ping_rate = seek_ping_rate
)
htmlwidgets::createWidget(
name = "video",
x = settings,
width = width,
height = height,
package = "video",
elementId = elementId
)
}
#' Shiny bindings for video
#'
#' Output and render functions for using video within Shiny
#' applications and interactive Rmd documents.
#'
#' @param outputId output variable to read from
#' @param width,height Must be a valid CSS unit (like \code{'100\%'},
#' \code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
#' string and have \code{'px'} appended.
#' @param expr An expression that generates a video
#' @param env The environment in which to evaluate \code{expr}.
#' @param quoted Is \code{expr} a quoted expression (with \code{quote()})? This
#' is useful if you want to save an expression in a variable.
#'
#' @return
#' An output or render function that enables the use of the widget within Shiny applications.
#'
#' @name video-shiny
#' @export
videoOutput <- function(outputId, width = "100%", height = "400px"){
htmlwidgets::shinyWidgetOutput(outputId, "video", width, height, package = "video")
}
#' @rdname video-shiny
#' @export
renderVideo <- function(expr, env = parent.frame(), quoted = FALSE) {
if (!quoted) { expr <- substitute(expr) } # force quoted
htmlwidgets::shinyRenderWidget(expr, videoOutput, env, quoted = TRUE)
}
widget_html.video <- function(id, style, class, ...) {
tags$video(
id = id,
style = style,
class = paste("video-js", class),
...,
tags$p(
class = "vjs-no-js",
"To view this video please enable JavaScript, and consider upgrading to a web browser that",
tags$a(
href = "https://videojs.com/html5-video-support/",
target = "_blank",
"supports HTML5 video"
)
)
)
}
|
/scratch/gouwar.j/cran-all/cranData/video/R/video.R
|
---
title: "video"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{video}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
<!-- badges: start -->
[](https://lifecycle.r-lib.org/articles/stages.html#experimental)
[](https://app.codecov.io/gh/ashbaldry/video?branch=main)
[](https://github.com/ashbaldry/video/actions)
<!-- badges: end -->
# {video} - Interactive Video Player <img src="https://raw.githubusercontent.com/ashbaldry/video/master/man/figures/logo.png" align="right" width="120"/>
`{video}` is a package that utilises the [video.js](https://github.com/videojs/video.js) library to play video on the modern web.
## Installation
This package is not yet available on CRAN. To install the latest version:
```r
install.packages("devtools")
devtools::install_github("ashbaldry/video")
```
## Usage
The HTML way to include an audio file in any shiny application/web page is to use the `<audio>` tag. This cannot (easily) be manipulated from the server.
```r
tags$video(src = "https://vjs.zencdn.net/v/oceans.mp4", type = "video/mp4", controls = NA)
```
video.js is a flexible video player that is more robust than the basic HTML5 video player, and can easily be manipulated from the server side of shiny applications.
```r
library(shiny)
library(video)
ui <- fluidPage(
title = "video Example",
h1("Video Example"),
video(
"https://vjs.zencdn.net/v/oceans.mp4",
elementId = "video"
),
tags$p(
"Currently playing:",
textOutput("video_playing", container = tags$strong, inline = TRUE)
)
)
server <- function(input, output, session) {
output$video_playing <- renderText({
if (isTRUE(input$video_playing)) "Yes" else "No"
})
observe({
req(input$video_seek)
if (round(input$video_seek) == 10) {
pauseVideo("video")
} else if (round(input$video_seek) == 20) {
stopVideo("video")
}
})
}
shinyApp(ui, server)
```
<video src="https://user-images.githubusercontent.com/8420419/175826808-83d03bfc-6ba1-49c6-8f86-4e40973b010d.mp4" type="movie/mp4" controls style="width: 100%"></video>
Whilst the buttons below the video aren't required for playing/pausing the video, they are linked to `observeEvent`s that send messages from the server to the video to update.
### Extending video.js
For those who want more from video.js and isn't currently available within {video}, then the API is very flexible (https://docs.videojs.com/), and any video can be retrieved in JavaScript using `const player = videojs("id")` and manipulated from there.
## Examples
All examples are available in the [Examples](https://github.com/ashbaldry/video/tree/main/inst/examples) directory and can be run locally by installing the `{video}` package:
- [Basic Player](https://github.com/ashbaldry/video/tree/main/inst/examples/basic)
- [Server-Side Controls](https://github.com/ashbaldry/video/tree/main/inst/examples/server)
|
/scratch/gouwar.j/cran-all/cranData/video/inst/doc/video.Rmd
|
library(shiny)
library(video)
ui <- fluidPage(
h1("Video Player"),
video("https://vjs.zencdn.net/v/oceans.mp4")
)
server <- function(input, output, session) {
}
shinyApp(ui, server)
|
/scratch/gouwar.j/cran-all/cranData/video/inst/examples/basic/app.R
|
library(shiny)
library(video)
ui <- fluidPage(
title = "video Example",
h1("Video Example"),
video(
"https://vjs.zencdn.net/v/oceans.mp4",
elementId = "video"
),
tags$br(),
tags$br(),
tags$p(
"Currently playing:",
textOutput("video_playing", container = tags$strong, inline = TRUE)
),
tags$p(
"Duration:",
textOutput("video_seek", container = tags$strong, inline = TRUE),
"/",
textOutput("video_duration", container = tags$strong, inline = TRUE)
)
)
server <- function(input, output, session) {
output$video_playing <- renderText({
if (isTRUE(input$video_playing)) "Yes" else "No"
})
output$video_duration <- renderText({
sprintf(
"%02d:%02.0f",
input$video_duration %/% 60,
input$video_duration %% 60
)
})
output$video_seek <- renderText({
sprintf(
"%02d:%02.0f",
input$video_seek %/% 60,
input$video_seek %% 60
)
})
}
shinyApp(ui, server)
|
/scratch/gouwar.j/cran-all/cranData/video/inst/examples/full/app.R
|
library(shiny)
library(video)
ui <- fluidPage(
h1("Video Player"),
video(
elementId = "video",
files = c(
"https://vjs.zencdn.net/v/oceans.mp4",
"https://vjs.zencdn.net/v/oceans.webm",
"https://vjs.zencdn.net/v/oceans.ogv"
),
format = c(
"video/mp4",
"video/webm",
"video/ogg"
)
),
br(),
actionButton("play", "Play", icon("play")),
actionButton("pause", "Pause", icon("pause")),
actionButton("stop", "Stop", icon("stop")),
actionButton("seek", "Go to 10 secs"),
actionButton("change", "Change video")
)
server <- function(input, output, session) {
observeEvent(input$play, playVideo("video"))
observeEvent(input$pause, pauseVideo("video"))
observeEvent(input$stop, stopVideo("video"))
observeEvent(input$seek, seekVideo("video", 10))
observeEvent(input$change, {
changeVideo("video", "//d2zihajmogu5jn.cloudfront.net/elephantsdream/ed_hd.mp4")
playVideo("video")
})
}
shinyApp(ui, server)
|
/scratch/gouwar.j/cran-all/cranData/video/inst/examples/server/app.R
|
---
title: "video"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{video}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
<!-- badges: start -->
[](https://lifecycle.r-lib.org/articles/stages.html#experimental)
[](https://app.codecov.io/gh/ashbaldry/video?branch=main)
[](https://github.com/ashbaldry/video/actions)
<!-- badges: end -->
# {video} - Interactive Video Player <img src="https://raw.githubusercontent.com/ashbaldry/video/master/man/figures/logo.png" align="right" width="120"/>
`{video}` is a package that utilises the [video.js](https://github.com/videojs/video.js) library to play video on the modern web.
## Installation
This package is not yet available on CRAN. To install the latest version:
```r
install.packages("devtools")
devtools::install_github("ashbaldry/video")
```
## Usage
The HTML way to include an audio file in any shiny application/web page is to use the `<audio>` tag. This cannot (easily) be manipulated from the server.
```r
tags$video(src = "https://vjs.zencdn.net/v/oceans.mp4", type = "video/mp4", controls = NA)
```
video.js is a flexible video player that is more robust than the basic HTML5 video player, and can easily be manipulated from the server side of shiny applications.
```r
library(shiny)
library(video)
ui <- fluidPage(
title = "video Example",
h1("Video Example"),
video(
"https://vjs.zencdn.net/v/oceans.mp4",
elementId = "video"
),
tags$p(
"Currently playing:",
textOutput("video_playing", container = tags$strong, inline = TRUE)
)
)
server <- function(input, output, session) {
output$video_playing <- renderText({
if (isTRUE(input$video_playing)) "Yes" else "No"
})
observe({
req(input$video_seek)
if (round(input$video_seek) == 10) {
pauseVideo("video")
} else if (round(input$video_seek) == 20) {
stopVideo("video")
}
})
}
shinyApp(ui, server)
```
<video src="https://user-images.githubusercontent.com/8420419/175826808-83d03bfc-6ba1-49c6-8f86-4e40973b010d.mp4" type="movie/mp4" controls style="width: 100%"></video>
Whilst the buttons below the video aren't required for playing/pausing the video, they are linked to `observeEvent`s that send messages from the server to the video to update.
### Extending video.js
For those who want more from video.js and isn't currently available within {video}, then the API is very flexible (https://docs.videojs.com/), and any video can be retrieved in JavaScript using `const player = videojs("id")` and manipulated from there.
## Examples
All examples are available in the [Examples](https://github.com/ashbaldry/video/tree/main/inst/examples) directory and can be run locally by installing the `{video}` package:
- [Basic Player](https://github.com/ashbaldry/video/tree/main/inst/examples/basic)
- [Server-Side Controls](https://github.com/ashbaldry/video/tree/main/inst/examples/server)
|
/scratch/gouwar.j/cran-all/cranData/video/vignettes/video.Rmd
|
which_or_NA <- function(v) {
ind <- which(v)
if (length(ind) == 0) {ind <- NA_integer_}
return(ind)
}
convert_unicode_to_ascii <- function(string) {
old <- paste0("\u00C0\u00C1\u00C2\u00C3\u00C8\u00C9\u00CA\u00CC\u00CD\u00D2",
"\u00D3\u00D4\u00D5\u00D9\u00DA\u00DD\u00E0\u00E1\u00E2\u00E3",
"\u00E8\u00E9\u00EA\u00EC\u00ED\u00F2\u00F3\u00F4\u00F5\u00F9",
"\u00FA\u00FD\u0102\u0103\u0110\u0111\u0128\u0129\u0168\u0169",
"\u01A0\u01A1\u01AF\u01B0\u1EA0\u1EA1\u1EA2\u1EA3\u1EA4\u1EA5",
"\u1EA6\u1EA7\u1EA8\u1EA9\u1EAA\u1EAB\u1EAC\u1EAD\u1EAE\u1EAF",
"\u1EB0\u1EB1\u1EB2\u1EB3\u1EB4\u1EB5\u1EB6\u1EB7\u1EB8\u1EB9",
"\u1EBA\u1EBB\u1EBC\u1EBD\u1EBE\u1EBF\u1EC0\u1EC1\u1EC2\u1EC3",
"\u1EC4\u1EC5\u1EC6\u1EC7\u1EC8\u1EC9\u1ECA\u1ECB\u1ECC\u1ECD",
"\u1ECE\u1ECF\u1ED0\u1ED1\u1ED2\u1ED3\u1ED4\u1ED5\u1ED6\u1ED7",
"\u1ED8\u1ED9\u1EDA\u1EDB\u1EDC\u1EDD\u1EDE\u1EDF\u1EE0\u1EE1",
"\u1EE2\u1EE3\u1EE4\u1EE5\u1EE6\u1EE7\u1EE8\u1EE9\u1EEA\u1EEB",
"\u1EEC\u1EED\u1EEE\u1EEF\u1EF0\u1EF1\u1EF2\u1EF3\u1EF4\u1EF5",
"\u1EF6\u1EF7\u1EF8\u1EF9\u00d0")
new <- paste0("AAAAEEEIIO",
"OOOUUYaaaa",
"eeeiioooou",
"uyAaDdIIUu",
"OoUuAaAaAa",
"AaAaAaAaAa",
"AaAaAaAaEe",
"EeEeEeEeEe",
"EeEeIiIiOo",
"OoOoOoOoOo",
"OoOoOoOoOo",
"OoUuUuUuUu",
"UuUuUuYyYy",
"YyYyD")
return(chartr(old, new, string))
}
|
/scratch/gouwar.j/cran-all/cranData/vietnamcode/R/utils.R
|
#' Convert Vietnam provincial ID
#'
#' Converts Vietnam's provinces' names and ID across different formats. Handles
#' diacritics and different spellings.
#'
#' @param sourcevar Character vector that contains the codes or province names to be converted
#' @param origin String that indicates the coding scheme of origin
#' @param destination String that indicates the coding scheme of destination
#'
#' @keywords vietnamcode
#' @note Supports the following coding schemes:
#' \itemize{
#' \item{province_name}
#' \item{province_name_diacritics}
#' \item{enterprise_censusm enterprise_census_old, enterprise_census_c: }{the same as GSO code}
#' \item{pci: }{Provincial Competitiveness Index}
#' }
#'
#' @export
#' @examples
#' codes.of.origin <- vietnamcode::vietnamcode_data$province_name # Vector of values to be converted
#' vietnamcode(codes.of.origin, "province_name", "pci")
vietnamcode <- function(sourcevar,
origin = c("province_name",
"enterprise_census", "enterprise_census_old", "enterprise_census_c",
"pci"),
destination = c("province_name", "province_name_diacritics",
"enterprise_census", "enterprise_census_old", "enterprise_census_c",
"pci")) {
if (is.null(sourcevar))
stop("sourcevar cannot be NULL")
origin <- match.arg(origin)
destination <- match.arg(destination)
# Sanitize province name to lower case and ASCII
if (origin %in% c("province_name")) {
sourcevar <- tolower(convert_unicode_to_ascii(sourcevar))
# Multiple regex search
tmp <- sapply(stats::na.omit(vietnamcode::vietnamcode_data[["regex"]]), grepl,
unique(sourcevar), ignore.case = TRUE, perl = TRUE)
if (is.vector(tmp)) {
regex_index = which_or_NA(tmp)
} else if (is.matrix(tmp)) {
regex_index = apply(tmp, 1, which_or_NA)
}
match_table <- data.frame(source = unique(sourcevar),
regex_index = regex_index)
destination_index <- match_table[match(sourcevar, match_table[["source"]]),
"regex_index"]
} else {
destination_index <- match(sourcevar, vietnamcode::vietnamcode_data[[origin]])
}
return(vietnamcode::vietnamcode_data[destination_index, destination])
}
|
/scratch/gouwar.j/cran-all/cranData/vietnamcode/R/vietnamcode.R
|
#' Vietnam provinces ID
#'
#' @format A data frame
#'
#' \itemize{
#' \item{province_name}{province name}
#' \item{province_name}{province name with diacritics}
#' \item{pci}{used by the Provincial Competitiveness Index}
#' \item{enterprise_census}{used by the General Statistics Office (GSO) for the Enterprise Census}
#' \item{enterprise_census_old}{old GSO code}
#' \item{enterprise_census_c}{GSO code as character}
#' \item{regex}{Regular Expression to match different spellings}
#' }
"vietnamcode_data"
|
/scratch/gouwar.j/cran-all/cranData/vietnamcode/R/vietnamcode_data.R
|
#' @title
#' Convert characters from legacy Vietnamese encodings to UTF-8 encoding
#'
#' @param x data.frame, sf object, or character vector
#' @param from Text encoding of input x
#' @param to Text encoding of output
#' @param diacritics logical. Preserve diacritics (TRUE) or not (FALSE)?
# @param ... Additional arguments to gsubfn()
#'
#' @details
#' Many characters in legacy Vietnamese encodings (e.g. TCVN3, VPS, VISCII)
#' are not read correctly in R, particularly those with diacritics (accents). The particular
#' encodings don't seem to be supported by R, at least on many locales. When R reads them as if they have UTF-8
#' encoding, it will result in wrong characters being printed and garbled text (Mojibake - see vignette and examples below).
#'
#' This functions converts character vectors to from various Vietnamese legacy encodings to readable
#' Unicode characters in UTF-8 encoding. By default the function attempts the conversion from TCVN3 to Unicode
#' while preserving the diacritics, but also supports other Vietnamese encodings (TCVN3, VPS, VISCII - via argument \code{from}).
#' Currently VNI and VNU are not supported.
#'
#' It works on data frames, spatial objects (from the sf package), and character vectors.
#'
#' \code{diacritics = TRUE} will return characters with their diacritics. With \code{diacritics = FALSE},
#' the output will be ASCII letters without diacritics. Upper/lower case will be preserved regardless.
#'
#' The internal search and replace is performed by the \code{\link[gsubfn]{gsubfn}} function from the \pkg{gsubfn} package. It performs simple character replacements to fix the text.
#'
#' Currently the function converts from the Vietnamese encodings to Unicode, not vice versa. Please contact the maintainer
#' if the conversion from Unicode to Vietnamese encodings would be relevant for you.
#'
#' The character conversion table was adapted from \url{http://vietunicode.sourceforge.net/charset/}.
#'
#' @section Warning:
#' When printing a data frame with Unicode characters using the standard print method, the R console will show the Unicode escape characters (e.g. "<U+1EA3>") instead of the actual Unicode characters. This is a limitation of the R console. The data are correct and will show correctly when using e.g. View() or when printing columns as vectors.
#'
#' @return character string or data frame (depending on x)
#'
#' @export
#' @importFrom methods is
#' @importFrom utf8 as_utf8
#' @importFrom gsubfn gsubfn
#' @importFrom sf st_geometry
#' @importFrom sf st_drop_geometry
#' @importFrom sf st_set_geometry
#'
#'
#' @examples
#' # First we produce the wrongly formatted character string
#' # using Unicode symbols is only necessary to create a portable example in the R package
#' # you don't need to use Unicode characters like this in your data
#'
#' string <- c("Qu\u00B6ng Tr\u00DE", "An \u00A7\u00ABn", "Th\u00F5a Thi\u00AAn Hu\u00D5")
#'
#' # Below we have a look at the wrongly formatted character string.
#' # This is what it would look like when you load TCVN3 encoded data as UTF8
#' string
#'
#' # convert character vector from TCVN3 > UTF-8
#' decodeVN(string)
#' decodeVN(string, diacritics = FALSE)
#'
#' # # convert data frame columns from TCVN3 > UTF-8
#' df <- data.frame(id = c(1,2,3),
#' name = string)
#'
#' df_decode <- decodeVN(df)
#' df_decode
#' # NOTE: some characters may be displayed as unicode in the R console
#' # check the individual column to see if they are correct:
#' df_decode[,2]
#'
#' decodeVN(df, diacritics = FALSE)
#'
#' # using the built-in sample data
#' data(vn_samples)
#' decodeVN(vn_samples$TCVN3) # TCVN -> Unicode # TCVN3 -> Unicode
#' decodeVN(vn_samples$TCVN3, diacritics = FALSE) # TCVN3 -> Unicode (ASCII characters only)
#' decodeVN(vn_samples$VISCII, from = "VISCII") # VISCII -> Unicode
#'
#'
#' # Demonstration for sf object
#'
#' # create sf object (just for demonstration)
#' require(sf)
#' df_geom <- st_sfc(st_point(c(3,4)), st_point(c(10,11)), st_point(c(15,13)))
#' df_spatial <- st_set_geometry(df, df_geom)
#'
#' # convert Vietnamese characters
#' df_spatial_decode <- decodeVN(df_spatial)
#'
#' df_spatial_decode
#' df_spatial_decode$name
#'
#'
decodeVN <- function(x,
from = c("TCVN3", "VISCII", "VPS", "Unicode"), # "VNI", "VNU",
to = c("Unicode", "TCVN3", "VISCII", "VPS"), # "VNI"
diacritics = TRUE
) {
# if spatial objects, temporarily store spatial information
if(is(x, "sf")) {
spatial <- TRUE
x_geometry <- st_geometry(x) # temporarily store geometry column
x <- st_drop_geometry(x) # remove geometry column
} else {
spatial <- FALSE
}
if(inherits(x, "data.frame")) x <- as.data.frame(x) # for tibbles, sf, data.table
if(!class(x) %in% c("data.frame", "character")) stop("x must be a character vector or data.frame")
enc_table <- loadEncodingTableVN(version = 2)
from <- match.arg(from, choices = c("TCVN3", "VISCII", "VPS", "Unicode"))
to <- match.arg(to, choices = c("Unicode", "TCVN3", "VISCII", "VPS"))
if(isFALSE(diacritics) & to != "Unicode") stop("diacritics can only be FALSE when 'to' != 'Unicode'")
# set enginge for gsubfn
perl <- TRUE # the default sometimes led to unexpected results
if(diacritics) {
tmp <- as.list(enc_table[,to])
} else {
tmp <- as.list(enc_table$ASCII)
}
names(tmp) <- enc_table[, from]
if(is.data.frame(x)) {
char_cols <- which(sapply(x, typeof) %in% "character")
if(length(char_cols) == 0) stop("No character columns in x")
out <- x
for(i in char_cols){
out[,i] <- as_utf8(x[,i])
which_na <- which(is.na(out[,i]))
if(length(which_na) >= 1){
out[-which_na,i] <- gsubfn(".", replacement = tmp, x = out[-which_na ,i], perl = perl)
} else {
out[,i] <- gsubfn(".", replacement = tmp, x = out[ ,i], perl = perl)
}
}
if(spatial) {
out <- st_set_geometry(out, x_geometry) # assign geometry column again
}
}
if(is.character(x)) {
x <- as_utf8(x)
# if(from == "VNI"){ # doesn't yet work for all characters
# x_tmp <- x
# for(i in 1:length(tmp))
# x_tmp <- gsubfn(pattern = names(tmp)[i], replacement = tmp[[i]], x = x_tmp, perl = T)
# } else {
# }
which_na <- which(is.na(x))
if(length(which_na) >= 1){
out <- x
out[-which_na] <- gsubfn(".", replacement = tmp, x = x[-which_na], perl = T)
} else {
out <- gsubfn(".", replacement = tmp, x = x, perl = T)
}
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/vietnameseConverter/R/decodeVN.R
|
#' Load conversion table for Vietnamese characters
#' @param version Version of this table (1 or 2) - 2 is currentlty being used
#' @return data.frame
#' @details
#' The table was adapted from \url{http://vietunicode.sourceforge.net/charset/}
#' @export
#'
loadEncodingTableVN <- function(version)
{
if(version == 1){
out <- structure(list(Unicode_Hex = c("U+00C0",
"U+00C1", "U+00C2", "U+00C3", "U+00C8", "U+00C9", "U+00CA", "U+00CC",
"U+00CD", "U+00D2", "U+00D3", "U+00D4", "U+00D5", "U+00D9", "U+00DA",
"U+00DD", "U+00E0", "U+00E1", "U+00E2", "U+00E3", "U+00E8", "U+00E9",
"U+00EA", "U+00EC", "U+00ED", "U+00F2", "U+00F3", "U+00F4", "U+00F5",
"U+00F9", "U+00FA", "U+00FD", "U+0102", "U+0103", "U+0110", "U+0111",
"U+0128", "U+0129", "U+0168", "U+0169", "U+01A0", "U+01A1", "U+01AF",
"U+01B0", "U+1EA0", "U+1EA1", "U+1EA2", "U+1EA3", "U+1EA4", "U+1EA5",
"U+1EA6", "U+1EA7", "U+1EA8", "U+1EA9", "U+1EAA", "U+1EAB", "U+1EAC",
"U+1EAD", "U+1EAE", "U+1EAF", "U+1EB0", "U+1EB1", "U+1EB2", "U+1EB3",
"U+1EB4", "U+1EB5", "U+1EB6", "U+1EB7", "U+1EB8", "U+1EB9", "U+1EBA",
"U+1EBB", "U+1EBC", "U+1EBD", "U+1EBE", "U+1EBF", "U+1EC0", "U+1EC1",
"U+1EC2", "U+1EC3", "U+1EC4", "U+1EC5", "U+1EC6", "U+1EC7", "U+1EC8",
"U+1EC9", "U+1ECA", "U+1ECB", "U+1ECC", "U+1ECD", "U+1ECE", "U+1ECF",
"U+1ED0", "U+1ED1", "U+1ED2", "U+1ED3", "U+1ED4", "U+1ED5", "U+1ED6",
"U+1ED7", "U+1ED8", "U+1ED9", "U+1EDA", "U+1EDB", "U+1EDC", "U+1EDD",
"U+1EDE", "U+1EDF", "U+1EE0", "U+1EE1", "U+1EE2", "U+1EE3", "U+1EE4",
"U+1EE5", "U+1EE6", "U+1EE7", "U+1EE8", "U+1EE9", "U+1EEA", "U+1EEB",
"U+1EEC", "U+1EED", "U+1EEE", "U+1EEF", "U+1EF0", "U+1EF1", "U+1EF2",
"U+1EF3", "U+1EF4", "U+1EF5", "U+1EF6", "U+1EF7", "U+1EF8", "U+1EF9"
), VNI_Hex = c("41 D8", "41 D9", "41 C2", "41 D5", "45 D8", "45 D9",
"45 C2", "CC", "CD", "4F D8", "4F D9", "4F C2", "4F D5", "55 D8",
"55 D9", "59 D9", "61 F8", "61 F9", "61 E2", "61 F5", "65 F8",
"65 F9", "65 E2", "EC", "ED", "6F F8", "6F F9", "6F E2", "6F F5",
"75 F8", "75 F9", "79 F9", "41 CA", "61 EA", "D1", "F1", "D3",
"F3", "55 D5", "75 F5", "D4", "F4", "D6", "F6", "41 CF", "61 EF",
"41 DB", "61 FB", "41 C1", "61 E1", "41 C0", "61 E0", "41 C5",
"61 E5", "41 C3", "61 E3", "41 C4", "61 E4", "41 C9", "61 E9",
"41 C8", "61 E8", "41 DA", "61 FA", "41 DC", "61 FC", "41 CB",
"61 EB", "45 CF", "65 EF", "45 DB", "65 FB", "45 D5", "65 F5",
"45 C1", "65 E1", "45 C0", "65 E0", "45 C5", "65 E5", "45 C3",
"65 E3", "45 C4", "65 E4", "C6", "E6", "D2", "F2", "4F CF", "6F EF",
"4F DB", "6F FB", "4F C1", "6F E1", "4F C0", "6F E0", "4F C5",
"6F E5", "4F C3", "6F E3", "4F C4", "6F E4", "D4 D9", "F4 F9",
"D4 D8", "F4 F8", "D4 DB", "F4 FB", "D4 D5", "F4 F5", "D4 CF",
"F4 EF", "55 CF", "75 EF", "55 DB", "75 FB", "D6 D9", "F6 F9",
"D6 D8", "F6 F8", "D6 DB", "F6 FB", "D6 D5", "F6 F5", "D6 CF",
"F6 EF", "59 D8", "79 F8", "CE", "EE", "59 DB", "79 FB", "59 D5",
"79 F5"), VPS_Hex = c("80", "C1", "C2", "82", "D7", "C9", "CA",
"B5", "B4", "BC", "B9", "D4", "BE", "A8", "DA", "DD", "E0", "E1",
"E2", "E3", "E8", "E9", "EA", "EC", "ED", "F2", "F3", "F4", "F5",
"F9", "FA", "9A", "88", "E6", "F1", "C7", "B8", "EF", "AC", "DB",
"F7", "D6", "D0", "DC", "2", "E5", "81", "E4", "83", "C3", "84",
"C0", "85", "C4", "1C", "C5", "3", "C6", "8D", "A1", "8E", "A2",
"8F", "A3", "F0", "A4", "4", "A5", "5", "CB", "DE", "C8", "FE",
"EB", "90", "89", "93", "8A", "94", "8B", "95", "CD", "6", "8C",
"B7", "CC", "10", "CE", "11", "86", "BD", "D5", "96", "D3", "97",
"D2", "98", "B0", "99", "87", "12", "B6", "9D", "A7", "9E", "A9",
"9F", "AA", "A6", "AB", "13", "AE", "14", "F8", "D1", "FB", "AD",
"D9", "AF", "D8", "B1", "BA", "1D", "BB", "15", "BF", "B2", "FF",
"19", "9C", "FD", "9B", "B3", "CF"), VISCII_Hex = c("C0", "C1",
"C2", "C3", "C8", "C9", "CA", "CC", "CD", "D2", "D3", "D4", "A0",
"D9", "DA", "DD", "E0", "E1", "E2", "E3", "E8", "E9", "EA", "EC",
"ED", "F2", "F3", "F4", "F5", "F9", "FA", "FD", "C5", "E5", "D0",
"F0", "CE", "EE", "9D", "FB", "B4", "BD", "BF", "DF", "80", "D5",
"C4", "E4", "84", "A4", "85", "A5", "86", "A6", "6", "E7", "87",
"A7", "81", "A1", "82", "A2", "2", "C6", "5", "C7", "83", "A3",
"89", "A9", "CB", "EB", "88", "A8", "8A", "AA", "8B", "AB", "8C",
"AC", "8D", "AD", "8E", "AE", "9B", "EF", "98", "B8", "9A", "F7",
"99", "F6", "8F", "AF", "90", "B0", "91", "B1", "92", "B2", "93",
"B5", "95", "BE", "96", "B6", "97", "B7", "B3", "DE", "94", "FE",
"9E", "F8", "9C", "FC", "BA", "D1", "BB", "D7", "BC", "D8", "FF",
"E6", "B9", "F1", "9F", "CF", "1E", "DC", "14", "D6", "19", "DB"
), TCVN3_Hex = c("41 B5", "41 B8", "A2", "41 B7", "45 CC", "45 D0",
"A3", "49 D7", "49 DD", "4F DF", "4F E3", "A4", "4F E2", "55 EF",
"55 F3", "59 FD", "B5", "B8", "A9", "B7", "CC", "D0", "AA", "D7",
"DD", "DF", "E3", "AB", "E2", "EF", "F3", "FD", "A1", "A8", "A7",
"AE", "49 DC", "DC", "55 F2", "F2", "A5", "AC", "A6", "AD", "41 B9",
"B9", "41 B6", "B6", "A2 CA", "CA", "A2 C7", "C7", "A2 C8", "C8",
"A2 C9", "C9", "A2 CB", "CB", "A1 BE", "BE", "A1 BB", "BB", "A1 BC",
"BC", "A1 BD", "BD", "A1 C6", "C6", "45 D1", "D1", "45 CE", "CE",
"45 CF", "CF", "A3 D5", "D5", "A3 D2", "D2", "A3 D3", "D3", "A3 D4",
"D4", "A3 D6", "D6", "49 D8", "D8", "49 DE", "DE", "4F E4", "E4",
"4F E1", "E1", "A4 E8", "E8", "A4 E5", "E5", "A4 E6", "E6", "A4 E7",
"E7", "A4 E9", "E9", "A5 ED", "ED", "A5 EA", "EA", "A5 EB", "EB",
"A5 EC", "EC", "A5 EE", "EE", "55 F4", "F4", "55 F1", "F1", "A6 F8",
"F8", "A6 F5", "F5", "A6 F6", "F6", "A6 F7", "F7", "A6 F9", "F9",
"59 FA", "FA", "59 FE", "FE", "59 FB", "FB", "59 FC", "FC"),
VIQR = c("A`", "A'", "A^", "A~", "E`", "E'", "E^", "I`",
"I'", "O`", "O'", "O^", "O~", "U`", "U'", "Y'", "a`", "a'",
"a^", "a~", "e`", "e'", "e^", "i`", "i'", "o`", "o'", "o^",
"o~", "u`", "u'", "y'", "A(", "a(", "DD", "dd", "I~", "i~",
"U~", "u~", "O+", "o+", "U+", "u+", "A.", "a.", "A?", "a?",
"A^'", "a^'", "A^`", "a^`", "A^?", "a^?", "A^~", "a^~", "A^.",
"a^.", "A('", "a('", "A(`", "a(`", "A(?", "a(?", "A(~", "a(~",
"A(.", "a(.", "E.", "e.", "E?", "e?", "E~", "e~", "E^'",
"e^'", "E^`", "e^`", "E^?", "e^?", "E^~", "e^~", "E^.", "e^.",
"I?", "i?", "I.", "i.", "O.", "o.", "O?", "o?", "O^'", "o^'",
"O^`", "o^`", "O^?", "o^?", "O^~", "o^~", "O^.", "o^.", "O+'",
"o+'", "O+`", "o+`", "O+?", "o+?", "O+~", "o+~", "O+.", "o+.",
"U.", "u.", "U?", "u?", "U+'", "u+'", "U+`", "u+`", "U+?",
"u+?", "U+~", "u+~", "U+.", "u+.", "Y`", "y`", "Y.", "y.",
"Y?", "y?", "Y~", "y~"), English_Name = c("LATIN CAPITAL LETTER A WITH GRAVE",
"LATIN CAPITAL LETTER A WITH ACUTE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX",
"LATIN CAPITAL LETTER A WITH TILDE", "LATIN CAPITAL LETTER E WITH GRAVE",
"LATIN CAPITAL LETTER E WITH ACUTE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX",
"LATIN CAPITAL LETTER I WITH GRAVE", "LATIN CAPITAL LETTER I WITH ACUTE",
"LATIN CAPITAL LETTER O WITH GRAVE", "LATIN CAPITAL LETTER O WITH ACUTE",
"LATIN CAPITAL LETTER O WITH CIRCUMFLEX", "LATIN CAPITAL LETTER O WITH TILDE",
"LATIN CAPITAL LETTER U WITH GRAVE", "LATIN CAPITAL LETTER U WITH ACUTE",
"LATIN CAPITAL LETTER Y WITH ACUTE", "LATIN SMALL LETTER A WITH GRAVE",
"LATIN SMALL LETTER A WITH ACUTE", "LATIN SMALL LETTER A WITH CIRCUMFLEX",
"LATIN SMALL LETTER A WITH TILDE", "LATIN SMALL LETTER E WITH GRAVE",
"LATIN SMALL LETTER E WITH ACUTE", "LATIN SMALL LETTER E WITH CIRCUMFLEX",
"LATIN SMALL LETTER I WITH GRAVE", "LATIN SMALL LETTER I WITH ACUTE",
"LATIN SMALL LETTER O WITH GRAVE", "LATIN SMALL LETTER O WITH ACUTE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX", "LATIN SMALL LETTER O WITH TILDE",
"LATIN SMALL LETTER U WITH GRAVE", "LATIN SMALL LETTER U WITH ACUTE",
"LATIN SMALL LETTER Y WITH ACUTE", "LATIN CAPITAL LETTER A WITH BREVE",
"LATIN SMALL LETTER A WITH BREVE", "LATIN CAPITAL LETTER D WITH STROKE",
"LATIN SMALL LETTER D WITH STROKE", "LATIN CAPITAL LETTER I WITH TILDE",
"LATIN SMALL LETTER I WITH TILDE", "LATIN CAPITAL LETTER U WITH TILDE",
"LATIN SMALL LETTER U WITH TILDE", "LATIN CAPITAL LETTER O WITH HORN",
"LATIN SMALL LETTER O WITH HORN", "LATIN CAPITAL LETTER U WITH HORN",
"LATIN SMALL LETTER U WITH HORN", "LATIN CAPITAL LETTER A WITH DOT BELOW",
"LATIN SMALL LETTER A WITH DOT BELOW", "LATIN CAPITAL LETTER A WITH HOOK ABOVE",
"LATIN SMALL LETTER A WITH HOOK ABOVE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW", "LATIN CAPITAL LETTER A WITH BREVE AND ACUTE",
"LATIN SMALL LETTER A WITH BREVE AND ACUTE", "LATIN CAPITAL LETTER A WITH BREVE AND GRAVE",
"LATIN SMALL LETTER A WITH BREVE AND GRAVE", "LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE",
"LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE", "LATIN CAPITAL LETTER A WITH BREVE AND TILDE",
"LATIN SMALL LETTER A WITH BREVE AND TILDE", "LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW",
"LATIN SMALL LETTER A WITH BREVE AND DOT BELOW", "LATIN CAPITAL LETTER E WITH DOT BELOW",
"LATIN SMALL LETTER E WITH DOT BELOW", "LATIN CAPITAL LETTER E WITH HOOK ABOVE",
"LATIN SMALL LETTER E WITH HOOK ABOVE", "LATIN CAPITAL LETTER E WITH TILDE",
"LATIN SMALL LETTER E WITH TILDE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW", "LATIN CAPITAL LETTER I WITH HOOK ABOVE",
"LATIN SMALL LETTER I WITH HOOK ABOVE", "LATIN CAPITAL LETTER I WITH DOT BELOW",
"LATIN SMALL LETTER I WITH DOT BELOW", "LATIN CAPITAL LETTER O WITH DOT BELOW",
"LATIN SMALL LETTER O WITH DOT BELOW", "LATIN CAPITAL LETTER O WITH HOOK ABOVE",
"LATIN SMALL LETTER O WITH HOOK ABOVE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW", "LATIN CAPITAL LETTER O WITH HORN AND ACUTE",
"LATIN SMALL LETTER O WITH HORN AND ACUTE", "LATIN CAPITAL LETTER O WITH HORN AND GRAVE",
"LATIN SMALL LETTER O WITH HORN AND GRAVE", "LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE",
"LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE", "LATIN CAPITAL LETTER O WITH HORN AND TILDE",
"LATIN SMALL LETTER O WITH HORN AND TILDE", "LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW",
"LATIN SMALL LETTER O WITH HORN AND DOT BELOW", "LATIN CAPITAL LETTER U WITH DOT BELOW",
"LATIN SMALL LETTER U WITH DOT BELOW", "LATIN CAPITAL LETTER U WITH HOOK ABOVE",
"LATIN SMALL LETTER U WITH HOOK ABOVE", "LATIN CAPITAL LETTER U WITH HORN AND ACUTE",
"LATIN SMALL LETTER U WITH HORN AND ACUTE", "LATIN CAPITAL LETTER U WITH HORN AND GRAVE",
"LATIN SMALL LETTER U WITH HORN AND GRAVE", "LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE",
"LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE", "LATIN CAPITAL LETTER U WITH HORN AND TILDE",
"LATIN SMALL LETTER U WITH HORN AND TILDE", "LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW",
"LATIN SMALL LETTER U WITH HORN AND DOT BELOW", "LATIN CAPITAL LETTER Y WITH GRAVE",
"LATIN SMALL LETTER Y WITH GRAVE", "LATIN CAPITAL LETTER Y WITH DOT BELOW",
"LATIN SMALL LETTER Y WITH DOT BELOW", "LATIN CAPITAL LETTER Y WITH HOOK ABOVE",
"LATIN SMALL LETTER Y WITH HOOK ABOVE", "LATIN CAPITAL LETTER Y WITH TILDE",
"LATIN SMALL LETTER Y WITH TILDE"), ascii = c("A", "A", "A",
"A", "E", "E", "E", "I", "I", "O", "O", "O", "O", "U", "U",
"Y", "a", "a", "a", "a", "e", "e", "e", "i", "i", "o", "o",
"o", "o", "u", "u", "y", "A", "a", "D", "d", "I", "i", "U",
"u", "O", "o", "U", "u", "A", "a", "A", "a", "A", "a", "A",
"a", "A", "a", "A", "a", "A", "a", "A", "a", "A", "a", "A",
"a", "A", "a", "A", "a", "E", "e", "E", "e", "E", "e", "E",
"e", "E", "e", "E", "e", "E", "e", "E", "e", "I", "i", "I",
"i", "O", "o", "O", "o", "O", "o", "O", "o", "O", "o", "O",
"o", "O", "o", "O", "o", "O", "o", "O", "o", "O", "o", "O",
"o", "U", "u", "U", "u", "U", "u", "U", "u", "U", "u", "U",
"u", "U", "u", "Y", "y", "Y", "y", "Y", "y", "Y", "y")), row.names = c(NA,
-134L), class = "data.frame") #out[,1] <- as_utf8(out[,1])
out$Unicode <- intToUtf8(gsub("U+", "0x", out$Unicode_Hex, fixed = T), multiple = TRUE)
out$VNI <- sapply(strsplit(out$VNI_Hex, " "), FUN = function(x) {
intToUtf8(paste0("0x", x))})
out$VPS <- sapply(strsplit(out$VPS_Hex, " "), FUN = function(x) {
intToUtf8(paste0("0x", x))})
out$VISCII <- sapply(strsplit(out$VISCII_Hex, " "), FUN = function(x) {
intToUtf8(paste0("0x", x))})
out$TCVN3 <- sapply(strsplit(out$TCVN3_Hex, " "), FUN = function(x) {
intToUtf8(paste0("0x", x))})
}
if(version == 2){
out <- structure(list(Unicode_Hex = c("U+00C0", "U+00C1", "U+00C2",
"U+00C3", "U+00C8", "U+00C9", "U+00CA", "U+00CC", "U+00CD", "U+00D2",
"U+00D3", "U+00D4", "U+00D5", "U+00D9", "U+00DA", "U+00DD", "U+00E0",
"U+00E1", "U+00E2", "U+00E3", "U+00E8", "U+00E9", "U+00EA", "U+00EC",
"U+00ED", "U+00F2", "U+00F3", "U+00F4", "U+00F5", "U+00F9", "U+00FA",
"U+00FD", "U+0102", "U+0103", "U+0110", "U+0111", "U+0128", "U+0129",
"U+0168", "U+0169", "U+01A0", "U+01A1", "U+01AF", "U+01B0", "U+1EA0",
"U+1EA1", "U+1EA2", "U+1EA3", "U+1EA4", "U+1EA5", "U+1EA6", "U+1EA7",
"U+1EA8", "U+1EA9", "U+1EAA", "U+1EAB", "U+1EAC", "U+1EAD", "U+1EAE",
"U+1EAF", "U+1EB0", "U+1EB1", "U+1EB2", "U+1EB3", "U+1EB4", "U+1EB5",
"U+1EB6", "U+1EB7", "U+1EB8", "U+1EB9", "U+1EBA", "U+1EBB", "U+1EBC",
"U+1EBD", "U+1EBE", "U+1EBF", "U+1EC0", "U+1EC1", "U+1EC2", "U+1EC3",
"U+1EC4", "U+1EC5", "U+1EC6", "U+1EC7", "U+1EC8", "U+1EC9", "U+1ECA",
"U+1ECB", "U+1ECC", "U+1ECD", "U+1ECE", "U+1ECF", "U+1ED0", "U+1ED1",
"U+1ED2", "U+1ED3", "U+1ED4", "U+1ED5", "U+1ED6", "U+1ED7", "U+1ED8",
"U+1ED9", "U+1EDA", "U+1EDB", "U+1EDC", "U+1EDD", "U+1EDE", "U+1EDF",
"U+1EE0", "U+1EE1", "U+1EE2", "U+1EE3", "U+1EE4", "U+1EE5", "U+1EE6",
"U+1EE7", "U+1EE8", "U+1EE9", "U+1EEA", "U+1EEB", "U+1EEC", "U+1EED",
"U+1EEE", "U+1EEF", "U+1EF0", "U+1EF1", "U+1EF2", "U+1EF3", "U+1EF4",
"U+1EF5", "U+1EF6", "U+1EF7", "U+1EF8", "U+1EF9"), ASCII = c("A",
"A", "A", "A", "E", "E", "E", "I", "I", "O", "O", "O", "O", "U",
"U", "Y", "a", "a", "a", "a", "e", "e", "e", "i", "i", "o", "o",
"o", "o", "u", "u", "y", "A", "a", "D", "d", "I", "i", "U", "u",
"O", "o", "U", "u", "A", "a", "A", "a", "A", "a", "A", "a", "A",
"a", "A", "a", "A", "a", "A", "a", "A", "a", "A", "a", "A", "a",
"A", "a", "E", "e", "E", "e", "E", "e", "E", "e", "E", "e", "E",
"e", "E", "e", "E", "e", "I", "i", "I", "i", "O", "o", "O", "o",
"O", "o", "O", "o", "O", "o", "O", "o", "O", "o", "O", "o", "O",
"o", "O", "o", "O", "o", "O", "o", "U", "u", "U", "u", "U", "u",
"U", "u", "U", "u", "U", "u", "U", "u", "Y", "y", "Y", "y", "Y",
"y", "Y", "y"), English_Name = c("LATIN CAPITAL LETTER A WITH GRAVE",
"LATIN CAPITAL LETTER A WITH ACUTE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX",
"LATIN CAPITAL LETTER A WITH TILDE", "LATIN CAPITAL LETTER E WITH GRAVE",
"LATIN CAPITAL LETTER E WITH ACUTE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX",
"LATIN CAPITAL LETTER I WITH GRAVE", "LATIN CAPITAL LETTER I WITH ACUTE",
"LATIN CAPITAL LETTER O WITH GRAVE", "LATIN CAPITAL LETTER O WITH ACUTE",
"LATIN CAPITAL LETTER O WITH CIRCUMFLEX", "LATIN CAPITAL LETTER O WITH TILDE",
"LATIN CAPITAL LETTER U WITH GRAVE", "LATIN CAPITAL LETTER U WITH ACUTE",
"LATIN CAPITAL LETTER Y WITH ACUTE", "LATIN SMALL LETTER A WITH GRAVE",
"LATIN SMALL LETTER A WITH ACUTE", "LATIN SMALL LETTER A WITH CIRCUMFLEX",
"LATIN SMALL LETTER A WITH TILDE", "LATIN SMALL LETTER E WITH GRAVE",
"LATIN SMALL LETTER E WITH ACUTE", "LATIN SMALL LETTER E WITH CIRCUMFLEX",
"LATIN SMALL LETTER I WITH GRAVE", "LATIN SMALL LETTER I WITH ACUTE",
"LATIN SMALL LETTER O WITH GRAVE", "LATIN SMALL LETTER O WITH ACUTE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX", "LATIN SMALL LETTER O WITH TILDE",
"LATIN SMALL LETTER U WITH GRAVE", "LATIN SMALL LETTER U WITH ACUTE",
"LATIN SMALL LETTER Y WITH ACUTE", "LATIN CAPITAL LETTER A WITH BREVE",
"LATIN SMALL LETTER A WITH BREVE", "LATIN CAPITAL LETTER D WITH STROKE",
"LATIN SMALL LETTER D WITH STROKE", "LATIN CAPITAL LETTER I WITH TILDE",
"LATIN SMALL LETTER I WITH TILDE", "LATIN CAPITAL LETTER U WITH TILDE",
"LATIN SMALL LETTER U WITH TILDE", "LATIN CAPITAL LETTER O WITH HORN",
"LATIN SMALL LETTER O WITH HORN", "LATIN CAPITAL LETTER U WITH HORN",
"LATIN SMALL LETTER U WITH HORN", "LATIN CAPITAL LETTER A WITH DOT BELOW",
"LATIN SMALL LETTER A WITH DOT BELOW", "LATIN CAPITAL LETTER A WITH HOOK ABOVE",
"LATIN SMALL LETTER A WITH HOOK ABOVE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE", "LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW",
"LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW", "LATIN CAPITAL LETTER A WITH BREVE AND ACUTE",
"LATIN SMALL LETTER A WITH BREVE AND ACUTE", "LATIN CAPITAL LETTER A WITH BREVE AND GRAVE",
"LATIN SMALL LETTER A WITH BREVE AND GRAVE", "LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE",
"LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE", "LATIN CAPITAL LETTER A WITH BREVE AND TILDE",
"LATIN SMALL LETTER A WITH BREVE AND TILDE", "LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW",
"LATIN SMALL LETTER A WITH BREVE AND DOT BELOW", "LATIN CAPITAL LETTER E WITH DOT BELOW",
"LATIN SMALL LETTER E WITH DOT BELOW", "LATIN CAPITAL LETTER E WITH HOOK ABOVE",
"LATIN SMALL LETTER E WITH HOOK ABOVE", "LATIN CAPITAL LETTER E WITH TILDE",
"LATIN SMALL LETTER E WITH TILDE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE", "LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW",
"LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW", "LATIN CAPITAL LETTER I WITH HOOK ABOVE",
"LATIN SMALL LETTER I WITH HOOK ABOVE", "LATIN CAPITAL LETTER I WITH DOT BELOW",
"LATIN SMALL LETTER I WITH DOT BELOW", "LATIN CAPITAL LETTER O WITH DOT BELOW",
"LATIN SMALL LETTER O WITH DOT BELOW", "LATIN CAPITAL LETTER O WITH HOOK ABOVE",
"LATIN SMALL LETTER O WITH HOOK ABOVE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE", "LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW",
"LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW", "LATIN CAPITAL LETTER O WITH HORN AND ACUTE",
"LATIN SMALL LETTER O WITH HORN AND ACUTE", "LATIN CAPITAL LETTER O WITH HORN AND GRAVE",
"LATIN SMALL LETTER O WITH HORN AND GRAVE", "LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE",
"LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE", "LATIN CAPITAL LETTER O WITH HORN AND TILDE",
"LATIN SMALL LETTER O WITH HORN AND TILDE", "LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW",
"LATIN SMALL LETTER O WITH HORN AND DOT BELOW", "LATIN CAPITAL LETTER U WITH DOT BELOW",
"LATIN SMALL LETTER U WITH DOT BELOW", "LATIN CAPITAL LETTER U WITH HOOK ABOVE",
"LATIN SMALL LETTER U WITH HOOK ABOVE", "LATIN CAPITAL LETTER U WITH HORN AND ACUTE",
"LATIN SMALL LETTER U WITH HORN AND ACUTE", "LATIN CAPITAL LETTER U WITH HORN AND GRAVE",
"LATIN SMALL LETTER U WITH HORN AND GRAVE", "LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE",
"LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE", "LATIN CAPITAL LETTER U WITH HORN AND TILDE",
"LATIN SMALL LETTER U WITH HORN AND TILDE", "LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW",
"LATIN SMALL LETTER U WITH HORN AND DOT BELOW", "LATIN CAPITAL LETTER Y WITH GRAVE",
"LATIN SMALL LETTER Y WITH GRAVE", "LATIN CAPITAL LETTER Y WITH DOT BELOW",
"LATIN SMALL LETTER Y WITH DOT BELOW", "LATIN CAPITAL LETTER Y WITH HOOK ABOVE",
"LATIN SMALL LETTER Y WITH HOOK ABOVE", "LATIN CAPITAL LETTER Y WITH TILDE",
"LATIN SMALL LETTER Y WITH TILDE"), Unicode_int = c(192L, 193L,
194L, 195L, 200L, 201L, 202L, 204L, 205L, 210L, 211L, 212L, 213L,
217L, 218L, 221L, 224L, 225L, 226L, 227L, 232L, 233L, 234L, 236L,
237L, 242L, 243L, 244L, 245L, 249L, 250L, 253L, 258L, 259L, 208L,
273L, 296L, 297L, 360L, 361L, 416L, 417L, 431L, 432L, 7840L,
7841L, 7842L, 7843L, 7844L, 7845L, 7846L, 7847L, 7848L, 7849L,
7850L, 7851L, 7852L, 7853L, 7854L, 7855L, 7856L, 7857L, 7858L,
7859L, 7860L, 7861L, 7862L, 7863L, 7864L, 7865L, 7866L, 7867L,
7868L, 7869L, 7870L, 7871L, 7872L, 7873L, 7874L, 7875L, 7876L,
7877L, 7878L, 7879L, 7880L, 7881L, 7882L, 7883L, 7884L, 7885L,
7886L, 7887L, 7888L, 7889L, 7890L, 7891L, 7892L, 7893L, 7894L,
7895L, 7896L, 7897L, 7898L, 7899L, 7900L, 7901L, 7902L, 7903L,
7904L, 7905L, 7906L, 7907L, 7908L, 7909L, 7910L, 7911L, 7912L,
7913L, 7914L, 7915L, 7916L, 7917L, 7918L, 7919L, 7920L, 7921L,
7922L, 7923L, 7924L, 7925L, 7926L, 7927L, 7928L, 7929L), TCVN3_int = list(
c(65L, 181L), c(65L, 184L), 162L, c(65L, 183L), c(69L, 204L
), c(69L, 208L), 163L, c(73L, 215L), c(73L, 221L), c(79L,
223L), c(79L, 227L), 164L, c(79L, 226L), c(85L, 239L), c(85L,
243L), c(89L, 253L), 181L, 184L, 169L, 183L, 204L, 208L,
170L, 215L, 221L, 223L, 227L, 171L, 226L, 239L, 243L, 253L,
161L, 168L, 167L, 174L, c(73L, 220L), 220L, c(85L, 242L),
242L, 165L, 172L, 166L, 173L, c(65L, 185L), 185L, c(65L,
182L), 182L, c(162L, 202L), 202L, c(162L, 199L), 199L, c(162L,
200L), 200L, c(162L, 201L), 201L, c(162L, 203L), 203L, c(161L,
190L), 190L, c(161L, 187L), 187L, c(161L, 188L), 188L, c(161L,
189L), 189L, c(161L, 198L), 198L, c(69L, 209L), 209L, c(69L,
206L), 206L, c(69L, 207L), 207L, c(163L, 213L), 213L, c(163L,
210L), 210L, c(163L, 211L), 211L, c(163L, 212L), 212L, c(163L,
214L), 214L, c(73L, 216L), 216L, c(73L, 222L), 222L, c(79L,
228L), 228L, c(79L, 225L), 225L, c(164L, 232L), 232L, c(164L,
229L), 229L, c(164L, 230L), 230L, c(164L, 231L), 231L, c(164L,
233L), 233L, c(165L, 237L), 237L, c(165L, 234L), 234L, c(165L,
235L), 235L, c(165L, 236L), 236L, c(165L, 238L), 238L, c(85L,
244L), 244L, c(85L, 241L), 241L, c(166L, 248L), 248L, c(166L,
245L), 245L, c(166L, 246L), 246L, c(166L, 247L), 247L, c(166L,
249L), 249L, c(89L, 250L), 250L, c(89L, 254L), 254L, c(89L,
251L), 251L, c(89L, 252L), 252L), VISCII_int = c(192L, 193L,
194L, 195L, 200L, 201L, 202L, 204L, 205L, 210L, 211L, 212L, NA,
217L, 218L, 221L, 224L, 225L, 226L, 227L, 232L, 233L, 234L, 236L,
237L, 242L, 243L, 244L, 245L, 249L, 250L, 253L, 197L, 229L, 208L,
240L, 206L, 238L, 157L, 251L, 180L, 189L, 191L, 223L, 8364L,
213L, 196L, 228L, 8222L, 164L, 8230L, 165L, 8224L, 166L, 6L,
231L, 8225L, 167L, 129L, 161L, 8218L, 162L, 2L, 198L, 5L, 199L,
402L, 163L, 8240L, 169L, 203L, 235L, 710L, 168L, 352L, 170L,
8249L, 171L, 338L, 172L, 141L, 173L, 381L, 174L, 8250L, 239L,
732L, 184L, 353L, 247L, 8482L, 246L, 143L, 175L, 144L, 176L,
8216L, 177L, 8217L, 178L, 8220L, 181L, 8226L, 190L, 8211L, 182L,
8212L, 183L, 179L, 222L, 8221L, 254L, 382L, 248L, 339L, 252L,
186L, 209L, 187L, 215L, 188L, 216L, 255L, 230L, 185L, 241L, 376L,
207L, 30L, 220L, 20L, 214L, 25L, 219L), VNI_int = list(c(65L,
216L), c(65L, 217L), c(65L, 194L), c(65L, 213L), c(69L, 216L),
c(69L, 217L), c(69L, 194L), 204L, 205L, c(79L, 216L), c(79L,
217L), c(79L, 194L), c(79L, 213L), c(85L, 216L), c(85L, 217L
), c(89L, 217L), c(97L, 248L), c(97L, 249L), c(97L, 226L),
c(97L, 245L), c(101L, 248L), c(101L, 249L), c(101L, 226L),
236L, 237L, c(111L, 248L), c(111L, 249L), c(111L, 226L),
c(111L, 245L), c(117L, 248L), c(117L, 249L), c(121L, 249L
), c(65L, 202L), c(97L, 234L), 209L, 241L, 211L, 243L, c(85L,
213L), c(117L, 245L), 212L, 244L, 214L, 246L, c(65L, 207L
), c(97L, 239L), c(65L, 219L), c(97L, 251L), c(65L, 193L),
c(97L, 225L), c(65L, 192L), c(97L, 224L), c(65L, 197L), c(97L,
229L), c(65L, 195L), c(97L, 227L), c(65L, 196L), c(97L, 228L
), c(65L, 201L), c(97L, 233L), c(65L, 200L), c(97L, 232L),
c(65L, 218L), c(97L, 250L), c(65L, 220L), c(97L, 252L), c(65L,
203L), c(97L, 235L), c(69L, 207L), c(101L, 239L), c(69L,
219L), c(101L, 251L), c(69L, 213L), c(101L, 245L), c(69L,
193L), c(101L, 225L), c(69L, 192L), c(101L, 224L), c(69L,
197L), c(101L, 229L), c(69L, 195L), c(101L, 227L), c(69L,
196L), c(101L, 228L), 198L, 230L, 210L, 242L, c(79L, 207L
), c(111L, 239L), c(79L, 219L), c(111L, 251L), c(79L, 193L
), c(111L, 225L), c(79L, 192L), c(111L, 224L), c(79L, 197L
), c(111L, 229L), c(79L, 195L), c(111L, 227L), c(79L, 196L
), c(111L, 228L), c(212L, 217L), c(244L, 249L), c(212L, 216L
), c(244L, 248L), c(212L, 219L), c(244L, 251L), 212:213,
244:245, c(212L, 207L), c(244L, 239L), c(85L, 207L), c(117L,
239L), c(85L, 219L), c(117L, 251L), c(214L, 217L), c(246L,
249L), c(214L, 216L), c(246L, 248L), c(214L, 219L), c(246L,
251L), 214:213, 246:245, c(214L, 207L), c(246L, 239L), c(89L,
216L), c(121L, 248L), 206L, 238L, c(89L, 219L), c(121L, 251L
), c(89L, 213L), c(121L, 245L)), VPS_int = c(8364L, 193L,
194L, 8218L, 215L, 201L, 202L, 181L, 180L, 188L, 185L, 212L,
190L, 168L, 218L, 221L, 224L, 225L, 226L, 227L, 232L, 233L, 234L,
236L, 237L, 242L, 243L, 244L, 245L, 249L, 250L, 353L, 710L, 230L,
241L, 199L, 184L, 239L, 172L, 219L, 247L, 214L, 208L, 220L, 2L,
229L, 129L, 228L, 402L, 195L, 8222L, 192L, 8230L, 196L, 28L,
197L, 3L, 198L, 141L, 161L, 381L, 162L, 143L, 163L, 240L, 164L,
4L, 165L, 5L, 203L, 222L, 200L, 254L, 235L, 144L, 8240L, 8220L,
352L, 8221L, 8249L, 8226L, 205L, 6L, 338L, 183L, 204L, 16L, 206L,
17L, 8224L, 189L, 213L, 8211L, 211L, 8212L, 210L, 732L, 176L,
8482L, 8225L, 18L, 182L, 157L, 167L, 382L, 169L, 376L, 170L,
166L, 171L, 19L, 174L, 20L, 248L, 209L, 251L, 173L, 217L, 175L,
216L, 177L, 186L, 29L, 187L, 21L, 191L, 178L, 255L, 25L, 339L,
253L, 8250L, 179L, 207L), VNU_int = c(129L, 8364L, 8222L, 195L,
200L, 201L, 8240L, 204L, 205L, 210L, 338L, 141L, 213L, 217L,
732L, 382L, 161L, 376L, 181L, 172L, 191L, 190L, 197L, 214L, 207L,
221L, 220L, 225L, 223L, 239L, 238L, 251L, 402L, 175L, 8225L,
189L, 296L, 216L, 360L, 241L, 381L, 231L, 339L, 245L, 7840L,
173L, 8218L, 168L, 8230L, 182L, 7846L, 183L, 7848L, 184L, 6L,
185L, 7852L, 186L, 7854L, 176L, 7856L, 177L, 7858L, 178L, 7860L,
179L, 7862L, 180L, 7864L, 194L, 7866L, 192L, 7868L, 193L, 7870L,
198L, 7872L, 203L, 7874L, 204L, 7876L, 205L, 7878L, 206L, 7880L,
215L, 7882L, 217L, 7884L, 224L, 7886L, 222L, 7888L, 226L, 7890L,
227L, 7892L, 228L, 7894L, 229L, 7896L, 230L, 7898L, 232L, 7900L,
233L, 143L, 234L, 7904L, 235L, 7906L, 236L, 7908L, 242L, 8482L,
240L, 157L, 246L, 7914L, 247L, 7916L, 248L, 7918L, 249L, 7920L,
250L, 7922L, 252L, 7924L, 255L, 7926L, 253L, 7928L, 254L)), row.names = c(NA,
-134L), class = "data.frame")
out$Unicode <- sapply(out$Unicode_int, intToUtf8)
out$TCVN3 <- sapply(out$TCVN3_int, intToUtf8)
out$VISCII <- sapply(out$VISCII_int, intToUtf8)
out$VNI <- sapply(out$VNI_int, intToUtf8)
out$VPS <- sapply(out$VPS_int, intToUtf8)
out$VNU <- sapply(out$VNU_int, intToUtf8)
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/vietnameseConverter/R/loadEncodingTableVN.R
|
#' Access files in the current app
#'
#' NOTE: If you manually change your package name in the DESCRIPTION,
#' don't forget to change it here too, and in the config file.
#' For a safer name change mechanism, use the `golem::set_golem_name()` function.
#'
#' @param ... character vectors, specifying subdirectory and file(s)
#' within your package. The default, none, returns the root of the app.
#'
#' @noRd
app_sys <- function(...){
system.file(..., package = "viewpoly")
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config GOLEM_CONFIG_ACTIVE value. If unset, R_CONFIG_ACTIVE.
#' If unset, "default".
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @noRd
get_golem_config <- function(
value,
config = Sys.getenv(
"GOLEM_CONFIG_ACTIVE",
Sys.getenv(
"R_CONFIG_ACTIVE",
"default"
)
),
use_parent = TRUE
){
config::get(
value = value,
config = config,
# Modify this if your config file is somewhere else:
file = app_sys("golem-config.yml"),
use_parent = use_parent
)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/app_config.R
|
#' The application server-side
#'
#' @param input,output,session Internal parameters for {shiny}.
#' DO NOT REMOVE.
#' @import shiny
#' @noRd
app_server <- function( input, output, session ) {
# Your application server logic
# Upload size
options(shiny.maxRequestSize=50000*1024^2)
## Start modules
datas <- callModule(mod_upload_server,
"upload_ui_1",
parent_session=session)
# QTL view
callModule(mod_qtl_view_server,
"qtl_view_ui_1",
loadMap = datas$loadMap,
loadQTL = datas$loadQTL,
parent_session=session)
# Genes view
callModule(mod_genes_view_server,
"genes_view_ui_1",
loadMap = datas$loadMap,
loadQTL = datas$loadQTL,
loadJBrowse_fasta = datas$loadJBrowse_fasta,
loadJBrowse_gff3 = datas$loadJBrowse_gff3,
loadJBrowse_vcf = datas$loadJBrowse_vcf,
loadJBrowse_align = datas$loadJBrowse_align,
loadJBrowse_wig = datas$loadJBrowse_wig,
parent_session=session)
# Map view
callModule(mod_map_view_server,
"map_view_ui_1",
loadMap = datas$loadMap,
loadQTL = datas$loadQTL,
parent_session=session)
# Hidecan view
callModule(mod_hidecan_view_server,
"hidecan_view_ui_1",
loadHidecan = datas$loadHidecan,
parent_session=session)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/app_server.R
|
#' The application User-Interface
#'
#' @param request Internal parameter for `{shiny}`.
#' DO NOT REMOVE.
#' @import shiny
#' @import shinydashboard
#' @import markdown
#' @importFrom shiny NS tagList
#' @importFrom shinyjs extendShinyjs useShinyjs
#' @noRd
app_ui <- function(request) {
tagList(
# Leave this function for adding external resources
golem_add_external_resources(),
# Your application UI logic
dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
# Lab colors
tags$head(
tags$style(
HTML('
a.action-button {
color: #6c81c0;
}
a.action-button.red {
color: #6c81c0;
}
.navbar-static-top {background-color: green;}
.navbar-default .navbar-nav>.active>a {background-color: green;}
.body {
background-color: #22284c;
}
.box {margin-bottom: 40px;}
.box.box-solid > .box-header > .box-tools .btn {
position: relative;
bottom: 5px;
box-shadow: none;
}
.box.box-solid.box-primary > .box-header a,
.box.box-solid.box-primary > .box-header .btn {
color: #ffffff;
}
.box.box-solid.box-primary>.box-header {
height: 50px;
color:#fff;
background:#6c81c0
}
.box.box-solid.box-primary{
border-bottom-color:#6c81c0;
border-left-color:#6c81c0;
border-right-color:#6c81c0;
border-top-color:#6c81c0;
}
.box.box-solid.box-info>.box-header {
height: 50px;
color:#fff;
background:#22284c
}
.box.box-solid.box-info{
border-bottom-color:#22284c;
border-left-color:#22284c;
border-right-color:#22284c;
border-top-color:#22284c;
}
.box.box-solid.box-warning>.box-header {
color:#fff;
background:#a91021ff
}
.box.box-solid.box-warning{
border-bottom-color:#a91021ff;
border-left-color:#a91021ff;
border-right-color:#a91021ff;
border-top-color:#a91021ff;
}
'))),
tags$head(tags$style(HTML('.navbar-static-top {background-color: #22284c;}',
'.navbar-default .navbar-nav>.active>a {background-color: #22284c;}'))),
fluidPage(
navbarPage(
title = "VIEWpoly",
id = "viewpoly",
theme = shinythemes::shinytheme("flatly"), # <--- Specify theme here
tabPanel("About", value = "about",
includeMarkdown(system.file("ext", "about.Rmd", package = "viewpoly")),
tags$script(HTML("var header = $('.navbar > .container-fluid');
header.append('<div style=\"float:right\"><a href=\"https://www.polyploids.org/\"><img src=\"www/logo_white.png\" alt=\"alt\" style=\"float:right;width:120px;height:80px;padding-top:10px;padding-bottom:10px;\"> </a>`</div>');
console.log(header)")
),
),
tabPanel("Input data", value = "upload",
mod_upload_ui("upload_ui_1")),
tabPanel("QTL", value = "qtl",
mod_qtl_view_ui("qtl_view_ui_1")),
tabPanel("Genome", value = "genes",
mod_genes_view_ui("genes_view_ui_1")),
tabPanel("Map", value = "map",
mod_map_view_ui("map_view_ui_1")),
tabPanel("Hidecan", value = "hidecan",
mod_hidecan_view_ui("hidecan_view_ui_1"))
)
)
)
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
golem_add_external_resources <- function(){
add_resource_path(
'www', app_sys('app/www')
)
tags$head(
favicon(),
bundle_resources(
path = app_sys('app/www'),
app_title = 'viewpoly'
)
# Add here other external resources
# for example, you can add shinyalert::useShinyalert()
)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/app_ui.R
|
#' Draws linkage map, parents haplotypes and marker doses
#' Adapted from MAPpoly
#'
#' @param left.lim covered window in the linkage map start position
#' @param right.lim covered window in the linkage map end position
#' @param ch linkage group ID
#' @param maps list containing a vector for each linkage group markers with marker positions (named with marker names)
#' @param ph.p1 list containing a data.frame for each group with parent 1 estimated phases. The data.frame contain the columns:
#' 1) Character vector with chromosome ID; 2) Character vector with marker ID;
#' 3 to (ploidy number)*2 columns with each parents haplotypes
#' @param ph.p2 list containing a data.frame for each group with parent 2 estimated phases. See ph.p1 parameter description.
#' @param d.p1 list containing a data.frame for each group with parent 1 dosages. The data.frame contain the columns:
#' 1) character vector with chromosomes ID;
#' 2) Character vector with markers ID; 3) Character vector with parent ID;
#' 4) numerical vector with dosage
#' @param d.p2 list containing a data.frame for each group with parent 2 dosages. See d.p1 parameter description
#' @param snp.names logical TRUE/FALSE. If TRUE it includes the marker names in the plot
#' @param software character defined from each software it comes from
#'
#' @return graphic representing selected section of a linkage group
#' @importFrom graphics legend
#'
#' @keywords internal
draw_map_shiny<-function(left.lim = 0, right.lim = 5, ch = 1,
maps.dist, ph.p1, ph.p2, d.p1, d.p2, snp.names=TRUE, software = NULL)
{
par <- lines <- points <- axis <- mtext <- text <- NULL
Set1 <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
Dark2 <- c("#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E", "#E6AB02", "#A6761D", "#666666")
setout <- c("#9E0142", "#BE2449", "#DA464C", "#EC6145", "#F7834D", "#FCAA5F", "#FDC877", "#FEE391",
"#FEF5AF", "#F7FCB3", "#E8F69C", "#CAE99D","#A6DBA4", "#7ECBA4", "#59B4AA", "#3B92B8", "#4470B1", "#5E4FA2")
ch <- as.numeric(ch)
ploidy <- dim(ph.p1[[1]])[2]
# if(is.character(ch))
# ch <- as.numeric(strsplit(ch, split = " ")[[1]][3])
if(software == "onemap"){
alleles <- unique(as.vector(sapply(ph.p1, function(x) unique(unlist(x)))))
alleles <- sort(unique(c(alleles, as.vector(sapply(ph.p2, function(x) unique(unlist(x)))))))
} else alleles <- unique(as.vector(ph.p1[[1]]))
if(length(alleles) < 3) var.col <- c("#E41A1C", "#377EB8") else var.col <- Set1[1:length(alleles)]
names(var.col) <- alleles
if(ploidy < 3) d.col <- c(NA, "#1B9E77", "#D95F02") else d.col<-c(NA, Dark2[1:ploidy])
names(d.col) <- 0:ploidy
d.col[1]<-NA
x <- maps.dist[[ch]]
lab <- names(maps.dist[[ch]])
zy <- seq(0, 0.5, length.out = ploidy) + 1.5
pp1 <- ph.p1[[ch]]
pp2 <- ph.p2[[ch]]
dp1 <- d.p1[[ch]]
dp2 <- d.p2[[ch]]
x1<-abs(left.lim - x)
x2<-abs(right.lim - x)
id.left<-which(x1==min(x1))[1]
id.right<-rev(which(x2==min(x2)))[1]
par(mai = c(0.5,0.15,0,0))
curx<-x[id.left:id.right]
exten <- curx
exten[1] <- exten[1] - 1
exten[length(exten)] <- exten[length(exten)] + 1
plot(x = exten,
y = rep(.5,length(curx)),
type = "n" ,
ylim = c(.1, 5.5),
#xlim = c(min(curx), max(curx)),
axes = FALSE)
lines(c(x[id.left], x[id.right]), c(.5, .5), lwd=15, col = "gray")
points(x = curx,
y = rep(.5,length(curx)),
xlab = "", ylab = "",
pch = "|", cex=1.5,
ylim = c(0,2))
axis(side = 1, line = -1)
mtext(text = "Distance (cM)", side = 1, adj = 1, line = 1)
#Parent 2
for(i in 1:ploidy)
{
lines(c(x[id.left], x[id.right]), c(zy[i], zy[i]), lwd=10, col = "gray")
points(x = seq(x[id.left], x[id.right], length.out = length(curx)),
y = rep(zy[i], length(curx)),
col = var.col[pp2[id.left:id.right,i]],
pch = 15,
cex = 2)
}
mtext(text = "Parent 2", side = 2, at = mean(zy), line = -3, font = 4, padj =1)
for(i in 1:ploidy)
mtext(letters[(2*ploidy):(ploidy+1)][i], at = zy[i], side = 2, line = -4, font = 1, padj =1)
connect.lines<-seq(x[id.left], x[id.right], length.out = length(curx))
for(i in 1:length(connect.lines))
lines(c(curx[i], connect.lines[i]), c(0.575, zy[1]-.05), lwd=0.3)
if(software == "mappoly") {
points(x = seq(x[id.left], x[id.right], length.out = length(curx)),
y = zy[ploidy]+0.05+dp2[id.left:id.right]/20,
col = d.col[as.character(dp2[id.left:id.right])],
pch = 19, cex = .7)
}
corners = par("usr")
par(xpd = TRUE)
text(x = corners[1]+.5, y = mean(zy[ploidy]+0.05+(1:ploidy/20)), "Doses")
#Parent 1
zy<-zy+1.1
for(i in 1:ploidy)
{
lines(c(x[id.left], x[id.right]), c(zy[i], zy[i]), lwd=10, col = "gray")
points(x = seq(x[id.left], x[id.right], length.out = length(curx)),
y = rep(zy[i], length(curx)),
col = var.col[pp1[id.left:id.right,i]],
pch = 15,
cex = 2)
}
mtext(text = "Parent 1", side = 2, at = mean(zy), line = -3, font = 4)
if(software == "mappoly") {
points(x = seq(x[id.left], x[id.right], length.out = length(curx)),
y = zy[ploidy]+0.05+dp1[id.left:id.right]/20,
col = d.col[as.character(dp1[id.left:id.right])],
pch = 19, cex = .7)
}
corners = par("usr")
par(xpd = TRUE)
text(x = corners[1]+.5, y = mean(zy[ploidy]+0.05+(1:ploidy/20)), "Doses")
if(snp.names)
text(x = seq(x[id.left], x[id.right], length.out = length(curx)),
y = rep(zy[ploidy]+0.05+.3, length(curx)),
labels = names(curx),
srt=90, adj = 0, cex = .7)
for(i in 1:ploidy)
mtext(letters[ploidy:1][i], at = zy[i], side = 2, line = -4, font = 1, padj =1)
legend("topleft", legend= c(alleles, "-"),
fill =c(var.col, "white"), horiz = TRUE,
box.lty=0, bg="transparent")
}
#' Gets summary information from map.
#' Adapted from MAPpoly
#'
#' @param left.lim covered window in the linkage map start position
#' @param right.lim covered window in the linkage map end position
#' @param ch linkage group ID
#' @param maps list containing a vector for each linkage group markers with marker positions (named with marker names)
#' @param d.p1 list containing a data.frame for each group with parent 1 dosages. The data.frame contain the columns:
#' 1) character vector with chromosomes ID;
#' 2) Character vector with markers ID; 3) Character vector with parent ID;
#' 4) numerical vector with dosage
#' @param d.p2 list containing a data.frame for each group with parent 2 dosages. See d.p1 parameter description
#'
#' @return list with linkage map information: doses; number snps by group; cM per snp; map size; number of linkage groups
#'
#'
#' @keywords internal
map_summary<-function(left.lim = 0, right.lim = 5, ch = 1,
maps, d.p1, d.p2){
ch <- as.numeric(ch)
# if(is.character(ch))
# ch <- as.numeric(strsplit(ch, split = " ")[[1]][3])
x <- maps[[ch]]
lab <- names(maps[[ch]])
ploidy = max(c(d.p1[[ch]], d.p2[[ch]]))
d.p1<-d.p2[[ch]]
d.p2<-d.p1[[ch]]
x1<-abs(left.lim - x)
x2<-abs(right.lim - x)
id.left<-which(x1==min(x1))[1]
id.right<-rev(which(x2==min(x2)))[1]
curx<-x[id.left:id.right]
w<-table(paste(d.p1[id.left:id.right], d.p2[id.left:id.right], sep = "-"))
M<-matrix(0, nrow = ploidy+1, ncol = ploidy+1, dimnames = list(0:ploidy, 0:ploidy))
for(i in as.character(0:ploidy))
for(j in as.character(0:ploidy))
M[i,j]<-w[paste(i,j,sep = "-")]
M[is.na(M)]<-0
return(list(doses = M,
number.snps = length(curx),
length = diff(range(curx)),
cM.per.snp = round(diff(range(curx))/length(curx), 3),
full.size = as.numeric(maps[[ch]][length(maps[[ch]])]),
number.of.lgs = length(maps)))
}
#' Summary maps - adapted from MAPpoly
#'
#' This function generates a brief summary table
#'
#' @param viewmap a list of objects of class \code{viewmap}
#' @param software character defined from each software it comes from
#'
#' @return a data frame containing a brief summary of all maps
#'
#' @author Gabriel Gesteira, \email{[email protected]}
#' @author Cristiane Taniguti, \email{[email protected]}
#'
#'
#' @keywords internal
summary_maps = function(viewmap, software = NULL){
max_gap <- sapply(viewmap$maps, function(x) max(diff(x$l.dist)))
if(software == "mappoly"){
simplex <- mapply(function(x,y) {
sum((x == 1 & y == 0) | (x == 0 & y == 1) |
(x == max(x) & y == (max(y) -1)) |
(x == (max(x) -1) & y == max(y)))
}, viewmap$d.p1, viewmap$d.p2)
double_simplex <- mapply(function(x,y) {
sum((x == 1 & y == 1) | (x == 3 & y == 3))
}, viewmap$d.p1, viewmap$d.p2)
results = data.frame("LG" = as.character(seq(1,length(viewmap$maps),1)),
"Genomic sequence" = as.character(unlist(lapply(viewmap$maps, function(x) paste(unique(x$g.chr), collapse = "-")))),
"Map length (cM)" = round(sapply(viewmap$maps, function(x) x$l.dist[length(x$l.dist)]),2),
"Markers/cM" = round(sapply(viewmap$maps, function(x) length(x$l.dist)/x$l.dist[length(x$l.dist)]),2),
"Simplex" = simplex,
"Double-simplex" = double_simplex,
"Multiplex" = sapply(viewmap$maps, function(x) length(x$mk.names)) - (simplex + double_simplex),
"Total" = sapply(viewmap$maps, function(x) length(x$mk.names)),
"Max gap" = round(max_gap,2),
check.names = FALSE, stringsAsFactors = F)
results = rbind(results, c('Total', NA, sum(as.numeric(results$`Map length (cM)`)),
round(mean(as.numeric(results$`Markers/cM`)),2),
sum(as.numeric(results$Simplex)),
sum(as.numeric(results$`Double-simplex`)),
sum(as.numeric(results$Multiplex)),
sum(as.numeric(results$Total)),
round(mean(as.numeric(results$`Max gap`)),2)))
} else if(software == "onemap"){
counts <- lapply(viewmap$d.p1, function(x)
as.data.frame(pivot_longer(as.data.frame(table(names(x))), cols = 2)[,-2]))
colnames(counts[[1]])[2] <- paste0("LG",1)
all_count <- counts[[1]]
for(i in 2:(length(counts))){
colnames(counts[[i]])[2] <- paste0("LG",i)
all_count <- full_join(all_count, counts[[i]], by="Var1")
}
rm.na <- as.matrix(all_count[,2:4])
rm.na[which(is.na(rm.na))] <- 0
all_count <- data.frame(marker_types = all_count[,1], rm.na)
all_count <- t(all_count)
colnames(all_count) <- all_count[1,]
all_count <- all_count[-1,]
all_count <- apply(all_count, 2, as.numeric)
LG = as.character(seq(1,length(viewmap$maps),1))
if(any(sapply(viewmap$maps, function(x) any(is.na(x$g.chr))))){
warning("There are missing genomic position information in at least one of the groups")
}
chr <- sapply(viewmap$maps, function(x) unique(x$g.chr[-which(is.na(x$g.chr))]))
if(is.list(chr)) {
warning("There are groups with combination of more than one genomic chromosome.")
chr[which(sapply(chr, length) >= 2)] <- NA
chr <- unlist(chr)
}
results1 = data.frame(LG,
"Genomic sequence" = chr,
"Map length (cM)" = round(sapply(viewmap$maps, function(x) x$l.dist[length(x$l.dist)]),2),
"Markers/cM" = round(sapply(viewmap$maps, function(x) length(x$l.dist)/x$l.dist[length(x$l.dist)]),2))
colnames(results1) <- c("LG", "Genomic sequence", "Map length (cM)", "Markers/cM")
results2 = data.frame("Total" = sapply(viewmap$maps, function(x) length(x$mk.names)),
"Max gap" = round(max_gap,2),
check.names = FALSE, stringsAsFactors = F)
results <- cbind(results1, all_count, results2)
results<- rbind(results, c("Total", "NA", apply(results[,3:ncol(results)], 2, sum)))
}
return(results)
}
#' Plot a genetic map - Adapted from MAPpoly
#'
#' This function plots a genetic linkage map(s)
#'
#' @param viewmap object of class \code{viewmap}
#'
#' @param horiz logical. If FALSE, the maps are plotted vertically with the first map to the left.
#' If TRUE (default), the maps are plotted horizontally with the first at the bottom
#'
#' @param col a vector of colors for the bars or bar components (default = 'lightgrey')
#' \code{ggstyle} produces maps using the default \code{ggplot} color palette
#'
#' @param title a title (string) for the maps (default = 'Linkage group')
#'
#' @return A \code{data.frame} object containing the name of the markers and their genetic position
#'
#' @author Marcelo Mollinari, \email{[email protected]}
#' @author Cristiane Taniguti, \email{[email protected]}
#'
#' @references
#' Mollinari, M., and Garcia, A. A. F. (2019) Linkage
#' analysis and haplotype phasing in experimental autopolyploid
#' populations with high ploidy level using hidden Markov
#' models, _G3: Genes, Genomes, Genetics_.
#' \doi{10.1534/g3.119.400378}
#'
#'
#' @keywords internal
plot_map_list <- function(viewmap, horiz = TRUE, col = "ggstyle", title = "Linkage group"){
axis <- NULL
map.list <- viewmap$maps
if(all(col == "ggstyle"))
col <- gg_color_hue(length(map.list))
if(length(col) == 1)
col <- rep(col, length(map.list))
z <- NULL
if(is.null(names(map.list)))
names(map.list) <- 1:length(map.list)
max.dist <- max(sapply(map.list, function(x) x$l.dist[length(x$l.dist)]))
if(horiz){
plot(0,
xlim = c(0, max.dist),
ylim = c(0,length(map.list)+1),
type = "n", axes = FALSE,
xlab = "Map position (cM)",
ylab = title)
axis(1)
for(i in 1:length(map.list)){
z <- rbind(z, data.frame(mrk = map.list[[i]]$mk.names,
LG = names(map.list)[i], pos = map.list[[i]]$l.dist))
plot_one_map(map.list[[i]]$l.dist, i = i, horiz = TRUE, col = col[i])
}
axis(2, at = 1:length(map.list), labels = names(map.list), lwd = 0, las = 2)
} else{
plot(0,
ylim = c(-max.dist, 0),
xlim = c(0,length(map.list)+1),
type = "n", axes = FALSE,
ylab = "Map position (cM)",
xlab = title)
x <- axis(2, labels = FALSE, lwd = 0)
axis(2, at = x, labels = abs(x))
for(i in 1:length(map.list)){
z <- rbind(z, data.frame(mrk = map.list[[i]]$mk.names,
LG = names(map.list)[i],pos = map.list[[i]]$l.dist))
plot_one_map(map.list[[i]]$l.dist, i = i, horiz = FALSE, col = col[i])
}
axis(3, at = 1:length(map.list), labels = names(map.list), lwd = 0, las = 2)
}
invisible(z)
}
#' Color pallet ggplot-like - Adapted from MAPpoly
#'
#' @param n number of colors
#'
#' @importFrom grDevices hcl col2rgb hsv rgb2hsv
#'
#'
#' @keywords internal
gg_color_hue <- function(n) {
x <- rgb2hsv(col2rgb("steelblue"))[, 1]
cols = seq(x[1], x[1] + 1, by = 1/n)
cols = cols[1:n]
cols[cols > 1] <- cols[cols > 1] - 1
return(hsv(cols, x[2], x[3]))
}
#' Plot a single linkage group with no phase - from MAPpoly
#'
#' @param x vector of genetic distances
#' @param i margins size
#' @param horiz logical TRUE/FALSE. If TRUE the map is plotted horizontally.
#' @param col color pallete to be used
#'
#' @keywords internal
plot_one_map <- function(x, i = 0, horiz = FALSE, col = "lightgray")
{
rect <- tail <- lines <- NULL
if(horiz)
{
rect(xleft = x[1], ybottom = i-0.25,
xright = tail(x,1), ytop = i+0.25,
col = col)
for(j in 1:length(x))
lines(x = c(x[j], x[j]), y = c(i-0.25, i+0.25), lwd = .5)
} else {
x <- -rev(x)
rect(xleft = i-0.25, ybottom = x[1],
xright = i+0.25, ytop = tail(x,1),
col = col)
for(j in 1:length(x))
lines(y = c(x[j], x[j]), x = c(i-0.25, i+0.25), lwd = .5)
}
}
#' Scatter plot relating linkage map and genomic positions
#'
#' @param viewmap object of class \code{viewmap}
#' @param group selected group ID
#' @param range.min minimum value of the selected position range
#' @param range.max maximum value of the selected position range
#'
#' @keywords internal
plot_cm_mb <- function(viewmap, group, range.min, range.max) {
l.dist <- g.dist <- high <- mk.names <- NULL
map.lg <- viewmap$maps[[as.numeric(group)]]
map.lg$high <- map.lg$g.dist
map.lg$high[round(map.lg$l.dist,5) < range.min | round(map.lg$l.dist,5) > range.max] <- "black"
map.lg$high[round(map.lg$l.dist,5) >= range.min & round(map.lg$l.dist,5) <= range.max] <- "red"
map.lg$high <- as.factor(map.lg$high)
p <- ggplot(map.lg, aes(x=l.dist, y = g.dist/1000,
colour = high,
text = paste("Marker:", mk.names, "\n",
"Genetic:", round(l.dist,2), "cM \n",
"Genomic:", g.dist/1000, "Mb"))) +
geom_point() + scale_color_manual(values=c('black','red')) +
theme(legend.position = "none") +
labs(x = "Linkage map (cM)", y = "Reference genome (Mb)") +
theme_bw()
return(p)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/functions_map.R
|
#' Logarithm of \emph{P}-value (LOP) profile plots. Modified version of QTLpoly function.
#'
#' Plots profiled logarithm of score-based \emph{P}-values (LOP) from individual or combined traits.
#'
#' @param profile data.frame with: pheno - phenotype ID; LOP - significance value for the QTL.
#' It can be LOP, LOD or DIC depending of the software used
#' @param qtl_info data.frame with: LG - linkage group ID; Pos - position in linkage map (cM);
#' Pheno - phenotype ID; Pos_lower - lower position of confidence interval;
#' Pos_upper - upper position of the confidence interval; Pval - QTL p-value; h2 - herdability
#' @param selected_mks data.frame with: LG - linkage group ID; mk - marker ID; pos - position in linkage map (cM)
#' @param pheno.col integer identifying phenotype
#' @param lgs.id integer identifying linkage group
#' @param by_range logical TRUE/FALSE. If TRUE range.min and range.max will set a colored window in the plot and the other positions will be gray.
#' If FALSE, range.min and range.max is ignored
#' @param range.min position in centimorgan defining the start of the colored window
#' @param range.max position in centimorgan defining the end of the colored window
#' @param plot logical TRUE/FALSE. If FALSE the function return a data.frame with information for \code{only_plot_profile} function.
#' If TRUE, it returns a ggplot graphic.
#' @param software character defining which software was used for QTL analysis. Currently support for: QTLpoly, diaQTL and polyqtlR.
#'
#' @import ggplot2
#' @import dplyr
#' @importFrom plotly TeX
#' @importFrom utils tail
#'
#' @return ggplot graphic (if plot == TRUE) or data.frame (if plot == FALSE) with information
#' from QTL significance profile
#'
#' @keywords internal
plot_profile <- function(profile, qtl_info, selected_mks, pheno.col = NULL,
lgs.id = NULL, by_range = TRUE, range.min = NULL,
range.max = NULL, plot=TRUE, software = NULL) {
pheno <- LG <- `Position (cM)` <- Trait <- INT <- . <- NULL
lgs.size <- selected_mks %>% group_by(.data$LG) %>% group_map(~ tail(.x, 1)) %>% do.call(rbind, .)
lgs.size <- lgs.size$pos
lines <- points <- thre <- map <- data.frame()
y.dat <- trait.names <- c()
count <- 0
nphe <- length(pheno.col)
LGS <- selected_mks$LG
POS <- selected_mks$pos
for(p in 1:nphe) { #lines
TRT <- rep(unique(profile$pheno)[pheno.col[p]], length(LGS))
SIG <- profile[which(profile$pheno == TRT),2]
lines <- rbind(lines, data.frame(TRT=as.factor(TRT), LGS=LGS, POS=POS, SIG=SIG))
}
count <- 0
y.dat <- c()
for(p in 1:nphe) { #points
trait.names <- unique(profile$pheno)[pheno.col[p]]
if(!is.null(qtl_info)) {
qtl_info.sub <- qtl_info %>% filter(pheno == trait.names) %>% filter(LG %in% lgs.id)
if(dim(qtl_info.sub)[1] > 0){
nqtls <- qtl_info.sub %>% summarize(n())
TRT <- qtl_info.sub$pheno
LGS <- qtl_info.sub$LG
POS <- qtl_info.sub$Pos
INF <- qtl_info.sub$Pos_lower
SUP <- qtl_info.sub$Pos_upper
PVAL <- qtl_info.sub[,6]
H2 <- qtl_info.sub$h2
if(!is.null(H2)){
points <- rbind(points, data.frame(TRT=TRT, LGS=LGS, POS=POS, INF=INF, SUP=SUP, PVAL = PVAL, H2 = round(H2,2)))
} else
points <- rbind(points, data.frame(TRT=TRT, LGS=LGS, POS=POS, INF=INF, SUP=SUP, PVAL = PVAL))
count <- count+1
y.dat <- c(y.dat, rep((-0.5*count), nqtls))
}
}
}
points <- cbind(points, y.dat)
# The axis name change according with software
y.lab <- colnames(profile)[2]
if(y.lab == "LOP") {
if(by_range){
y.lab <- "LOP"
} else {
y.lab <- expression(-log[10](italic(P)))
}
} else if(y.lab == "deltaDIC") {
lines$SIG <- -lines$SIG
y.lab <- "-\U0394 DIC"
}
# Filter group
if(!is.null(lgs.id)){
lines <- lines[which(lines$LGS %in% lgs.id),]
points <- points[which(points$LGS %in% lgs.id),]
}
# Interval
lines$INT <- NA
for(i in 1:dim(points)[1]){
idx <- which(lines$POS >= points$INF[i] &
lines$POS <= points$SUP[i] &
lines$LGS == points$LGS[i] &
lines$TRT == points$TRT[i])
lines$INT[idx] <- lines$POS[idx]
}
# Filter position
lines$range <- NA
if(!is.null(range.min)){
lines$range[which(lines$POS >= range.min & lines$POS <= range.max)] <- lines$SIG[which(lines$POS >= range.min & lines$POS <= range.max)]
lines$SIG[which(lines$POS > range.min & lines$POS < range.max)] <- NA
}
if(dim(points)[1] > 0){
dot.height <- data.frame(trt = unique(points$TRT), heigth = unique(points$y.dat))
y.dat.lines <- dot.height$heigth[match(lines$TRT, dot.height$trt)]
lines$y.dat <- y.dat.lines
colnames(points)[1:3] <- c("Trait", "LG", "Position (cM)")
} else lines$y.dat <- NA
colnames(lines) <- c("Trait", "LG", "Position (cM)", "SIG", "INT", "range","y.dat")
if(max(lgs.size[lgs.id]) > 200) cutx <- 150 else cutx <- 100
if(length(lgs.size[lgs.id]) > 10) {linesize <- 1} else {cutx <- 50; linesize <- 1.25}
lines$y.dat <- lines$y.dat + min(lines$SIG, na.rm = T)
points$y.dat <- points$y.dat + min(lines$SIG, na.rm = T)
scale.max <- round(max(lines$SIG[which(is.finite(lines$SIG))], na.rm = T),0)
scale.max <- scale.max*1.2
scale.min <- round(min(lines$SIG[which(is.finite(lines$SIG))], na.rm = T),0)
if(scale.max > 50) {
lines$y.dat <- lines$y.dat*3
points$y.dat <- points$y.dat*3
scale.each <- 10
} else scale.each = 2
if(plot){
if(by_range){
pl <- ggplot(data = lines, aes(x = `Position (cM)`, color = Trait)) +
facet_grid(.~LG, space = "free") +
{if(!all(is.na(lines$INT))) geom_path(data=lines, aes(x = INT, y = y.dat), colour = "black", na.rm = TRUE)} +
geom_line(data=lines, aes(y = range, color = Trait), linewidth=linesize, alpha=0.8, lineend = "round", na.rm = TRUE) +
geom_line(data=lines, aes(y = SIG, group = Trait), colour = "gray", linewidth=linesize, alpha=0.8, lineend = "round", na.rm = TRUE) +
scale_x_continuous(breaks=seq(0,max(lgs.size),cutx)) +
{if(dim(points)[1] > 0) geom_point(data=points, aes(y = y.dat, color = Trait), shape = 2, size = 2, stroke = 1, alpha = 0.8)} +
scale_y_continuous(breaks=seq(scale.min, scale.max,scale.each)) +
guides(color = guide_legend("Trait")) +
labs(y = y.lab, x = "Position (cM)", subtitle="Linkage group") +
theme_classic() + theme(plot.margin = margin(0.8,1,1.5,1.2, "cm"))
} else {
pl <- ggplot(data = lines, aes(x = `Position (cM)`, color = Trait, group=1)) +
facet_grid(.~LG, space = "free") +
{if(!all(is.na(lines$INT))) geom_path(data=lines, aes(x = INT, y =y.dat), colour = "black", na.rm = TRUE)} +
geom_line(data=lines, aes(y = SIG, color = Trait), linewidth=linesize, alpha=0.8, lineend = "round", na.rm = TRUE) +
scale_x_continuous(breaks=seq(0,max(lgs.size),cutx)) +
{if(dim(points)[1] > 0) geom_point(data=points, aes(y = y.dat, color = Trait), shape = 2, size = 2, stroke = 1, alpha = 0.8)} +
scale_y_continuous(breaks=seq(scale.min, scale.max, scale.each)) +
guides(color = guide_legend("Trait")) +
labs(y = y.lab, x = "Position (cM)", subtitle="Linkage group") +
theme_classic()
}
} else {
pl <- list(lines = lines, points =points, linesize = linesize,
cutx = cutx, y.lab = y.lab)
size <- table(pl$lines$Trait)[1]
pl$lines$x <- rep(1:size, length(table(pl$lines$Trait)))
pl$lines$x.int <- NA
pl$lines$x.int[which(!is.na(pl$lines$INT))] <- pl$lines$x[which(!is.na(pl$lines$INT))]
if(dim(points)[1] > 0){
all <- paste0(pl$lines$Trait, "_", round(pl$lines$`Position (cM)`,2), "_", pl$lines$LG)
point <- paste0(pl$points$Trait, "_", round(pl$points$`Position (cM)`,2), "_", pl$points$LG)
pl$points$x <- pl$lines$x[match(point, all)]
}
pl$lines$SIG[which(pl$lines$SIG == "Inf")] <- NA ## Bugfix!!!
}
return(pl)
}
#' Only the plot part of plot_profile function
#'
#' @param pl.in output object from \code{plot_profile} when plot == TRUE
#'
#' @return ggplot graphic with QTL significance profile
#'
#'
#' @keywords internal
only_plot_profile <- function(pl.in){
x <- x.int <- y.dat <- SIG <- Trait <- qtl <- NULL
vlines <- split(pl.in$lines$x, pl.in$lines$LG)
vlines <- sapply(vlines, function(x) x[1])
pl <- ggplot(data = pl.in$lines, aes(x = x)) +
{if(!all(is.na( pl.in$lines$INT))) geom_path(data= pl.in$lines, aes(x = x.int, y =y.dat), colour = "black", na.rm = TRUE)} +
geom_line(data=pl.in$lines, aes(y = SIG, color = Trait), linewidth=pl.in$linesize, alpha=0.8) +
#guides(color = guide_legend("Trait")) +
{if(dim(pl.in$points)[1] > 0) geom_point(data=pl.in$points, aes(y = y.dat, color = Trait), shape = 2, size = 2, stroke = 1, alpha = 0.8)} +
{if(length(vlines) > 1) geom_vline(xintercept=vlines, linetype="dashed", linewidth=.5, alpha=0.8, na.rm = TRUE)} + #threshold
labs(y = pl.in$y.lab, x = "Linkage group") +
annotate(x=vlines,y=+Inf,label= paste0("LG", names(vlines)),vjust=1, hjust= -0.1,geom="label") +
ylim(c(min(pl.in$lines$y.dat),max(pl.in$lines$SIG, na.rm = T) + 3)) +
theme_classic() + theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank())
return(pl)
}
#' Get effects information
#'
#' @param qtl_info data.frame with: LG - linkage group ID; Pos - position in linkage map (cM);
#' Pheno - phenotype ID; Pos_lower - lower position of confidence interval;
#' Pos_upper - upper position of the confidence interval; Pval - QTL p-value; h2 - herdability
#' @param effects data.frame with: pheno - phenotype ID; qtl.id - QTL ID; haplo - haplotype ID; effect - haplotype effect value
#' @param pheno.col integer identifying phenotype
#' @param parents vector with parents ID
#' @param lgs vector of integers with linkage group ID of selected QTL/s
#' @param groups vector of integers with selected linkage group ID
#' @param position vector of centimorgan positions of selected QTL/s
#' @param software character defining which software was used for QTL analysis. Currently support for: QTLpoly, diaQTL and polyqtlR.
#' @param design character defining the graphic design. Options: `bar` - barplot of the effects;
#' `circle` - circular plot of the effects (useful to compare effects of different traits);
#' `digenic` - heatmap plotting sum of additive effects (bottom diagonal) and digenic effects (top diagonal) when present
#'
#' @return ggplot graphic
#'
#'
#' @importFrom tidyr pivot_longer
#' @importFrom dplyr filter `%>%`
#' @import ggplot2
#'
#' @keywords internal
data_effects <- function(qtl_info, effects, pheno.col = NULL,
parents = NULL,
lgs = NULL, groups = NULL, position = NULL,
software, design = c("bar", "circle", "digenic")) {
CI.lower <- CI.upper <- x <- y <- z <- Estimates <- LG <- unique.id <- NULL
x.axis <- haplo <- effect <- qtl.id <- Alleles <- . <- NULL
if(is.null(pheno.col)) {
pheno.col.n <- 1:length(unique(qtl_info$pheno))
} else {
pheno.col.n <- which(unique(qtl_info$pheno) %in% pheno.col)
}
if(software == "QTLpoly" | software == "diaQTL"){
if(software == "QTLpoly"){
ploidy <- max(nchar(effects$haplo))
if(is.null(parents)) {# Multi-population still not implemented
parents <- c("P1", "P2")
}
if(ploidy == 4){
p1_old <- c("a","b","c","d")
p2_old <- c("e","f","g", "h")
} else if(ploidy == 6){
p1_old <- c("a","b","c","d","e","f")
p2_old <- c("g", "h","i", "j","k","l")
}
duo <- expand.grid(c(p1_old, p2_old), c(p1_old, p2_old))
duo <- apply(duo, 1, function(x) paste0(sort(unique(x)),collapse = ""))
duo <- unique(duo)
p1 <- parents[1]
p2 <- parents[2]
p1_new <- paste0(p1,".",1:ploidy)
p2_new <- paste0(p2,".",1:ploidy)
duo_new <- expand.grid(c(p1_new, p2_new), c(p1_new, p2_new))
duo_new <- apply(duo_new, 1, function(x) paste0(sort(unique(x)),collapse = "x"))
duo_new <- unique(duo_new)
names(duo_new) <- duo
} else if(software == "diaQTL") {
get.size <- filter(effects, .data$pheno == unique(qtl_info$pheno)[1] & .data$qtl.id == 1 & !grepl("x",.data$haplo)) # issue if parents name has x: fixme!
ploidy = as.numeric(table(substring(unique(get.size$haplo), 1, nchar(unique(get.size$haplo)) -2))[1])
old.parents.names <- unique(substr(get.size$haplo,1,nchar(get.size$haplo)-2))
n.parents <- length(old.parents.names)
if(is.null(parents)) {
parents <- paste0("P", 1:n.parents)
} else {
if(length(parents) != n.parents)
stop(safeError(paste0("Your data set has", n.parents, " parental genotyopes. Please, provide a name for each one.")))
}
# Update parents names in effects data
for(z in 1:n.parents)
effects$haplo <- gsub(old.parents.names[z], parents[z], effects$haplo)
} else if(software == "polyqtlR"){
ploidy <- (dim(effects)[2] - 3)/2
if(is.null(parents)) {# Multi-population still not implemented
p1 <- "P1"
p2 <- "P2"
} else {
p1 <- parents[1]
p2 <- parents[2]
}
}
qtl_info.sub <- qtl_info %>% filter(.data$pheno %in% unique(qtl_info$pheno)[pheno.col.n]) %>%
filter(.data$Pos %in% position) %>% filter(.data$LG %in% lgs)
total <- split(qtl_info, qtl_info$pheno)
total <- lapply(total, function(x) paste0(x[,1], "_", x[,2], "_", x[,5]))
total <- total[match(unique(qtl_info$pheno), names(total))]
sub <- split(qtl_info.sub, qtl_info.sub$pheno)
sub <- lapply(sub, function(x) paste0(x[,1], "_", x[,2], "_", x[,5]))
sub <- sub[order(match(names(sub), pheno.col))]
group.idx <- list()
for(i in 1:length(pheno.col.n)){
idx <- match(names(sub)[i], names(total))
group.idx[[idx]] <- match(sub[[i]], total[[idx]])
}
plots2 <- all.additive <- list()
count <- count.p <- 1
for(p in pheno.col.n) {
effects.sub <- effects %>% filter(.data$pheno == unique(qtl_info$pheno)[p]) %>%
filter(.data$qtl.id %in% group.idx[[p]])
nqtl <- length(unique(effects.sub$qtl.id))
if(nqtl > 0) {
plots1 <- list()
count.q <- 1
for(q in group.idx[[p]]) {
data <- filter(effects.sub, qtl.id == q)
if(ploidy == 4) {
if(software == "diaQTL"){
if(any(data$type == "Digenic")){
data <- data.frame(Estimates=as.numeric(data$effect), CI.lower = data$CI.lower, CI.upper = data$CI.upper, Alleles=data$haplo, Parent=c(rep(parents, each = ploidy),rep(NA,dim(data)[1]-n.parents*ploidy)), Effects=c(rep("Additive",n.parents*ploidy),rep("Digenic",dim(data)[1]-n.parents*ploidy)))
} else {
data <- data.frame(Estimates=as.numeric(data$effect), CI.lower = data$CI.lower, CI.upper = data$CI.upper, Alleles=data$haplo, Parent=rep(parents, each = ploidy), Effects="Additive")
}
} else {
data <- data[1:36,]
data <- data.frame(Estimates=as.numeric(data$effect), Alleles=data$haplo, Parent=c(rep(p1,4),rep(p2,4),rep(p1,14),rep(p2,14)), Effects=c(rep("Additive",8),rep("Digenic",28)))
data$Alleles <- duo_new[match(data$Alleles, names(duo_new))]
}
} else if(ploidy == 6) {
#data <- data[-c(18:23,28:33,37:42,45:50,52:63,83:88,92:97,100:105,107:133,137:142,145:150,152:178,181:186,188:214,216:278,299:1763),] # fix me
data <- data[1:78,]
data <- data.frame(Estimates=as.numeric(data$effect), Alleles=data$haplo, Parent=c(rep(p1,6),rep(p2,6),rep(p1,33),rep(p2,33)), Effects=c(rep("Additive",12),rep("Digenic",66)))
data$Alleles <- duo_new[match(data$Alleles, names(duo_new))]
}
data$Parent <- factor(data$Parent, levels=unique(data$Parent))
if(design == "bar"){
if(software == "QTLpoly"){
lim <- max(abs(data[which(data$Effects == "Additive"),]$Estimates))
} else
lim <- max(abs(c(data[which(data$Effects == "Additive"),]$CI.lower, data[which(data$Effects == "Additive"),]$CI.upper)))
plot <- ggplot(data[which(data$Effects == "Additive"),], aes(x = Alleles, y = Estimates, fill = Estimates)) +
geom_bar(stat="identity") + ylim(c(-lim, lim)) +
{if(software == "diaQTL") geom_errorbar(aes(ymin=CI.lower, ymax=CI.upper), width=.2, position=position_dodge(.9))} +
scale_fill_gradient2(low = "red", high = "blue", guide = "none") +
labs(title=unique(qtl_info$pheno)[p], subtitle=paste("LG:", sapply(strsplit(sub[[count.p]][count.q], "_"), "[",1),
"Pos:", sapply(strsplit(sub[[count.p]][count.q], "_"), "[",2))) +
facet_wrap(. ~ Parent, scales="free_x", ncol = 2, strip.position="bottom") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5),
axis.text.x.bottom = element_text(hjust = 1, vjust = 0.5, angle = 90))
plots1[[q]] <- plot
} else if(design == "digenic"){
if(!all(is.na(data[which(data$Effects == "Digenic"),]$Estimates))){
temp <- do.call(rbind, strsplit(data$Alleles, "x"))
data$x <- temp[,1]
data$y <- temp[,2]
digenic.effects <- data[which(data$Effects == "Digenic"),]
additive.effects <- data[which(data$Effects == "Additive"),]
if(software == "QTLpoly") {
plot.data <- data.frame(x= c(digenic.effects$y),
y= c(digenic.effects$x),
z= c(additive.effects$Estimates[match(digenic.effects$x, additive.effects$Alleles)] +
additive.effects$Estimates[match(digenic.effects$y, additive.effects$Alleles)]))
} else {
plot.data <- data.frame(x= c(digenic.effects$x, digenic.effects$y),
y= c(digenic.effects$y, digenic.effects$x),
z= c(digenic.effects$Estimates, digenic.effects$Estimates+
additive.effects$Estimates[match(digenic.effects$x, additive.effects$Alleles)] +
additive.effects$Estimates[match(digenic.effects$y, additive.effects$Alleles)]))
}
plot = ggplot(data= plot.data,aes(x= x, y= y, fill= z)) +
geom_tile() + scale_fill_gradient2(name="") +
labs(title = paste("Trait:", unique(qtl_info$pheno)[p]),
subtitle = paste("LG:", sapply(strsplit(sub[[count.p]][count.q], "_"), "[",1),
"Pos:", sapply(strsplit(sub[[count.p]][count.q], "_"), "[",2))) +
theme_bw() + xlab("") + ylab("") +
theme(text = element_text(size=13),axis.text.x = element_text(angle = 90,vjust=0.5,hjust=1)) +
coord_fixed(ratio=1)
plots1[[q]] <- plot
} else plots1[[q]] <- NULL
} else if(design == "circle"){
additive.effects <- data[which(data$Effects == "Additive"),]
additive.effects$pheno <- unique(qtl_info$pheno)[p]
additive.effects$qtl_id <- q
additive.effects$LG <- qtl_info.sub$LG[count]
additive.effects$Pos <- qtl_info.sub$Pos[count]
additive.effects$Estimates <- additive.effects$Estimates/max(abs(additive.effects$Estimates)) # normalize to be between -1 and 1
all.additive[[count]] <- additive.effects
names(all.additive)[count] <- unique(additive.effects$LG)
count <- count + 1
plots1 <- NULL
}
count.q <- count.q + 1
}
plots2[[p]] <- plots1
}
count.p <- count.p + 1
}
if(design != "circle"){
p <- unlist(plots2, recursive = F)
nulls <- which(sapply(p, is.null))
if(length(nulls) > 0) p <- p[-nulls]
return(p)
} else {
all.additive <- lapply(all.additive, function(x) rbind(x, x[1,]))
all.additive <- do.call(rbind, all.additive)
all.additive$unique.id <- paste0(all.additive$pheno, "/ LG:", all.additive$LG, "/ Pos:", all.additive$Pos)
breaks <- c(-1,0,1)
lgs <- unique(all.additive$LG)
p <- list()
for(i in 1:length(lgs)){
p[[i]] <- all.additive %>% filter(LG == lgs[i]) %>%
ggplot(aes(x=Alleles, y=Estimates, group=unique.id, colour=unique.id, alpha = abs(Estimates))) +
geom_path(alpha =0.7, linewidth = 1.5) +
#geom_polygon(fill = NA, size =1, alpha = abs(data_temp$Estimates))+
geom_point(size=5) +
coord_radar() +
labs(title = paste0("LG", lgs[i])) +
annotate(x= 0,y=c(-1.3,breaks), label= round(c(NA, breaks),2),geom="text", na.rm = TRUE) +
theme_bw() +
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.title.x=element_blank(),
legend.title = element_blank()) + guides(alpha = "none")
}
return(p)
}
} else if(software == "polyqtlR"){
if(design == "circle" | design == "digenic"){
stop(safeError("Design option not available for: polyqtlR"))
} else {
effects.df <- effects %>% filter(.data$pheno %in% unique(qtl_info$pheno)[pheno.col.n]) %>%
filter(.data$LG %in% groups) %>% pivot_longer(cols = 4:ncol(.), names_to = "haplo", values_to = "effect")
effects.df <- effects.df %>% group_by(.data$haplo, .data$pheno) %>% mutate(x.axis = 1:n()) %>% ungroup() %>% as.data.frame()
vlines <- split(effects.df$x.axis, effects.df$LG)
vlines <- sapply(vlines, function(x) x[1])
p <- list()
for(i in 1:length(pheno.col.n)){
p[[i]] <- effects.df %>% filter(.data$pheno == unique(qtl_info$pheno)[pheno.col.n][i]) %>%
ggplot() +
geom_path(aes(x=x.axis, y=haplo, col = effect), linewidth = 5) +
scale_color_gradient2(low = "purple4", mid = "white",high = "seagreen") +
{if(length(vlines) > 1) geom_vline(xintercept=vlines, linetype="dashed", linewidth=.5, alpha=0.8, na.rm = TRUE)} +
labs(y = "Haplotype", x = "Linkage group", title = unique(qtl_info$pheno)[pheno.col.n][i]) +
annotate(x=vlines,y=+Inf,label= paste0("LG", names(vlines)),vjust= 1, hjust= -0.1,geom="label") +
coord_cartesian(ylim = c(1,8.5)) +
theme_classic() + theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank(), legend.title = element_blank())
}
return(p)
}
}
}
#' Change ggplot coordinates to plot radar - From package see
#'
#' @param theta ariable to map angle to (x or y)
#' @param start offset of starting point from 12 o'clock in radians. Offset is applied clockwise or anticlockwise depending on value of direction.
#' @param direction 1, clockwise; -1, anticlockwise
#'
#'
#' @keywords internal
coord_radar <- function (theta = "x", start = 0, direction = 1) {
theta <- match.arg(theta, c("x", "y"))
r <- if (theta == "x") "y" else "x"
ggproto("CordRadar", CoordPolar, theta = theta, r = r, start = start,
direction = sign(direction),
is_linear = function(coord) TRUE)
}
#' Plot effects data
#'
#' @param data_effects.obj output of function \code{data_effects}
#' @param software character defining which software was used for QTL analysis. Currently support for: QTLpoly, diaQTL and polyqtlR.
#' @param design character defining the graphic design. Options: `bar` - barplot of the effects;
#' `circle` - circular plot of the effects (useful to compare effects of different traits);
#' `digenic` - heatmap plotting sum of additive effects (bottom diagonal) and digenic effects (top diagonal) when present
#'
#'
#' @keywords internal
plot_effects <- function(data_effects.obj, software,
design = c("bar", "circle", "digenic")){
if(software == "polyqtlR"){
p.t <- ggarrange(plotlist = data_effects.obj, common.legend = T, ncol = 1, legend = "right")
} else {
if(design == "circle"){
rows <- ceiling(length(data_effects.obj)/2)
if(rows == 0) rows <- 1
p.t <- ggarrange(plotlist = data_effects.obj, nrow = rows, ncol = 2)
} else {
rows <- ceiling(length(data_effects.obj)/4)
if(rows == 0) rows <- 1
p.t <- ggarrange(plotlist = data_effects.obj, nrow = rows, ncol = 4)
}
}
return(p.t)
}
#' Estimate breeding values - Adapted function from QTLpoly
#'
#' @param qtl_info data.frame with: LG - linkage group ID; Pos - position in linkage map (cM);
#' Pheno - phenotype ID; Pos_lower - lower position of confidence interval;
#' Pos_upper - upper position of the confidence interval; Pval - QTL p-value; h2 - herdability
#' @param probs data.frame with first column (named `ind`) as individuals ID and next columns
#' named with markers ID and containing the genotype probability at each marker
#' @param selected_mks data.frame with: LG - linkage group ID; mk - marker ID; pos - position in linkage map (cM)
#' @param blups data.frame with: haplo - haplotype ID; pheno - phenotype ID; qtl - QTL ID; u.hat - QTL estimated BLUPs
#' @param beta.hat data.frame with: pheno - phenotype ID; beta.hat - estimated beta
#' @param pos selected QTL position (cM)
#'
#' @return data.frame containing breeding values
#'
#' @import dplyr
#'
#'
#' @keywords internal
breeding_values <- function(qtl_info, probs, selected_mks, blups, beta.hat, pos) {
pheno.names <- unique(as.character(qtl_info$pheno))
results <- vector("list", length(pheno.names))
names(results) <- pheno.names
# possible to index individuals
phenos <- which(pheno.names %in% names(pos))
for(p in phenos) { # select pheno
nqtl <- length(pos[[pheno.names[p]]])
infos <- filter(qtl_info, .data$pheno == pheno.names[p])
infos <- infos[which(infos$Pos %in% pos[[pheno.names[p]]]),]
markers <- which((round(selected_mks$pos,2) %in% infos$Pos) & (selected_mks$LG %in% infos$LG))
Z <- probs[,markers,] # select by pos
u.hat <- filter(blups, .data$pheno == pheno.names[p])
u.hat <- split(u.hat$u.hat, u.hat$qtl)
beta.hat.sub <- filter(beta.hat, .data$pheno == pheno.names[p])
beta.hat.v <- beta.hat.sub$beta.hat
Zu <- vector("list", nqtl)
if(nqtl > 1) {
for(m in 1:nqtl) {
Zu[[m]] <- t(Z[,m,]) %*% u.hat[[m]]
}
nind <- dim(Z)[3]
y.hat <- matrix(rep(beta.hat.v, nind), byrow = FALSE) + Reduce("+", Zu)
} else if(nqtl == 1) {
Zu <- t(Z) %*% u.hat[[1]]
nind <- dim(Z)[2]
y.hat <- matrix(rep(beta.hat.v, nind), byrow = FALSE) + Zu
}
colnames(y.hat) <- pheno.names[p]
results[[p]] <- round(y.hat,2)
}
id.names <- rownames(results[[which(sapply(results, function(x) !is.null(x)))[1]]])
results <- as.data.frame(do.call(cbind, results))
results <- cbind(gen=id.names, results)
return(results)
}
#' Calculates homologues probabilities - Adapted from MAPpoly
#'
#' @param probs data.frame with first column (named `ind`) as individuals ID and next columns named with markers ID and containing the genotype probability at each marker
#' @param selected_mks data.frame with: LG - linkage group ID; mk - marker ID; pos - position in linkage map (cM)
#' @param selected_lgs vector containing selected LGs ID
#'
#' @return object of class \code{mappoly.homoprob}
#'
#'
#' @importFrom reshape2 melt
#' @importFrom dplyr filter
#'
#' @keywords internal
calc_homologprob <- function(probs, selected_mks, selected_lgs){
input.genoprobs <- probs
each.split <- vector()
sizes <- table(selected_mks$LG)
probs.b <- probs
input.genoprobs <- list()
for(i in 1:length(sizes)){
input.genoprobs[[i]] <- probs.b[,1:sizes[i],]
probs.b <- probs.b[,-c(1:sizes[i]),]
}
lgs <- as.numeric(unique(selected_lgs))
input.genoprobs <- input.genoprobs[sort(lgs)]
selected_mks_lg <- filter(selected_mks, .data$LG %in% lgs)
pos <- split(selected_mks_lg$pos, selected_mks_lg$LG)
df.res <- NULL
for(j in 1:length(input.genoprobs)){
stt.names <- dimnames(input.genoprobs[[j]])[[1]] ## state names
mrk.names <- dimnames(input.genoprobs[[j]])[[2]] ## mrk names
ind.names <- dimnames(input.genoprobs[[j]])[[3]] ## individual names
v <- c(2,4,6,8,10,12)
names(v) <- choose(v,v/2)^2
ploidy <- v[as.character(length(stt.names))]
hom.prob <- array(NA, dim = c(ploidy*2, length(mrk.names), length(ind.names)))
dimnames(hom.prob) <- list(letters[1:(2*ploidy)], mrk.names, ind.names)
for(i in letters[1:(2*ploidy)])
hom.prob[i,,] <- apply(input.genoprobs[[j]][grep(stt.names, pattern = i),,], c(2,3), function(x) round(sum(x, na.rm = TRUE),4))
df.hom <- melt(hom.prob)
map <- data.frame(map.position = pos[[j]], marker = mrk.names)
colnames(df.hom) <- c("homolog", "marker", "individual", "probability")
df.hom <- merge(df.hom, map, sort = FALSE)
df.hom$LG <- names(pos)[j]
df.res <- rbind(df.res, df.hom)
}
if(ploidy == 4){
df.res$homolog <- gsub("a", paste0("P1.1_"), df.res$homolog)
df.res$homolog <- gsub("b", paste0("P1.2_"), df.res$homolog)
df.res$homolog <- gsub("c", paste0("P1.3_"), df.res$homolog)
df.res$homolog <- gsub("d", paste0("P1.4_"), df.res$homolog)
df.res$homolog <- gsub("e", paste0("P2.1_"), df.res$homolog)
df.res$homolog <- gsub("f", paste0("P2.2_"), df.res$homolog)
df.res$homolog <- gsub("g", paste0("P2.3_"), df.res$homolog)
df.res$homolog <- gsub("h", paste0("P2.4_"), df.res$homolog)
df.res$homolog = substring(df.res$homolog,1, nchar(df.res$homolog)-1)
} else if(ploidy == 6){
df.res$homolog <- gsub("a", paste0("P1.1_"), df.res$homolog)
df.res$homolog <- gsub("b", paste0("P1.2_"), df.res$homolog)
df.res$homolog <- gsub("c", paste0("P1.3_"), df.res$homolog)
df.res$homolog <- gsub("d", paste0("P1.4_"), df.res$homolog)
df.res$homolog <- gsub("e", paste0("P1.5_"), df.res$homolog)
df.res$homolog <- gsub("f", paste0("P1.6_"), df.res$homolog)
df.res$homolog <- gsub("g", paste0("P2.1_"), df.res$homolog)
df.res$homolog <- gsub("h", paste0("P2.2_"), df.res$homolog)
df.res$homolog <- gsub("i", paste0("P2.3_"), df.res$homolog)
df.res$homolog <- gsub("j", paste0("P2.4_"), df.res$homolog)
df.res$homolog <- gsub("k", paste0("P2.5_"), df.res$homolog)
df.res$homolog <- gsub("l", paste0("P2.6_"), df.res$homolog)
df.res$homolog = substring(df.res$homolog,1, nchar(df.res$homolog)-1)
}
structure(list(info = list(ploidy = ploidy,
n.ind = length(ind.names)) ,
homoprob = df.res), class = "mappoly.homoprob")
}
#' Plots mappoly.homoprob from MAPpoly
#'
#' @param x an object of class \code{mappoly.homoprob}
#'
#' @param stack logical. If \code{TRUE}, probability profiles of all homologues
#' are stacked in the plot (default = FALSE)
#'
#' @param lg indicates which linkage group should be plotted. If \code{NULL}
#' (default), it plots the first linkage group. If
#' \code{"all"}, it plots all linkage groups
#'
#' @param ind indicates which individuals should be plotted. It can be the
#' position of the individuals in the dataset or it's name.
#' If \code{NULL} (default), the function plots the first
#' individual
#'
#' @param verbose if \code{TRUE} (default), the current progress is shown; if
#' \code{FALSE}, no output is produced
#'
#' @param ... unused arguments
#'
#'
#' @keywords internal
plot.mappoly.homoprob <- function(x, stack = FALSE, lg = NULL,
ind = NULL,
verbose = TRUE, ...){
qtl <- NULL
all.ind <- as.character(unique(x$homoprob$individual))
#### Individual handling ####
if(length(ind) > 1){
if (verbose) message("More than one individual provided: using the first one")
ind <- ind[1]
}
if(is.null(ind)){
ind <- as.character(all.ind[1])
df.pr1 <- subset(x$homoprob, individual == ind)
} else if(is.numeric(ind)) {
if(ind > length(all.ind))
stop("Please chose an individual number between 1 and ", length(all.ind))
ind <- as.character(all.ind[ind])
df.pr1 <- subset(x$homoprob, individual == ind)
} else if (is.character(ind)){
if(!ind%in%all.ind)
stop(safeError("Invalid individual name"))
} else stop(safeError("Invalid individual name"))
#### LG handling ####
if(is.null(lg))
lg <- 1
if(all(lg == "all"))
lg <- unique(x$homoprob$LG)
LG <- individual <- map.position <- probability <- homolog <- NULL
if(length(lg) > 1 & !stack)
{
if (verbose) message("Using 'stack = TRUE' to plot multiple linkage groups")
stack <- TRUE
}
if(stack){
##subset linkage group
if(!is.null(lg)){
df.pr1 <- subset(x$homoprob, LG%in%lg)
df.pr1 <- subset(df.pr1, individual == ind)
} else
df.pr1 <- subset(x$homoprob, individual == ind)
p <- ggplot(df.pr1, aes(x = map.position, y = probability, fill = homolog, color = homolog)) +
geom_density(stat = "identity", alpha = 0.7, position = "stack") +
ggtitle(ind) +
facet_grid(rows = vars(LG)) +
ylab(label = "Homologs probabilty") +
xlab(label = "Map position") +
geom_vline(data = df.pr1, aes(xintercept = qtl), linetype="dashed", na.rm = TRUE) +
theme_minimal()
} else {
##subset linkage group
if(is.null(lg)){
lg <- 1
df.pr1 <- subset(x$homoprob, LG %in% lg)
} else df.pr1 <- subset(x$homoprob, LG %in% lg)
df.pr1 <- subset(df.pr1, individual == ind)
p <- ggplot(df.pr1, aes(x = map.position, y = probability, fill = homolog, color = homolog)) +
geom_density(stat = "identity", alpha = 0.7) +
ggtitle(paste(ind, " LG", lg)) +
facet_grid(rows = vars(homolog)) +
theme_minimal() +
ylab(label = "Homologs probabilty") +
xlab(label = "Map position") +
geom_vline(data = df.pr1, aes(xintercept = qtl), linetype="dashed", na.rm = TRUE)
}
return(p)
}
#' Plot selected haplotypes
#'
#' @param input.haplo character vector with selected haplotypes. It contains the information: "Trait:<trait ID>_LG:<linkage group ID_Pos:<QTL position>"
#' @param exclude.haplo character vector with haplotypes to be excluded. It contains the information: "Trait:<trait ID>_LG:<linkage group ID_Pos:<QTL position>"
#' @param probs data.frame with first column (named `ind`) as individuals ID and next columns named with markers ID and containing the genotype probability at each marker
#' @param selected_mks data.frame with: LG - linkage group ID; mk - marker ID; pos - position in linkage map (cM)
#' @param effects.data output object from \code{data_effects} function
#'
#' @return ggplot graphic
#'
#' @import dplyr tidyr
#'
#' @keywords internal
select_haplo <- function(input.haplo,probs, selected_mks, effects.data, exclude.haplo = NULL){
LG <- map.position <- individual <- probability <- NULL
include <- strsplit(unlist(input.haplo), "_")
if(!is.null(exclude.haplo)) exclude <- strsplit(unlist(exclude.haplo), "_") else exclude <- NULL
lgs <- c(sapply(include, "[[", 2), sapply(exclude, "[[", 2))
lgs <- gsub("LG:", "", unique(lgs))
homo.dat <- calc_homologprob(probs = probs, selected_mks = selected_mks, selected_lgs = lgs)
data_match <- paste0("LG:",homo.dat$homoprob$LG, "_Pos:",
round(homo.dat$homoprob$map.position,0),
"_homolog:", homo.dat$homoprob$homolog)
# Include haplo
include <- sapply(include, function(x) paste0(x[-1], collapse = "_"))
subset <- homo.dat$homoprob[which(data_match %in% include),]
subset <- subset[which(subset$probability > 0.5),]
counts <- subset %>% group_by(individual) %>% summarise(n = n())
selected <- counts$individual[counts$n == length(input.haplo)]
if(length(selected) == 0) stop("None of the inviduals have these combination of haplotypes")
# Exclude haplo
if(!is.null(exclude.haplo)){
exclude <- sapply(exclude, function(x) paste0(x[-1], collapse = "_"))
subset <- homo.dat$homoprob[which(data_match %in% exclude),]
subset <- subset[which(subset$probability > 0.5),]
selected <- selected[-which(selected %in% unique(subset$individual))]
}
if(length(selected) == 0) stop("None of the inviduals have these combination of haplotypes")
dashline <- strsplit(c(unlist(input.haplo), unlist(exclude.haplo)), "_")
dashline <- sapply(dashline, function(x) paste0(x[-c(1,4)], collapse = "_"))
data_match <- sapply(strsplit(data_match, "_"), function(x) paste0(x[-length(x)], collapse = "_"))
homo.dat$homoprob$qtl <- NA
homo.dat$homoprob$qtl[which(data_match %in% dashline)] <- homo.dat$homoprob$map.position[which(data_match %in% dashline)]
p <- list()
for(i in 1:length(selected)){
p[[i]] <- plot.mappoly.homoprob(x = homo.dat,
lg = unique(as.numeric(lgs)),
ind = as.character(selected)[i],
use.plotly = FALSE)
}
return(list(p, inds = as.character(selected)))
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/functions_qtl.R
|
#' Upload example files
#'
#' @param example character indicating the example dataset selected
#'
#' @return object of class \code{viewpoly}
#'
#'
#' @importFrom utils download.file
#'
#' @keywords internal
prepare_examples <- function(example){
viewmap_tetra <- viewqtl_tetra <- NULL
if(example == "tetra_map"){
viewmap_tetra <- readRDS(system.file("ext/viewmap_tetra.rds", package = "viewpoly"))
viewqtl_tetra <- readRDS(system.file("ext/viewqtl_tetra.rds", package = "viewpoly"))
structure(list(map=viewmap_tetra,
qtl=viewqtl_tetra,
fasta = "https://gesteira.statgen.ncsu.edu/files/genome-browser/Stuberosum_448_v4.03.fa.gz",
gff3 = "https://gesteira.statgen.ncsu.edu/files/genome-browser/Stuberosum_448_v4.03.gene_exons.gff3.gz",
vcf = NULL,
align = NULL,
wig = NULL,
version = packageVersion("viewpoly")),
class = "viewpoly")
}
}
#' Upload hidecan example files
#'
#' @param example character indicating the example dataset selected
#'
#' @return object of class \code{viewpoly}
#'
#' @import hidecan
#'
#' @keywords internal
prepare_hidecan_examples <- function(example){
gwas <- read.csv(system.file("ext/gwas.csv", package = "viewpoly"))
de <- read.csv(system.file("ext/de.csv", package = "viewpoly"))
can <- read.csv(system.file("ext/can.csv", package = "viewpoly"))
structure(list(GWASpoly = NULL,
GWAS= gwas,
DE= de,
CAN = can))
}
#' Converts map information in custom format files to viewmap object
#'
#'
#' @param dosages TSV or TSV.GZ file with both parents dosage information.
#' It should contain four columns: 1) character vector with chromosomes ID;
#' 2) Character vector with markers ID; 3) Character vector with parent ID;
#' 4) numerical vector with dosage.
#' @param phases TSV or TSV.GZ file with phases information. It should contain:
#' 1) Character vector with chromosome ID; 2) Character vector with marker ID;
#' 3 to (ploidy number)*2 columns with each parents haplotypes.
#' @param genetic_map TSV or TSV.GZ file with the genetic map information
#' @param mks_pos TSV or TSV.GZ file with table with three columns: 1) marker ID;
#' 2) genome position; 3) chromosome
#'
#' @return object of class \code{viewmap}
#'
#' @import dplyr
#' @import vroom
#'
#' @keywords internal
prepare_map_custom_files <- function(dosages, phases, genetic_map, mks_pos=NULL){
parent <- chr <- marker <- NULL
ds <- vroom(dosages$datapath, progress = FALSE, col_types = cols())
ph <- vroom(phases$datapath, progress = FALSE, col_types = cols())
map <- vroom(genetic_map$datapath, progress = FALSE, col_types = cols())
if(!is.null(mks_pos)) mks_pos <- vroom(mks_pos$datapath, progress = FALSE, col_types = cols())
parent1 <- unique(ds$parent)[1]
parent2 <- unique(ds$parent)[2]
d.p1 <- ds %>% filter(parent == parent1) %>% select(chr, marker, dosages)
d.p1.names <- split(d.p1$marker, d.p1$chr)
d.p1 <- split(d.p1$dosages, d.p1$chr)
d.p1 <- Map(function(x,y) {
names(x) <- y
return(x)
}, d.p1, d.p1.names)
d.p2 <- ds %>% filter(parent == parent2) %>% select(chr, marker, dosages)
d.p2.names <- split(d.p2$marker, d.p2$chr)
d.p2 <- split(d.p2$dosages, d.p2$chr)
d.p2 <- Map(function(x,y) {
names(x) <- y
return(x)
}, d.p2, d.p2.names)
if(!is.null(mks_pos)) pos <- mks_pos[,2][match(map$marker,mks_pos[,1])] else pos <- NA
maps <- data.frame(mk.names = map$marker,
l.dist = map$dist,
g.chr = map$chr,
g.dist = pos,
alt = NA,
ref= NA)
maps <- split.data.frame(maps, maps$g.chr)
ploidy <- (dim(ph)[2] - 2)/2
ph.p1 <- as.data.frame(select(ph, 3:(ploidy +2)))
rownames(ph.p1) <- ph$marker
ph.p1 <- split(ph.p1, ph$chr)
ph.p1 <- lapply(ph.p1, as.matrix)
ph.p2 <- as.data.frame(select(ph, (ploidy +3):dim(ph)[2]))
rownames(ph.p2) <- ph$marker
ph.p2 <- split(ph.p2, ph$chr)
ph.p2 <- lapply(ph.p2, as.matrix)
structure(list(d.p1 = d.p1,
d.p2 = d.p2,
ph.p1 = ph.p1,
ph.p2 = ph.p2,
maps = maps,
software = "custom"),
class = "viewmap")
}
#' Converts list of mappoly.map object into viewmap object
#'
#' @param mappoly_list list with objects of class \code{mappoly.map}
#'
#' @return object of class \code{viewmap}
#'
#'
#' @keywords internal
prepare_MAPpoly <- function(mappoly_list){
is <- NULL
if(!is(mappoly_list[[1]], "mappoly.map")){
temp <- load(mappoly_list$datapath)
mappoly_list <- get(temp)
}
prep <- lapply(mappoly_list, prepare_map)
structure(list(d.p1 = lapply(prep, "[[", 5),
d.p2 = lapply(prep, "[[", 6),
ph.p1 = lapply(prep, "[[", 3),
ph.p2 = lapply(prep, "[[", 4),
maps = lapply(prep, "[[", 2),
software = "MAPpoly"),
class = "viewmap")
}
#' Converts polymapR ouputs to viewmap object
#'
#' @param polymapR.dataset a \code{polymapR} dataset
#' @param polymapR.map output map sequence from polymapR
#' @param input.type indicates whether the input is discrete ("disc") or probabilistic ("prob")
#' @param ploidy ploidy level
#'
#' @return object of class \code{viewmap}
#'
#'
#' @keywords internal
prepare_polymapR <- function(polymapR.dataset, polymapR.map, input.type, ploidy){
temp <- load(polymapR.dataset$datapath)
polymapR.dataset <- get(temp)
temp <- load(polymapR.map$datapath)
polymapR.map <- get(temp)
data <- import_data_from_polymapR(input.data = polymapR.dataset,
ploidy = ploidy,
parent1 = "P1",
parent2 = "P2",
input.type = ,
prob.thres = 0.95,
pardose = NULL,
offspring = NULL,
filter.non.conforming = TRUE,
verbose = FALSE)
map_seq <- import_phased_maplist_from_polymapR(maplist = polymapR.map,
mappoly.data = data)
viewmap <- prepare_MAPpoly(mappoly_list = map_seq)
viewmap$software <- "polymapR"
structure(viewmap, class = "viewmap")
}
#' Converts QTLpoly outputs to viewqtl object
#'
#'
#' @param data object of class "qtlpoly.data"
#' @param remim.mod object of class "qtlpoly.model" "qtlpoly.remim".
#' @param est.effects object of class "qtlpoly.effects"
#' @param fitted.mod object of class "qtlpoly.fitted"
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#'
#' @return object of class \code{viewqtl}
#'
#' @importFrom tidyr pivot_longer
#' @import dplyr
#'
#' @keywords internal
prepare_QTLpoly <- function(data, remim.mod, est.effects, fitted.mod){
is <- NULL
temp <- load(data$datapath)
data <- get(temp)
temp <- load(remim.mod$datapath)
remim.mod <- get(temp)
temp <- load(est.effects$datapath)
est.effects <- get(temp)
temp <- load(fitted.mod$datapath)
fitted.mod <- get(temp)
# Only selected markers
lgs.t <- lapply(data$lgs, function(x) data.frame(mk = names(x), pos = x))
lgs <- data.frame()
for(i in 1:length(lgs.t)) {
lgs <- rbind(lgs, cbind(LG = i,lgs.t[[i]]))
}
rownames(lgs) <- NULL
qtl_info <- u.hat <- beta.hat <- pvalue <- profile <- effects <- data.frame()
for(i in 1:length(remim.mod$results)){
pheno = names(fitted.mod$results)[i]
if(!is.null(dim(fitted.mod$results[[i]]$qtls)[1])){
lower <- remim.mod$results[[i]]$lower[,1:2]
upper <- remim.mod$results[[i]]$upper[,1:2]
qtl <- remim.mod$results[[i]]$qtls[,c(1,2,6)]
int <- cbind(LG = lower$LG, Pos_lower = lower$Pos_lower,
Pos_upper = upper$Pos_upper, qtl[,2:3])
int <- cbind(pheno = names(remim.mod$results)[i], int)
if(dim(fitted.mod$results[[i]]$qtls)[1] > 1) {
h2 <- fitted.mod$results[[i]]$qtls[-dim(fitted.mod$results[[i]]$qtls)[1],c(1:2,7)]
h2 <- data.frame(apply(h2, 2, unlist))
}else {
h2 <- fitted.mod$results[[i]]$qtls[,c(1:2,7)]
}
int <- merge(int, h2, by = c("LG", "Pos"))
qtl_info <- rbind(qtl_info, int[order(int$LG, int$Pos),])
u.hat.t <- do.call(cbind, fitted.mod$results[[i]]$fitted$U)
colnames(u.hat.t) <- names(fitted.mod$results[[i]]$fitted$U)
u.hat.t <- cbind(haplo = fitted.mod$results[[i]]$fitted$alleles, pheno , as.data.frame(u.hat.t))
u.hat.t <- pivot_longer(u.hat.t, cols = c(1:length(u.hat.t))[-c(1:2)], values_to = "u.hat", names_to = "qtl")
u.hat <- rbind(u.hat, u.hat.t)
u.hat$qtl <- gsub("g", "", u.hat$qtl)
beta.hat.t <- data.frame(pheno, beta.hat = fitted.mod$results[[i]]$fitted$Beta[,1])
beta.hat <- rbind(beta.hat, beta.hat.t)
for(j in 1:length(est.effects$results[[i]]$effects)){
effects.t <- do.call(rbind, lapply(est.effects$results[[i]]$effects[[j]], function(x) data.frame(haplo = names(x), effect = x)))
effects.t <- cbind(pheno = pheno, qtl.id= j, effects.t)
effects <- rbind(effects, effects.t)
}
}
if(is(remim.mod, "qtlpoly.feim")) SIG <- remim.mod$results[[i]][[3]] else SIG <- -log10(as.numeric(remim.mod$results[[i]][[3]]))
profile.t <- data.frame(pheno, LOP = SIG)
profile <- rbind(profile, profile.t)
}
# Rearrange the progeny probabilities into a list
probs <- data$Z
structure(list(selected_mks = lgs,
qtl_info = qtl_info,
blups = as.data.frame(u.hat),
beta.hat = beta.hat,
profile = profile,
effects = effects,
probs = probs,
software = "QTLpoly"),
class = "viewqtl")
}
#' Converts diaQTL output to viewqtl object
#'
#' @param scan1_list list with results from diaQTL \code{scan1} function
#' @param scan1_summaries_list list with results from diaQTL \code{scan1_summaries} function
#' @param fitQTL_list list with results from diaQTL \code{fitQTL} function
#' @param BayesCI_list list with results from diaQTL \code{BayesCI} function
#'
#' @return object of class \code{viewqtl}
#'
#' @importFrom dplyr filter
#'
#'
#' @keywords internal
prepare_diaQTL <- function(scan1_list, scan1_summaries_list, fitQTL_list, BayesCI_list){
marker <- pheno <- NULL
temp <- load(scan1_list$datapath)
scan1_list <- get(temp)
temp <- load(scan1_summaries_list$datapath)
scan1_summaries_list <- get(temp)
temp <- load(fitQTL_list$datapath)
fitQTL_list <- get(temp)
temp <- load(BayesCI_list$datapath)
BayesCI_list <- get(temp)
selected_mks <- scan1_list[[1]][,c(2,1,3)]
colnames(selected_mks) <- c("LG", "mk", "pos")
qtl_info <- data.frame()
for(i in 1:length(scan1_summaries_list)){
temp <- cbind(pheno = names(scan1_summaries_list)[i],scan1_summaries_list[[i]]$peaks)
qtl_info <- rbind(qtl_info, temp)
}
qtls.id <- list()
qtl_info2 <- data.frame()
if(is.null(fitQTL_list[[1]]$plots)) fitQTL_list <- unlist(fitQTL_list, recursive = F)
profile <- effects <- data.frame()
for(i in 1:length(fitQTL_list)){
qtls.id <- colnames(fitQTL_list[[i]]$effects$additive)
trait <- gsub("Trait: ","",fitQTL_list[[i]]$plots[[1]]$additive$labels$title)
qtl_temp <- filter(qtl_info, pheno == trait & marker %in% qtls.id)
qtl_info2 <- rbind(qtl_info2, qtl_temp)
# profile
profile_temp <- data.frame(pheno = trait, deltaDIC = scan1_list[[which(names(scan1_list) == trait)]]$deltaDIC)
profile <- rbind(profile, profile_temp)
# Sometimes there is a graphic about epistasis that is not described anywhere yet. We ignored it here by now.
if(any(grepl("epistasis", names(fitQTL_list[[i]]$plots)))) fitQTL_list[[i]]$plots <- fitQTL_list[[i]]$plots[-grep("epistasis", names(fitQTL_list[[i]]$plots))]
for(j in 1:length(fitQTL_list[[i]]$plots)){
# aditive effect
temp <- fitQTL_list[[i]]$plots[[j]]$additive$data
effects.ad.t <- data.frame(pheno = trait,
haplo = rownames(temp),
qtl.id = j,
effect= temp$mean,
type = "Additive",
CI.lower = temp$CI.lower,
CI.upper = temp$CI.upper)
# digenic effect
temp <- data.frame(haplo = rownames(fitQTL_list[[i]]$effects$digenic), z = fitQTL_list[[i]]$effects$digenic[,j])
if(!is.null(temp)){
effects.di.t <- data.frame(pheno = trait,
haplo = gsub("[+]", "x", temp$haplo),
qtl.id = j,
effect = as.numeric(temp$z),
type = "Digenic",
CI.lower = NA,
CI.upper = NA)
effects.t <- rbind(effects.ad.t, effects.di.t)
} else effects.t <- effects.ad.t
effects.t <- effects.t[order(effects.t$pheno, effects.t$qtl.id, effects.t$type,effects.t$haplo),]
effects <- rbind(effects, effects.t)
}
}
# Ordering Bayes info according to qtl info
BayesCI_list_ord <- list()
for(i in 1:dim(qtl_info2)[1]){
for(j in 1:length(BayesCI_list)){
if(any(paste0(BayesCI_list[[j]]$pheno, BayesCI_list[[j]]$marker, BayesCI_list[[j]]$chrom) %in%
paste0(qtl_info2$pheno[i], qtl_info2$marker[i], qtl_info2$chrom[i]))){
BayesCI_list_ord[[i]] <- BayesCI_list[[j]]
}
}
}
if(length(BayesCI_list_ord) != dim(qtl_info2)[1]) BayesCI_list_ord[[length(BayesCI_list_ord) + 1]] <- NULL
idx <- which(sapply(BayesCI_list_ord, is.null))
if(length(idx) != 0 | length(BayesCI_list_ord) != dim(qtl_info2)[1]){
warning(paste0("Bayes confidence interval information (from diaQTL function BayesCI) was not provided for the QTL in chromosome:", qtl_info2[idx, 3],
"; phenotype: ", qtl_info2[idx, 1]))
}
CI <- lapply(BayesCI_list_ord, function(x) {
y = c(Pos_lower = x$cM[1], Pos_upper = x$cM[length(x$cM)])
return(y)
})
idx <- which(sapply(CI, is.null))
if(length(idx) != 0 | length(BayesCI_list_ord) != dim(qtl_info2)[1]){
if(length(idx) != 0)
CI[[idx]] <- c(NA, NA)
else CI[[length(CI) + 1]] <- c(NA, NA)
}
CI <- do.call(rbind, CI)
qtl_info <- qtl_info2[,c(3,4,1,6)]
qtl_info <- cbind(qtl_info, CI)
qtl_info <- qtl_info[,c(1:3,5,6,4)]
colnames(qtl_info)[1:2] <- c("LG", "Pos")
structure(list(selected_mks = selected_mks,
qtl_info = qtl_info,
profile = profile,
effects = effects,
software = "diaQTL"),
class = "viewqtl")
}
#' Converts polyqtlR outputs to viewqtl object
#'
#' @param polyqtlR_QTLscan_list list containing results from polyqtlR \code{QTLscan_list} function
#' @param polyqtlR_qtl_info data.frame containing the QTL information:LG - group ID; Pos - QTL position (cM);
#' pheno - phenotype ID; Pos_lower - lower position of confidence interval; Pos_upper - upper position of the confidence interval;
#' thresh - LOD threshold applied
#' @param polyqtlR_effects data.frame with results from \code{visualiseQTLeffects} polyqtlR function
#'
#' @return object of class \code{viewqtl}
#'
#'
#' @keywords internal
prepare_polyqtlR <- function(polyqtlR_QTLscan_list, polyqtlR_qtl_info, polyqtlR_effects){
temp <- load(polyqtlR_QTLscan_list$datapath)
polyqtlR_QTLscan_list <- get(temp)
temp <- load(polyqtlR_qtl_info$datapath)
polyqtlR_qtl_info <- get(temp)
temp <- load(polyqtlR_effects$datapath)
polyqtlR_effects <- get(temp)
# selected markers
selected_mks <- polyqtlR_QTLscan_list[[1]]$Map
colnames(selected_mks) <- c("LG", "mk", "pos")
profile <- qtl_info <- effects <- data.frame()
for(i in 1:length(polyqtlR_QTLscan_list)){
pheno <- names(polyqtlR_QTLscan_list)[i]
# profile
profile_temp <- data.frame(pheno = pheno,
LOD = polyqtlR_QTLscan_list[[i]]$QTL.res$LOD)
profile <- rbind(profile, profile_temp)
}
structure(list(selected_mks = selected_mks,
qtl_info = polyqtlR_qtl_info,
profile = profile,
effects = polyqtlR_effects,
software = "polyqtlR"),
class = "viewqtl")
}
#' Converts QTL information in custom files to viewqtl object
#'
#' @param selected_mks data.frame with: LG - linkage group ID; mk - marker ID; pos - position in linkage map (cM)
#' @param qtl_info data.frame with: LG - linkage group ID; Pos - position in linkage map (cM);
#' Pheno - phenotype ID; Pos_lower - lower position of confidence interval;
#' Pos_upper - upper position of the confidence interval; Pval - QTL p-value; h2 - herdability
#' @param blups data.frame with: haplo - haplotype ID; pheno - phenotype ID; qtl - QTL ID; u.hat - QTL estimated BLUPs
#' @param beta.hat data.frame with: pheno - phenotype ID; beta.hat - estimated beta
#' @param profile data.frame with: pheno - phenotype ID; LOP - significance value for the QTL, in this case LOP (can be LOD or DIC depending of the software used)
#' @param effects data.frame with: pheno - phenotype ID; qtl.id - QTL ID; haplo - haplotype ID; effect - haplotype effect value
#' @param probs data.frame with first column (named `ind`) as individuals ID and next columns named with markers ID and containing the genotype probability at each marker
#'
#'
#' @return object of class \code{viewqtl}
#'
#' @import vroom
#' @import abind
#'
#' @keywords internal
prepare_qtl_custom_files <- function(selected_mks, qtl_info, blups, beta.hat,
profile, effects, probs){
qtls <- list()
qtls$selected_mks <- as.data.frame(vroom(selected_mks$datapath, progress = FALSE, col_types = cols()))
qtls$qtl_info <- as.data.frame(vroom(qtl_info$datapath, progress = FALSE, col_types = cols()))
qtls$blups <- as.data.frame(vroom(blups$datapath, progress = FALSE, col_types = cols()))
qtls$beta.hat <- as.data.frame(vroom(beta.hat$datapath, progress = FALSE, col_types = cols()))
qtls$profile <- as.data.frame(vroom(profile$datapath, progress = FALSE, col_types = cols()))
qtls$profile[,2] <- as.numeric(qtls$profile[,2])
qtls$effects <- as.data.frame(vroom(effects$datapath, progress = FALSE, col_types = cols()))
probs.t <- vroom(probs$datapath, progress = FALSE, col_types = cols())
ind <- probs.t$ind
probs.t <- as.data.frame(probs.t[,-1])
probs.df <- split(probs.t, ind)
qtls$probs <- abind(probs.df, along = 3)
qtls$software <- "custom"
structure(qtls, class = "viewqtl")
}
#' Check hidecan inputs
#'
#' @param input_list shiny input result containing file path
#' @param func hidecan read input function
#'
#' @importFrom stats setNames
#' @importFrom utils read.csv
#' @import hidecan
#' @import purrr
#'
read_input_hidecan <- function(input_list, func){
## Turning the hidecan constructors into safe functions
## i.e. instead of throwing an error they return the error
## message -> useful to escalate the error message in the app
safe_func <- safely(func)
## Read all files uploaded
res <- lapply(input_list$datapath,
read.csv)
## Add file name
names(res) <- input_list$name
## Apply the hidecan constructor to each file: this will
## check whether the input files have the correct columns etc
res <- lapply(res, safe_func) |>
## rather than the resulting list being: level 1 = file, level 2 = result and error
## we get the result and error as level 1, and files as level 2
transpose()
## Checking whether any file returned an error
no_error <- res$error |>
purrr::map_lgl(is.null) |>
all()
if(!no_error){
## Extract error message
error_msg <- res$error |>
setNames(input_list$name) |>
purrr::map(purrr::pluck, "message") |>
purrr::imap(~ paste0("Input file ", .y, ": ", .x)) |>
purrr::reduce(paste0, collapse = "\n")
## Remove NULL elements from the list or results
## If all are NULL, will return an empty list()
res$result <- purrr::discard(res$result,
is.null)
showNotification(error_msg, type = "error", duration = 20)
validate(need(no_error, error_msg))
}
return(res$result)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/functions_upload.R
|
#' genes_view UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @importFrom shinyjs inlineCSS useShinyjs
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_genes_view_ui <- function(id){
ns <- NS(id)
tagList(
fluidPage(
verticalLayout(
fluidRow(
inlineCSS(".form-group {margin-bottom: 0;}
.irs-with-grid {bottom: 0px;}
.irs-grid {height: 13px;}
.irs-grid-text {height: 0px;}
"
),
column(width = 12,
div(style = "position:absolute;right:1em;",
div(
actionButton(ns("goQTL"), "Go to QTL",icon("arrow-circle-left", verify_fa = FALSE), class = "btn btn-primary"),
actionButton(ns("goMap"), label = div("Go to Map", icon("arrow-circle-right", verify_fa = FALSE)), class = "btn btn-primary"))
)
),
tags$h2(tags$b("VIEWgenome")), br(), hr(),
column(6,
column(12,
box(
background = "light-blue",
"Required inputs (*)", br(),
)
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select phenotypes *",
pickerInput(ns("phenotypes"),
label = h4("Phenotypes:"),
choices = "This will be updated",
selected = "This will be updated",
options = list(
`actions-box` = TRUE,
size = 10,
`selected-text-format` = "count > 3"
),
multiple = TRUE),
), br(),
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select linkage group *",
selectInput(inputId = ns("group"), label = p("Linkage group:"), choices = 1:15, selected = 1),
), br(),
)
),
), hr(),
wellPanel(
sliderInput(ns("range"), "Map range (cM)", min = 0, max = 300,
value = c(0, 20), step = 1),
uiOutput(ns("interval"))
),
box(id = ns("box_profile"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("profileID"), label = "QTL profile"),
column(12,
box(
width = 5, background = "light-blue",
"* QTL analysis files or viewpoly object or example dataset (check `Input data` tab)"
)
),
column(3,
useShinyjs(),
tags$head(tags$style(".butt{background-color:#add8e6; border-color: #add8e6; color: #337ab7;}")),
downloadButton(ns('bn_download'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_profile"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_profile"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_profile"), "DPI", value = 300)
), br(),
column(12,
hr(),
plotlyOutput(ns("plot_qtl"))
)
), br(),
box(id = ns("box_phi"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("phiID"), label = "Linkage Map position (cM) x Physical position (Mb)"),
column(12,
box(
width = 5, background = "light-blue",
"* MAPpoly linkage map files or viewpoly object or example dataset (check `Input data` tab)",
)
),
column(3,
downloadButton(ns('bn_download_phi'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat_phi"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_phi"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_phi"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_phi"), "DPI", value = 300)
), br(),
column(12,
hr(),
plotlyOutput(ns("plot_pos"))
)
), br(),
box(id = ns("box_jbrowser"), width = 12, height = 1000, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("jbrowserID"), label = "JBrowseR"),
column(12,
box(
width = 5, background = "light-blue",
"* Reference genome FASTA (check `Input data` tab)"
)
),
column(12,
column(6,
numericInput(ns("port"), label = "Choose a port", value = 5000), br(),
actionButton(ns("create_server"), "Open JBrowseR",icon("power-off", verify_fa = FALSE))
),
column(6,
div(style = "position:absolute;right:1em;",
p("Local server:"),
switchInput(ns("reset_server"), value = TRUE)
)
)
),
column(12, br(), hr(),
JBrowseROutput(ns("browserOutput"))
), br()),
box(id = ns("box_anno"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("annoID"), label = "Annotation table"),
column(12,
box(
width = 5, background = "light-blue",
"* Reference genome FASTA (check `Input data` tab)", br(),
"* Genome annotation GFF (check `Input data` tab)"
)
),
column(12,
DT::dataTableOutput(ns("genes_ano"))
)
)
)
)
)
}
#' genes_view Server Functions
#'
#' @importFrom JBrowseR serve_data renderJBrowseR assembly track_feature tracks default_session JBrowseR JBrowseROutput
#' @importFrom plotly event_data layout
#' @importFrom shinyjs inlineCSS js
#' @importFrom dplyr `%>%`
#'
#' @noRd
mod_genes_view_server <- function(input, output, session,
loadMap, loadQTL,
loadJBrowse_fasta, loadJBrowse_gff3, loadJBrowse_vcf, loadJBrowse_align, loadJBrowse_wig,
parent_session){
ns <- session$ns
pheno <- LG <- l.dist <- g.dist <- high <- mk.names <- track_variant <- track_alignments <- track_wiggle <- NULL
start <- end <- seqid <- NULL
#Collapse boxes
observeEvent(input$profileID, {
js$collapse(ns("box_profile"))
})
observeEvent(input$phiID, {
js$collapse(ns("box_phi"))
})
observeEvent(input$jbrowserID, {
js$collapse(ns("box_jbrowser"))
})
observeEvent(input$annoID, {
js$collapse(ns("box_anno"))
})
observe({
# Dynamic linkage group number
if(!is.null(loadMap())){
group_choices <- as.list(1:length(loadMap()$d.p1))
names(group_choices) <- 1:length(loadMap()$d.p1)
} else if(!is.null(loadQTL())){
group_choices <- as.list(1:length(unique(loadQTL()$selected_mks$LG)))
names(group_choices) <- 1:length(unique(loadQTL()$selected_mks$LG))
} else {
group_choices <- as.list("Upload map or QTL data in `upload` session.")
names(group_choices) <- "Upload map or QTL data in `upload` session."
}
updateSelectInput(session, "group",
label="Linkage group",
choices = group_choices,
selected= group_choices[[1]])
# Dynamic QTL
if(!is.null(loadQTL())){
pheno_choices <- as.list(unique(loadQTL()$profile$pheno))
names(pheno_choices) <- unique(loadQTL()$profile$pheno)
updatePickerInput(session, "phenotypes",
label = "Select phenotypes",
choices = pheno_choices,
selected=unlist(pheno_choices)[1])
} else {
updatePickerInput(session, "phenotypes",
label = "Phenotype:",
choices = "Upload QTL information to update",
selected= "Upload QTL information to update")
}
})
observeEvent(input$goMap, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "map")
})
observeEvent(input$goQTL, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "qtl")
})
# Plot QTL bar
qtl.int <- reactive({
if(!is.null(loadQTL())){
data <- loadQTL()$qtl_info %>% filter(.data$pheno %in% input$phenotypes & .data$LG == input$group)
if(dim(data)[1] == 0) return(p(" "))
data <- data[order(data$Pos_lower, data$Pos_upper),]
command <- paste0(round(data$Pos_lower,0), ":", round(data$Pos_upper, 0))
seqs <- list()
for(i in 1:length(command))
seqs[[i]] <- eval(parse(text = command[i]))
maps <- lapply(loadMap()$maps, function(x) {
y <- x$l.dist
names(y) <- x$mk.names
y
})
max_updated <- map_summary(left.lim = input$range[1],
right.lim = input$range[2],
ch = input$group, maps = maps,
d.p1 = loadMap()$d.p1, d.p2 = loadMap()$d.p2)[[5]]
qtls_pos <- Reduce(union, seqs)
chr_all <- 0:max_updated
idx.comp <- chr_all %in% qtls_pos
int <- chr_all[sequence(rle(idx.comp)$length) == 1]
int <- (int*100)/max_updated
# add start and end
ints_all <- unique(c(0,int, 100))
# add qtls
qtls <- (unique(sort(data$Pos))*100)/max_updated
qtls <- sort(c(qtls -0.3, qtls +0.3))
labs <- c(rep("int", length(ints_all)), rep(c("red","#34495E "), length(qtls/2)))
labs <- labs[order(c(ints_all, qtls))]
labs[which(labs == "red")-1] <- "#34495E "
labs[which(labs == "int")] <- "#D5D8DC"
labs <- labs[-length(labs)]
ints_all <- diff(sort(c(ints_all, qtls)))
# Each interval add small blank space to the scale - need to remove
reduce <- cumsum(ints_all)[length(cumsum(ints_all))] - 99.7
ints_all[which(labs != "red")] <- ints_all[which(labs != "red")] - reduce
# Add gradient colors
OrRd <- c("#FFF7EC", "#FEE8C8", "#FDD49E", "#FDBB84", "#FC8D59", "#EF6548", "#D7301F", "#B30000", "#7F0000")
if(length(labs[which(labs == "red")]) < 3){
qtl.colors <- OrRd[1:7][-c(1:5)][1:length(labs[which(labs == "red")])]
} else {
qtl.colors <- OrRd[1:length(labs[which(labs == "red")])]
}
labs[which(labs == "red")][order(as.numeric(data$Pval), decreasing = T)] <- qtl.colors
divs <- paste0("display:inline-block; width: ", ints_all ,"% ; background-color: ", labs, ";")
if(!is.null(input$phenotypes)){
divs_lst <- list()
for(i in 1:length(divs)){
divs_lst[[i]] <- div(id= paste0("belowslider",i), style= divs[i], p())
}
}
p(divs_lst, "QTL")
} else {
return(p(" "))
}
})
output$interval <- renderUI({
qtl.int()
})
# Plot QTL profile
output$plot_qtl <- renderPlotly({
validate(
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature.")
)
idx <- which(unique(loadQTL()$profile$pheno) %in% input$phenotypes)
pl <- plot_profile(profile = loadQTL()$profile, qtl_info = loadQTL()$qtl_info, selected_mks = loadQTL()$selected_mks,
pheno.col = idx,
lgs.id = as.numeric(input$group),
range.min = input$range[1],
range.max = input$range[2],
by_range=T,
software = loadQTL()$software)
ggplotly(source = "qtl_profile", pl, tooltip=c("Trait","Position (cM)")) %>%
layout(legend = list(orientation = 'h', y = -0.3),
modebar = list(
remove = c("toImage",
"hovercompare",
"hoverCompareCartesian")),
clickmode ="none",
dragmode = FALSE)
})
# cM x Mb
output$plot_pos <- renderPlotly({
validate(
need(!(!is.null(loadMap()) & loadMap()$software == "polymapR"), "Feature not implemented for software polymapR."),
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature.")
)
p <- plot_cm_mb(loadMap(), input$group, input$range[1], input$range[2])
max_updated = reactive({
dist <- loadMap()$maps[[as.numeric(input$group)]]$l.dist
max.range <- max(dist)
max.range
})
observeEvent(max_updated, {
updateSliderInput(inputId = "range", max = round(max_updated(),2))
})
ggplotly(p, tooltip="text") %>% layout(showlegend = FALSE,
modebar = list(
remove = c("toImage",
"hovercompare",
"hoverCompareCartesian")),
clickmode ="none",
dragmode = FALSE)
})
# Open JBrowser server
button <- eventReactive(input$create_server, {
if(!is.null(loadJBrowse_fasta())){
if(loadJBrowse_fasta() != "") {
path.fa <- loadJBrowse_fasta()
} else path.fa <- NULL
} else path.fa <- NULL
if(!is.null(loadJBrowse_gff3())){
if(loadJBrowse_gff3() != "") {
path.gff <- loadJBrowse_gff3()
if(grepl("^http", loadJBrowse_gff3())){
gff.dir <- tempfile()
if(havingIP()){
download.file(loadJBrowse_gff3(), destfile = gff.dir)
gff <- vroom(gff.dir, delim = "\t", skip = 3, col_names = F, progress = FALSE, show_col_types = FALSE)
} else {
print("No internet conection.")
}
} else {
gff <- vroom(loadJBrowse_gff3(), delim = "\t", skip = 3, col_names = F, progress = FALSE, show_col_types = FALSE)
}
} else path.gff <- gff <- NULL
} else path.gff <- gff <- NULL
if(!is.null(loadJBrowse_vcf())){
if(loadJBrowse_vcf() != ""){
path.vcf <- loadJBrowse_vcf()
} else path.vcf <- NULL
} else path.vcf <- NULL
if(!is.null(loadJBrowse_align())){
if(loadJBrowse_align() != "") {
path.align <- loadJBrowse_align()
} else path.align <- NULL
} else path.align <- NULL
if(!is.null(loadJBrowse_wig())){
if(loadJBrowse_wig() != "") {
path.wig <- loadJBrowse_wig()
} else path.wig <- NULL
} else path.wig <- NULL
validate(
need(!is.null(path.fa), "Upload the genome information in upload session to access this feature.")
)
if(!grepl("^http", path.fa)){
data_server <- serve_data(dirname(path.fa), port = input$port)
} else data_server = NULL
list(path.fa = path.fa,
path.gff = path.gff,
path.vcf = path.vcf,
path.align = path.align,
path.wig = path.wig,
data_server = data_server,
gff = gff)
})
# Reset server
reset <- reactive({
if(!input$reset_server) {
if(!is.null(button()$data_server)){
button()$data_server$stop_server()
}
return(TRUE)
} else {
return(FALSE)
}
})
# Link the UI with the browser widget
output$browserOutput <- renderJBrowseR({
if(reset()) stop(safeError("The server is off, you can now submit new files in the upload tab."))
validate(
need(!(!is.null(loadMap()) & loadMap()$software == "polymapR"), "Feature not implemented for software polymapR."),
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature.")
)
if(!grepl("^http", button()$path.fa)){
assembly <- assembly(
paste0("http://127.0.0.1:", input$port, "/", basename(button()$path.fa)),
bgzip = TRUE
)
} else {
assembly <- assembly(
button()$path.fa,
bgzip = TRUE
)
}
## create configuration for a JB2 GFF FeatureTrack
if(!is.null(button()$path.gff)){
if(!grepl("^http", button()$path.gff)){
annotations_track <- track_feature(
paste0("http://127.0.0.1:", input$port, "/", basename(button()$path.gff)),
assembly
)
} else {
annotations_track <- track_feature(
button()$path.gff,
assembly
)
}
} else annotations_track <- NULL
if(!is.null(button()$path.vcf)){
if(!grepl("^http", button()$path.vcf)){
vcf_track <- track_variant(
paste0("http://127.0.0.1:", input$port, "/", basename(button()$path.vcf)),
assembly
)
} else {
vcf_track <- track_variant(
button()$path.vcf,
assembly
)
}
} else vcf_track <- NULL
if(!is.null(button()$path.align)){
if(!grepl("^http", button()$path.align)){
align_track <- track_alignments(
paste0("http://127.0.0.1:", input$port, "/", basename(button()$path.align)),
assembly
)
} else {
align_track <- track_alignments(
button()$path.align,
assembly
)
}
} else align_track <- NULL
if(!is.null(button()$path.wig)){
if(!grepl("^http", button()$path.wig)){
wiggle_track <- track_wiggle(
paste0("http://127.0.0.1:", input$port, "/", basename(button()$path.wig)),
assembly
)
} else {
wiggle_track <- track_wiggle(
button()$path.wig,
assembly
)
}
} else wiggle_track <- NULL
## create the tracks array to pass to browser
tracks <- tracks(annotations_track, vcf_track, align_track, wiggle_track)
## select default window
group <- as.numeric(input$group)
mk.pos <- loadMap()$maps[[group]]
mks <- mk.pos[order(mk.pos$l.dist),]
mks.range <- which(mks$l.dist >= input$range[1] & mks$l.dist <= input$range[2])
mks.range.1 <- mks$g.dist[mks.range[1]]
mks.range.2 <- mks$g.dist[mks.range[length(mks.range)]]
validate(
need(mks.range.1 < mks.range.2, "Inverted region. Check graphic `Genomic position (bp) x Linkage Map position (cM)`")
)
tracks_set <- c(annotations_track, vcf_track, align_track, wiggle_track)
theme <- JBrowseR::theme("#6c81c0", "#22284c")
if(any(!is.null(tracks_set))){
default_session <- default_session(
assembly,
tracks_set[which(!is.null(tracks_set))]
)
JBrowseR(
"View",
assembly = assembly,
tracks = tracks,
location = paste0(unique(mks$g.chr),":", mks.range.1,"..",mks.range.2),
defaultSession = default_session,
theme = theme
)
} else {
JBrowseR(
"View",
assembly = assembly,
location = paste0(unique(mks$g.chr),":", mks.range.1,"..",mks.range.2),
theme = theme
)
}
})
output$genes_ano <- DT::renderDataTable(server = FALSE, {
validate(
need(!(!is.null(loadMap()) & loadMap()$software == "polymapR"), "Feature not implemented for software polymapR."),
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature."),
need(!is.null(button()$gff), "Upload annotation file (.gff3) in the upload session to access this feature.")
)
group <- as.numeric(input$group)
mks<- loadMap()$maps[[group]]
mks <- mks[order(mks$l.dist),]
mks.range <- which(mks$l.dist >= input$range[1] & mks$l.dist <= input$range[2])
mks.range.1 <- mks$g.dist[mks.range[1]]
mks.range.2 <- mks$g.dist[mks.range[length(mks.range)]]
df <- button()$gff
colnames(df) <- c("seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes")
df <- df %>% filter(seqid == unique(mks$g.chr) & start > mks.range.1 & end < mks.range.2)
DT::datatable(df, extensions = 'Buttons',
options = list(
scrollX = TRUE,
dom = 'Bfrtlp',
buttons = c('copy', 'csv', 'excel', 'pdf')
),
class = "display")
})
## Downloads
# QTL profile
fn_downloadname <- reactive({
seed <- sample(1:1000,1)
if(input$fformat=="png") filename <- paste0("profile","_",seed,".png")
if(input$fformat=="tiff") filename <- paste0("profile","_",seed,".tiff")
if(input$fformat=="jpeg") filename <- paste0("profile","_",seed,".jpg")
if(input$fformat=="pdf") filename <- paste0("profile","_",seed,".pdf")
if(input$fformat=="RData") filename <- paste0("profile","_",seed,".RData")
return(filename)
})
# download profile
fn_download <- function()
{
idx <- which(unique(loadQTL()$profile$pheno) %in% input$phenotypes)
pl <- plot_profile(profile = loadQTL()$profile, qtl_info = loadQTL()$qtl_info, selected_mks = loadQTL()$selected_mks,
pheno.col = idx,
lgs.id = as.numeric(input$group),
range.min = input$range[1],
range.max = input$range[2],
by_range=T,
software = loadQTL()$software)
if(input$fformat!="RData"){
ggsave(pl, filename = fn_downloadname(),
width = input$width_profile, height = input$height_profile,
units = "mm", dpi = input$dpi_profile)
} else save(pl, file = fn_downloadname())
}
observe({
if (!is.null(loadQTL()) & input$width_profile > 1 & input$height_profile > 1 & input$dpi_profile > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download")
} else {
shinyjs::disable("bn_download")
}
})
# download handler
output$bn_download <- downloadHandler(
filename = fn_downloadname,
content = function(file) {
fn_download()
file.copy(fn_downloadname(), file, overwrite=T)
file.remove(fn_downloadname())
}
)
# Download cMxMb
fn_downloadname_phi <- reactive({
seed <- sample(1:1000,1)
if(input$fformat_phi=="png") filename <- paste0("linkageXphisical","_",seed,".png")
if(input$fformat_phi=="tiff") filename <- paste0("linkageXphisical","_",seed,".tiff")
if(input$fformat_phi=="jpeg") filename <- paste0("linkageXphisical","_",seed,".jpg")
if(input$fformat_phi=="pdf") filename <- paste0("linkageXphisical","_",seed,".pdf")
if(input$fformat_phi=="RData") filename <- paste0("linkageXphisical","_",seed,".RData")
return(filename)
})
# download
fn_download_phi <- function()
{
l.dist <- g.dist <- high <- mk.names <- NULL
map.lg <- loadMap()$maps[[as.numeric(input$group)]]
map.lg$high <- map.lg$g.dist
map.lg$high[round(map.lg$l.dist,5) < input$range[1] | round(map.lg$l.dist,5) > input$range[2]] <- "black"
map.lg$high[round(map.lg$l.dist,5) >= input$range[1] & round(map.lg$l.dist,5) <= input$range[2]] <- "red"
map.lg$high <- as.factor(map.lg$high)
p <- ggplot(map.lg, aes(x=l.dist, y = g.dist/1000, colour = high, text = paste("Marker:", mk.names, "\n",
"Genetic:", round(l.dist,2), "cM \n",
"Genomic:", g.dist/1000, "Mb"))) +
geom_point() + scale_color_manual(values=c('black','red')) +
labs(x = "Linkage map (cM)", y = "Reference genome (Mb)") +
theme_bw() + theme(legend.position = "none")
if(input$fformat_phi!="RData"){
ggsave(p, filename = fn_downloadname_phi(),
width = input$width_phi, height = input$height_phi,
units = "mm", dpi = input$dpi_phi)
} else save(p, file = fn_downloadname_phi())
}
observe({
if (!is.null(loadMap()) & input$width_phi > 1 & input$height_phi > 1 & input$dpi_phi > 1) {
if (loadMap()$software != "polymapR" ) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download_phi")
} else {
shinyjs::disable("bn_download_phi")
}
} else shinyjs::disable("bn_download_phi")
})
# download handler
output$bn_download_phi <- downloadHandler(
filename = fn_downloadname_phi,
content = function(file) {
fn_download_phi()
file.copy(fn_downloadname_phi(), file, overwrite=T)
file.remove(fn_downloadname_phi())
}
)
}
## To be copied in the UI
# mod_genes_view_ui("genes_view_ui_1")
## To be copied in the server
# mod_genes_view_server("genes_view_ui_1")
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/mod_genes_view.R
|
#' hidecan_view UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @importFrom shinyjs inlineCSS useShinyjs
#' @importFrom plotly plotlyOutput
#' @importFrom shiny NS tagList
#' @import hidecan
#'
#' @noRd
#'
mod_hidecan_view_ui <- function(id){
ns <- NS(id)
tagList(
fluidPage(
verticalLayout(
fluidRow(
column(width = 12,
div(style = "position:absolute;right:1em;",
div(
actionButton(ns("goMap"), "Go to Map", icon("arrow-circle-left", verify_fa = FALSE), class = "btn btn-primary"))
)
),
tags$h2(tags$b("HIDECAN")), br(), hr(),
column(12,
column(12,
box(
background = "light-blue",
"Required inputs (*)", br(),
)
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select dataset *",
pickerInput(ns("tracks"),
label = h4("Select data sets to be displayed as tracks:"),
choices = "This will be updated",
selected = "This will be updated",
options = list(
`actions-box` = TRUE,
size = 10,
`selected-text-format` = "count > 3"
),
multiple = TRUE),
pickerInput(ns("chrom"),
label = h4("Select chromosomes to be displayed:"),
choices = "This will be updated",
selected = "This will be updated",
options = list(
`actions-box` = TRUE,
size = 10,
`selected-text-format` = "count > 3"
),
multiple = TRUE))
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Define thresholds *",
## Input sliders for GWAS score threshold
sliderInput(ns("score_thr_gwas"), "Score threshold for GWAS results", value = 4, min = 0, max = 10, step = 0.1),br(),
## Input sliders for DE score and log2FC threshold
sliderInput(ns("score_thr_de"), "Score threshold for DE results", value = 1.3, min = 0, max = 10, step = 0.1), br(),
sliderInput(ns("log2fc_thr_de"), "log2(fold-change) threshold for DE results", value = 1, min = 0, max = 10, step = 0.1)
)
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", collapsible = TRUE, collapsed = TRUE, title = "HIDECAN plot options",
textInput(ns("title"), "Title"),
textInput(ns("subtitle"), "Subtitle"),
fluidRow(
column(6,
numericInput(ns("nrows"), "Number of rows", value = NULL, min = 1, max = Inf)),
column(6,
numericInput(ns("ncols"), "Number of columns", value = 2, min = 1, max = Inf))
),
selectInput(ns("legend_position"), "Legend position", c("bottom", "top", "left", "right", "none")),
fluidRow(
column(6,
numericInput(ns("point_size"), "Point size", value = 3, min = 0, max = Inf))
),
fluidRow(
column(6,
numericInput(ns("label_size"), "Label size", value = 3.5, min = 0, max = Inf)),
column(6,
numericInput(ns("label_padding"), "Label padding", value = 0.15, min = 0, max = Inf))
), br(),
textInput(ns("data_names"), label = "No inputs detected", value = NULL), br(),
checkboxInput(ns("colour_genes_by_score"), "Colour genes by score?", value = TRUE),
checkboxInput(ns("remove_empty_chrom"), "Remove empty chromosomes?", value = TRUE)
)
)
),
column(12,
box(id = ns("box_hidecan"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("hidecanID"), label = "HIDECAN plot"),
column(12,
box(
background = "light-blue",
"* HIDECAN analysis files or viewpoly object or example dataset (check `Input data` tab)"
)
),
column(12,
column(3,
useShinyjs(),
tags$head(tags$style(".butt{background-color:#add8e6; border-color: #add8e6; color: #337ab7;}")),
downloadButton(ns('bn_download'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_hidecan"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_hidecan"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_hidecan"), "DPI", value = 300)
)), br(),
column(12,
hr(),
uiOutput(ns("plot.ui"))
)
)
)
)
)
)
)
}
# Inputs for tests
# input <- list()
# input$colour_genes_by_score <- TRUE
# input$remove_empty_chrom <- TRUE
# input$title <- NULL
# input$subtitle <- NULL
# input$ncols <- 2
# input$legend_position <- "bottom"
# input$point_size <- 3
# input$label_size <- 3.5
# input$label_padding <- 0.15
# input$score_thr_gwas <- 4
# input$score_thr_de <- 1.3
# input$log2fc_thr_de <- 1
#' hidecan_view Server Functions
#'
#' @importFrom plotly ggplotly renderPlotly
#' @importFrom dplyr `%>%`
#' @importFrom shinyjs js
#' @noRd
mod_hidecan_view_server <- function(input, output, session,
loadHidecan,
parent_session){
ns <- session$ns
observeEvent(input$goMap, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "map")
})
observe({
if(!is.null(loadHidecan()$GWASpoly)){
shinyjs::disable("score_thr_gwas")
shinyjs::disable("score_thr_de")
shinyjs::disable("log2fc_thr_de")
} else if(length(loadHidecan()$GWAS)){
max_score <- max(sapply(loadHidecan()$GWAS, function(x){max(x$score, na.rm = TRUE)}))
updateSliderInput(inputId = "score_thr_gwas", max = round(max_score,2), step = round(max_score/20,1))
} else {
shinyjs::disable("score_thr_gwas")
}
if(length(loadHidecan()$DE)){
max_score <- max(sapply(loadHidecan()$DE, function(x){max(x$score, na.rm = TRUE)}))
max_log2fc <- max(sapply(loadHidecan()$DE, function(x){max(abs(x$log2FoldChange), na.rm = TRUE)}))
updateSliderInput(inputId = "score_thr_de", max = round(max_score,2), step = round(max_score/20,1))
updateSliderInput(inputId = "log2fc_thr_de", max = round(max_log2fc,2), step = round(max_log2fc/10,1))
} else {
shinyjs::disable("score_thr_de")
shinyjs::disable("log2fc_thr_de")
}
})
observe({
if (!is.null(hidecan_data()) & input$width_hidecan > 1 & input$height_hidecan > 1 & input$dpi_hidecan > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download")
} else {
shinyjs::disable("bn_download")
}
})
plot_nrows <- reactive({
res <- input$nrows
if(missing(res) | is.na(res)) res <- NULL
res
})
hidecan_data <- reactive({
if(!is.null(loadHidecan()[["GWASpoly"]])){
x <- loadHidecan()[["GWASpoly"]]$gwas_data_thr_list
} else {
x <- list()
}
## Adding file name as custom names for the different tracks
## For GWAS data, add file name if 1) there is some GWAS data from GWASpoly
## or 2) if there is more than one file uploaded
if(!is.null(loadHidecan()[["GWASpoly"]])){
csv_names_gwas <- names(loadHidecan()[["GWAS"]])
} else {
if(length(loadHidecan()[["GWAS"]]) == 0){
csv_names_gwas <- NULL
} else if(length(loadHidecan()[["GWAS"]]) == 1){
csv_names_gwas <- " "
} else {
csv_names_gwas <- names(loadHidecan()[["GWAS"]])
}
}
## For DE and GWAS data, add file name only if there is more than one file
## uploaded
csv_names <- c(
csv_names_gwas,
lapply(loadHidecan()[c("DE", "CAN")],
function(x){
if(length(x) == 0) return(NULL)
if(length(x) > 1) return(names(x))
return(" ")
}) |>
unlist()
)
x_csv <- c(
loadHidecan()[["GWAS"]] |>
lapply(hidecan::apply_threshold, input$score_thr_gwas),
loadHidecan()[["DE"]] |>
lapply(hidecan::apply_threshold, input$score_thr_de, input$log2fc_thr_de),
loadHidecan()[["CAN"]] |>
lapply(hidecan::apply_threshold)
)
## Use custom names computed above
names(x_csv) <- csv_names
x <- c(x, x_csv)
chrom_length <- combine_chrom_length(
c(
loadHidecan()[["GWASpoly"]][["gwas_data_list"]],
loadHidecan()[["GWAS"]],
loadHidecan()[["DE"]],
loadHidecan()[["CAN"]]
)
)
hidecan_data <- list(x, chrom_length)
hidecan_data
})
## Function to create a name for each dataset to use when choosing which
## dataset should be plotted
make_names_hidecan_data <- function(hidecan_list){
data_type_labels <- c("GWAS_data_thr" = "GWAS data",
"DE_data_thr" = "DE data",
"CAN_data_thr" = "Candidate genes list")
labels <- sapply(hidecan_list, function(x){class(x)[[1]]})
labels <- paste0(
data_type_labels[labels],
" (",
names(hidecan_list),
")"
)
labels <- sub(" ( )", "", labels, fixed = TRUE)
labels
}
## Function to create a placeholder in the text input section when adding
## custom prefix to track names
make_placeholders_hidecan_data <- function(hidecan_list){
data_type_labels <- c("GWAS_data_thr" = "[GWAS peaks]",
"DE_data_thr" = "[DE genes]",
"CAN_data_thr" = "[Candidate genes]")
labels <- sapply(hidecan_list, function(x){data_type_labels[class(x)[[1]]]}) |>
unname()
labels[names(hidecan_list) != " "] <- names(hidecan_list)[names(hidecan_list) != " "]
return(labels)
}
observe({
updateTextInput(inputId = "data_names",
label = paste0("Add custom prefix for your ",length(hidecan_data()[[1]]),
" tracks."), value = NULL, placeholder = make_placeholders_hidecan_data(hidecan_data()[[1]]))
track_choices <- as.list(make_names_hidecan_data(hidecan_data()[[1]]))
names(track_choices) <- make_names_hidecan_data(hidecan_data()[[1]])
updatePickerInput(session, "tracks",
label = "Select data sets to be displayed as tracks:",
choices = track_choices,
selected=unlist(track_choices))
chrom_choices <- as.list(unique(hidecan_data()[[2]]$chromosome))
names(chrom_choices) <- unique(hidecan_data()[[2]]$chromosome)
updatePickerInput(session, "chrom",
label = "Select chromosomes",
choices = chrom_choices,
selected=unlist(chrom_choices))
})
hidecan_plot <- reactive({
validate(
need(!is.null(loadHidecan()), "Upload HIDECAN information in the upload session to access this feature.")
)
x <- hidecan_data()[[1]]
x <- x[match(input$tracks, make_names_hidecan_data(x))]
## At the start of the app input$chrom is equal to "This will be updated"
## which would throw an error if trying to use it to subset the data
validate(
need(input$chrom != "This will be updated", "Waiting to initialise chromosomes selection.")
)
x <- lapply(x, function(y) y[which(y$chromosome %in% input$chrom),])
chrom_length <- hidecan_data()[[2]]
chrom_length <- chrom_length[match(input$chrom, chrom_length$chromosome),]
## Handling custom prefix for the tracks
if(input$data_names != ""){
## Read in the prefixes
new_names <- unlist(strsplit(input$data_names, ","))
## If one value is just space, it means no input
new_names[grep("^[:blank:]*$", new_names)] <- NA
## Making sure that there is a value per dataset (not more, not less)
## This will select the first n values if there are too many values
## or fill the vector with NAs if there are not enough values
length(new_names) <- length(x)
## For the tracks where there is no input, use what was originally planned
## (e.g. custom label or nothing)
names(x) <- coalesce(new_names, names(x))
} else{
names(x) <- names(x)
}
p <- create_hidecan_plot(x,
chrom_length,
colour_genes_by_score = input$colour_genes_by_score,
remove_empty_chrom = input$remove_empty_chrom,
title = input$title,
subtitle = input$subtitle,
n_rows = plot_nrows(),
n_cols = input$ncols,
legend_position = input$legend_position,
point_size = input$point_size,
label_size = input$label_size,
label_padding = input$label_padding)
p
})
plotHeight <- reactive({
validate(
need(!all(c(is.null(loadHidecan()$GWAS),is.null(loadHidecan()$GWASpoly))), "Upload HIDECAN information in upload session to access this feature."),
)
## Extract number of chromosomes directly from the plot
n.chr <- length(unique(hidecan_plot()$data$chromosome))
## Also use the number of tracks on the y axis
n.ytracks <- length(unique(hidecan_plot()$data$dataset))
size <- (n.ytracks * n.chr/input$ncols)*80
size
})
output$plot_hidecan <- renderPlot({
validate(
need(!is.null(loadHidecan()), "Upload HIDECAN information in the upload session to access this feature.")
)
hidecan_plot()
})
output$plot.ui <- renderUI({
plotOutput(ns("plot_hidecan"), height = plotHeight())
})
# HIDECAN download
fn_downloadname <- reactive({
seed <- sample(1:1000,1)
if(input$fformat=="png") filename <- paste0("hidecan","_",seed,".png")
if(input$fformat=="tiff") filename <- paste0("hidecan","_",seed,".tiff")
if(input$fformat=="jpeg") filename <- paste0("hidecan","_",seed,".jpg")
if(input$fformat=="pdf") filename <- paste0("hidecan","_",seed,".pdf")
if(input$fformat=="RData") filename <- paste0("hidecan","_",seed,".RData")
return(filename)
})
# download profile
fn_download <- function()
{
if(input$fformat!="RData"){
ggsave(hidecan_data(), filename = fn_downloadname(),
width = input$width_hidecan, height = input$height_hidecan,
units = "mm", dpi = input$dpi_hidecan)
} else save(hidecan_data(), file = fn_downloadname())
}
observe({
if (!is.null(hidecan_data()) & input$width_hidecan > 1 & input$height_hidecan > 1 & input$dpi_hidecan > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download")
} else {
shinyjs::disable("bn_download")
}
})
# download handler
output$bn_download <- downloadHandler(
filename = fn_downloadname,
content = function(file) {
fn_download()
file.copy(fn_downloadname(), file, overwrite=T)
file.remove(fn_downloadname())
}
)
}
## To be copied in the UI
# mod_hidecan_view_ui("hidecan_view_ui_1")
## To be copied in the server
# mod_hidecan_view_server("hidecan_view_ui_1")
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/mod_hidecan.R
|
#' map_view UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @importFrom shinyjs inlineCSS useShinyjs
#' @importFrom plotly plotlyOutput
#' @importFrom shiny NS tagList
#'
#' @noRd
#'
mod_map_view_ui <- function(id){
ns <- NS(id)
tagList(
fluidPage(
verticalLayout(
fluidRow(
inlineCSS(".form-group {margin-bottom: 0;}
.irs-with-grid {bottom: 0px;}
.irs-grid {height: 13px;}
.irs-grid-text {height: 0px;}
"
),
column(width = 12,
div(style = "position:absolute;right:1em;",
div(
actionButton(ns("goGenes"), "Go to Genome",icon("arrow-circle-left", verify_fa = FALSE), class = "btn btn-primary"),
actionButton(ns("goHidecan"), label = div("Go to HIDECAN", icon("arrow-circle-right", verify_fa = FALSE)), class = "btn btn-primary"))
)
),
tags$h2(tags$b("VIEWmap")), br(), hr(),
column(6,
column(12,
box(
background = "light-blue",
"Required inputs (*)", br(),
)
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select phenotypes *",
pickerInput(ns("phenotypes"),
label = h4("Phenotypes:"),
choices = "This will be updated",
selected = "This will be updated",
options = list(
`actions-box` = TRUE,
size = 10,
`selected-text-format` = "count > 3"
),
multiple = TRUE),
), br(),
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select linkage group *",
selectInput(inputId = ns("group"), label = p("Linkage group:"), choices = 1:15, selected = 1),
checkboxInput(ns("op"), label = "Show SNP names", value = TRUE)
), br(),
)
),
), hr(),
wellPanel(
sliderInput(ns("range"), "Map range (cM)", min = 0, max = 300,
value = c(0, 20), step = 1),
uiOutput(ns("interval"))
),
box(id= ns("box_profile"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("profileID"), label = "QTL profile"),
column(12,
box(
width = 5, background = "light-blue",
"* QTL analysis files or viewpoly object or example dataset (check `Input data` tab)",
)
),
column(3,
tags$head(tags$style(".butt{background-color:#add8e6; border-color: #add8e6; color: #337ab7;}")),
useShinyjs(),
downloadButton(ns('bn_download'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_profile"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_profile"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_profile"), "DPI", value = 300)
), br(),
column(12,
hr(),
plotlyOutput(ns("plot_qtl")),
)
), br(),
box(id = ns("box_map"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("mapID"), label = "Map"),
column(12,
box(
width = 5, background = "light-blue",
"* Linkage map files or viewpoly object or example dataset (check `Input data` tab)",
)
),
column(3,
downloadButton(ns('bn_download_map'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat_map"), "File type", choices=c("png","tiff","jpeg","pdf"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_map"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_map"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_map"), "DPI", value = 300)
), br(),
column(12,
hr(),
plotOutput(ns("plot_map"), height = "500px"), br(),
box(id = ns("box_phaplo"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("phaploID"), label = "Parents haplotypes table"),
DT::dataTableOutput(ns("parents_haplo"))
)
)
),
box(id = ns("box_mapsumm"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("mapsummID"), label = "Map summary"),
column(12,
box(
width = 5, background = "light-blue",
"* Linkage map files or viewpoly object or example dataset (check `Input data` tab)",
)
),
column(12,
DT::dataTableOutput(ns("summary")), br(), hr()
),
column(3,
downloadButton(ns('bn_download_summary'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat_summary"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T), br(),
),
column(2,
numericInput(ns("width_summary"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_summary"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_summary"), "DPI", value = 300)
), br(),
column(12,
hr(),
plotOutput(ns("map_summary"))
)
)
)
)
)
}
#' map_view Server Functions
#'
#' @importFrom plotly ggplotly renderPlotly
#' @importFrom dplyr `%>%`
#' @importFrom shinyjs js
#' @noRd
mod_map_view_server <- function(input, output, session,
loadMap, loadQTL,
parent_session){
ns <- session$ns
pheno <- LG <- NULL
#Collapse boxes
observeEvent(input$profileID, {
js$collapse(ns("box_profile"))
})
observeEvent(input$mapID, {
js$collapse(ns("box_map"))
})
observeEvent(input$mapsummID, {
js$collapse(ns("box_mapsumm"))
})
observeEvent(input$phaploID, {
js$collapse(ns("box_phaplo"))
})
observeEvent(input$goGenes, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "genes")
})
observeEvent(input$goHidecan, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "hidecan")
})
observe({
# Dynamic linkage group number
if(!is.null(loadMap())){
group_choices <- as.list(1:length(loadMap()$d.p1))
names(group_choices) <- 1:length(loadMap()$d.p1)
} else if(!is.null(loadQTL())){
group_choices <- as.list(1:length(unique(loadQTL()$selected_mks$LG)))
names(group_choices) <- 1:length(unique(loadQTL()$selected_mks$LG))
} else {
group_choices <- as.list("Upload map or QTL data in `upload` session.")
names(group_choices) <- "Upload map or QTL data in `upload` session."
}
updateSelectInput(session, "group",
label="Linkage group",
choices = group_choices,
selected= group_choices[[1]])
# Dynamic QTL
if(!is.null(loadQTL())){
pheno_choices <- as.list(unique(loadQTL()$profile$pheno))
names(pheno_choices) <- unique(loadQTL()$profile$pheno)
updatePickerInput(session, "phenotypes",
label = "Select phenotypes",
choices = pheno_choices,
selected=unlist(pheno_choices)[1])
} else {
updatePickerInput(session, "phenotypes",
label = "Phenotype:",
choices = "Upload QTL information to update",
selected= "Upload QTL information to update")
}
})
# Plot QTL bar
qtl.int <- reactive({
if(!is.null(loadQTL())){
data <- loadQTL()$qtl_info %>% filter(pheno %in% input$phenotypes & LG == input$group)
if(dim(data)[1] == 0) return(p(" "))
data <- data[order(data$Pos_lower, data$Pos_upper),]
command <- paste0(round(data$Pos_lower,0), ":", round(data$Pos_upper, 0))
seqs <- list()
for(i in 1:length(command))
seqs[[i]] <- eval(parse(text = command[i]))
maps.dist <- lapply(loadMap()$maps, function(x) {
y <- x$l.dist
names(y) <- x$mk.names
y
})
ch <- as.numeric(input$group)
max_updated <- as.numeric(maps.dist[[ch]][length(maps.dist[[ch]])])
qtls_pos <- Reduce(union, seqs)
chr_all <- 0:max_updated
idx.comp <- chr_all %in% qtls_pos
int <- chr_all[sequence(rle(idx.comp)$length) == 1]
int <- (int*100)/max_updated
# add start and end
ints_all <- unique(c(0,int, 100))
# add qtl
qtls <- (unique(sort(data$Pos))*100)/max_updated
qtls <- sort(c(qtls -0.3, qtls +0.3))
labs <- c(rep("int", length(ints_all)), rep(c("red","#34495E "), length(qtls/2)))
labs <- labs[order(c(ints_all, qtls))]
labs[which(labs == "red")-1] <- "#34495E "
labs[which(labs == "int")] <- "#D5D8DC"
labs <- labs[-length(labs)]
ints_all <- diff(sort(c(ints_all, qtls)))
# Each interval add small blank space to the scale - need to remove
reduce <- cumsum(ints_all)[length(cumsum(ints_all))] - 99.7
ints_all[which(labs != "red")] <- ints_all[which(labs != "red")] - reduce
# Add gradient colors
OrRd <- c("#FFF7EC", "#FEE8C8", "#FDD49E", "#FDBB84", "#FC8D59", "#EF6548", "#D7301F", "#B30000", "#7F0000")
if(length(labs[which(labs == "red")]) < 3){
qtl.colors <- OrRd[1:7][-c(1:5)][1:length(labs[which(labs == "red")])]
} else {
qtl.colors <- OrRd[1:length(labs[which(labs == "red")])]
}
labs[which(labs == "red")][order(as.numeric(data$Pval), decreasing = T)] <- qtl.colors
divs <- paste0("display:inline-block; width: ", ints_all ,"% ; background-color: ", labs, ";")
if(!is.null(input$phenotypes)){
divs_lst <- list()
for(i in 1:length(divs)){
divs_lst[[i]] <- div(id= paste0("belowslider",i), style= divs[i], p())
}
p(divs_lst, "QTL")
}
} else
return(p(" "))
})
output$interval <- renderUI({
qtl.int()
})
# Plot QTL profile
output$plot_qtl <- renderPlotly({
validate(
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature.")
)
idx <- which(unique(loadQTL()$profile$pheno) %in% input$phenotypes)
pl <- plot_profile(profile = loadQTL()$profile,
qtl_info = loadQTL()$qtl_info,
selected_mks = loadQTL()$selected_mks,
pheno.col = idx,
lgs.id = as.numeric(input$group),
range.min = input$range[1],
range.max = input$range[2], by_range=T)
ggplotly(source = "qtl_profile", pl, tooltip=c("Trait", "Position (cM)")) %>%
layout(legend = list(orientation = 'h', y = -0.3),
modebar = list(
remove = c("toImage",
"hovercompare",
"hoverCompareCartesian")),
clickmode ="none",
dragmode = FALSE)
})
# Plot map
output$plot_map <- renderPlot({
validate(
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature.")
)
maps.dist <- lapply(loadMap()$maps, function(x) {
y <- x$l.dist
names(y) <- x$mk.names
y
})
draw_map_shiny(left.lim = input$range[1],
right.lim = input$range[2],
ch = input$group,
d.p1 = loadMap()$d.p1,
d.p2 = loadMap()$d.p2,
maps.dist = maps.dist,
ph.p1 = loadMap()$ph.p1,
ph.p2 = loadMap()$ph.p2,
snp.names = input$op, software = loadMap()$software)
max_updated = reactive({
ch <- as.numeric(input$group)
as.numeric(maps.dist[[ch]][length(maps.dist[[ch]])])
})
observeEvent(max_updated, {
updateSliderInput(inputId = "range", max = round(max_updated(),2))
})
})
output$parents_haplo <- DT::renderDataTable(server = FALSE, {
validate(
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature.")
)
group <- as.numeric(input$group)
mks<- loadMap()$maps[[group]]
mks <- mks[order(mks$l.dist),]
mks.range <- which(mks$l.dist >= input$range[1] & mks$l.dist <= input$range[2])
p1 <- loadMap()$ph.p1[[group]][mks.range,]
#colnames(p1) <- paste0("p1.",1:dim(p1)[2])
p2 <- loadMap()$ph.p2[[group]][mks.range,]
#colnames(p2) <- paste0("p2.",1:dim(p2)[2])
p.haplo <- cbind(p1,p2)
DT::datatable(p.haplo, extensions = 'Buttons',
options = list(
dom = 'Bfrtlp',
buttons = c('copy', 'csv', 'excel', 'pdf')
),
class = "display")
})
# Map summary
output$summary <- DT::renderDataTable(server = FALSE, {
validate(
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature.")
)
summary <- summary_maps(loadMap(), loadMap()$software)
DT::datatable(summary, extensions = 'Buttons',
options = list(
scrollX = TRUE,
dom = 'Bfrtlp',
buttons = c('copy', 'csv', 'excel', 'pdf')
),
class = "display")
})
output$map_summary <- renderPlot({
validate(
need(!is.null(loadMap()$ph.p1), "Upload map information in the upload session to access this feature.")
)
plot_map_list(loadMap())
})
### Downloads
# QTL profile
fn_downloadname <- reactive({
seed <- sample(1:1000,1)
if(input$fformat=="png") filename <- paste0("profile","_",seed,".png")
if(input$fformat=="tiff") filename <- paste0("profile","_",seed,".tiff")
if(input$fformat=="jpeg") filename <- paste0("profile","_",seed,".jpg")
if(input$fformat=="pdf") filename <- paste0("profile","_",seed,".pdf")
if(input$fformat=="RData") filename <- paste0("profile","_",seed,".RData")
return(filename)
})
# download profile
fn_download <- function()
{
idx <- which(unique(loadQTL()$profile$pheno) %in% input$phenotypes)
pl <- plot_profile(profile = loadQTL()$profile, qtl_info = loadQTL()$qtl_info, selected_mks = loadQTL()$selected_mks,
pheno.col = idx,
lgs.id = as.numeric(input$group),
range.min = input$range[1],
range.max = input$range[2],
by_range=T,
software = loadQTL()$software)
if(input$fformat!="RData"){
ggsave(pl, filename = fn_downloadname(),
width = input$width_profile, height = input$height_profile,
units = "mm", dpi = input$dpi_profile)
} else save(pl, file = fn_downloadname())
}
observe({
if (!is.null(loadQTL()) & input$width_profile > 1 & input$height_profile > 1 & input$dpi_profile > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download")
} else {
shinyjs::disable("bn_download")
}
})
# download handler
output$bn_download <- downloadHandler(
filename = fn_downloadname,
content = function(file) {
fn_download()
file.copy(fn_downloadname(), file, overwrite=T)
file.remove(fn_downloadname())
}
)
# Map file
fn_downloadname_map <- reactive({
png <- tiff <- jpeg <- pdf <- dev.off <- NULL
seed <- sample(1:1000,1)
if(input$fformat_map=="png") filename <- paste0("map","_",seed,".png")
if(input$fformat_map=="tiff") filename <- paste0("map","_",seed,".tiff")
if(input$fformat_map=="jpeg") filename <- paste0("map","_",seed,".jpg")
if(input$fformat_map=="pdf") filename <- paste0("map","_",seed,".pdf")
return(filename)
})
# download profile
fn_download_map <- function()
{
maps <- lapply(loadMap()$maps, function(x) {
y <- x$l.dist
names(y) <- x$mk.names
y
})
if(input$fformat_map == "png"){
png(fn_downloadname_map(), width = input$width_map, height = input$height_map, units = "mm", res = input$dpi_map)
} else if(input$fformat_map == "tiff"){
tiff(fn_downloadname_map(), width = input$width_map, height = input$height_map, units = "mm", res = input$dpi_map)
} else if(input$fformat_map == "jpeg"){
jpeg(fn_downloadname_map(), width = input$width_map, height = input$height_map, units = "mm", res = input$dpi_map)
} else if(input$fformat_map == "pdf"){
pdf(fn_downloadname_map(), width = input$width_map, height = input$height_map, units = "mm", res = input$dpi_map)
}
draw_map_shiny(left.lim = input$range[1],
right.lim = input$range[2],
ch = input$group,
d.p1 = loadMap()$d.p1,
d.p2 = loadMap()$d.p2,
maps.dist = maps,
ph.p1 = loadMap()$ph.p1,
ph.p2 = loadMap()$ph.p2,
snp.names = input$op, software = loadMap()$software)
dev.off()
}
observe({
if (!is.null(loadMap()) & input$width_map > 1 & input$height_map > 1 & input$dpi_map > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download_map")
} else {
shinyjs::disable("bn_download_map")
}
})
# download handler
output$bn_download_map <- downloadHandler(
filename = fn_downloadname_map,
content = function(file) {
fn_download_map()
file.copy(fn_downloadname_map(), file, overwrite=T)
file.remove(fn_downloadname_map())
}
)
# Summary map
fn_downloadname_summary <- reactive({
png <- tiff <- jpeg <- pdf <- dev.off <- NULL
seed <- sample(1:1000,1)
if(input$fformat_summary=="png") filename <- paste0("map","_",seed,".png")
if(input$fformat_summary=="tiff") filename <- paste0("map","_",seed,".tiff")
if(input$fformat_summary=="jpeg") filename <- paste0("map","_",seed,".jpg")
if(input$fformat_summary=="pdf") filename <- paste0("map","_",seed,".pdf")
if(input$fformat_summary=="RData") filename <- paste0("map","_",seed,".RData")
return(filename)
})
# download profile
fn_download_summary <- function()
{
png <- tiff <- jpeg <- pdf <- dev.off <- NULL
maps <- lapply(loadMap()$maps, function(x) {
y <- x$l.dist
names(y) <- x$mk.names
y
})
if(input$fformat_summary != "RData"){
if(input$fformat_summary == "png"){
png(fn_downloadname_summary(), width = input$width_summary, height = input$height_summary, units = "mm", res = input$dpi_summary)
} else if(input$fformat_summary == "tiff"){
tiff(fn_downloadname_summary(), width = input$width_summary, height = input$height_summary, units = "mm", res = input$dpi_summary)
} else if(input$fformat_summary == "jpeg"){
jpeg(fn_downloadname_summary(), width = input$width_summary, height = input$height_summary, units = "mm", res = input$dpi_summary)
} else if(input$fformat_summary == "pdf"){
pdf(fn_downloadname_summary(), width = input$width_summary, height = input$height_summary, units = "mm", res = input$dpi_summary)
}
plot_map_list(loadMap())
dev.off()
} else {
p <- plot_map_list(loadMap())
save(p, file = fn_downloadname_summary())
}
}
observe({
if (!is.null(loadMap()) & input$width_summary > 1 & input$height_summary > 1 & input$dpi_summary > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download_summary")
} else {
shinyjs::disable("bn_download_summary")
}
})
# download handler
output$bn_download_summary <- downloadHandler(
filename = fn_downloadname_summary,
content = function(file) {
fn_download_summary()
file.copy(fn_downloadname_summary(), file, overwrite=T)
file.remove(fn_downloadname_summary())
}
)
}
## To be copied in the UI
# mod_map_view_ui("map_view_ui_1")
## To be copied in the server
# mod_map_view_server("map_view_ui_1")
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/mod_map_view.R
|
#' qtl_view UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @import shinydashboard
#' @import shinyWidgets
#' @importFrom shinyjs inlineCSS useShinyjs
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_qtl_view_ui <- function(id){
ns <- NS(id)
tagList(
useShinyjs(),
extendShinyjs(text = jscode, functions = "collapse"),
fluidPage(
verticalLayout(
fluidRow(
column(width = 12,
div(style = "position:absolute;right:1em;",
div(
actionButton(ns("goUploads"), "Go to Input data", icon("arrow-circle-left", verify_fa = FALSE), class = "btn btn-primary"),
actionButton(ns("goGenes"), label = div("Go to Genome", icon("arrow-circle-right", verify_fa = FALSE)), class = "btn btn-primary"))
)
),
tags$h2(tags$b("VIEWqtl")), br(), hr(),
column(6,
column(12,
box(
background = "light-blue",
"Required inputs (*)", br(),
)
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select linkage group/s *",
pickerInput(ns("group"),
label = h6("Linkage group/s:"),
choices = "This will be updated",
selected = "This will be updated",
options = list(
`actions-box` = TRUE,
size = 10,
`selected-text-format` = "count > 3"
),
multiple = TRUE)
)
),
column(6,
box(width = 12, solidHeader = TRUE, status="info", title = "Select phenotype/s *",
pickerInput(ns("phenotypes"),
label = h6("Phenotype/s:"),
choices = "This will be updated",
selected = "This will be updated",
options = list(
`actions-box` = TRUE,
size = 10,
`selected-text-format` = "count > 3"
),
multiple = TRUE)
)
)
),
column(12,
box(id = ns("box_profile"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("profileID"), label = "QTL profile"),
column(12,
box(
background = "light-blue",
"* QTL analysis files or viewpoly object or example dataset (check `Input data` tab)"
)
),
column(12,
column(3,
useShinyjs(),
tags$head(tags$style(".butt{background-color:#add8e6; border-color: #add8e6; color: #337ab7;}")),
downloadButton(ns('bn_download'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_profile"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_profile"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_profile"), "DPI", value = 300)
)), br(),
column(12,
hr(),
plotOutput(ns("plot_qtl"),
click=ns("plot_click"), brush = ns("plot_brush"))
),
box(id = ns("box_effects"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("effectsID"), label = "Effects"),
column(12,
box(
background = "light-blue",
"* QTL analysis files or viewpoly object or example dataset (check `Input data` tab)", br(),
"* Selection of QTL/s (triangle/s at the bottom of QTL profile graphic)"
)
),
div(style = "position:absolute;right:3em;",
radioButtons(ns("effects_design"), "Design",
choices = c("Additive (bar)" = "bar", "Additive (circle)" = "circle", "Alleles combination" = "digenic"),
selected = "bar")
), br(), br(),
column(3,
downloadButton(ns('bn_download_effects'), "Download", class = "butt")
),
column(3,
numericInput(ns("width_effects"), "Width (mm)", value = 180),
),
column(3,
numericInput(ns("height_effects"), "Height (mm)", value = 120),
),
column(3,
numericInput(ns("dpi_effects"), "DPI", value = 300)
), br(),
column(12,
column(6,
radioButtons(ns("fformat_effects"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(6,
textInput(ns("parents_name"), "Parents name", value = "P1, P2")
),
),
column(12,
hr(),
uiOutput(ns("plot.ui"))
)
), br(),
box(id = ns("box_haplo"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("haploID"), label = "Progeny haplotypes"),
column(12,
box(
background = "light-blue",
"* QTLpoly analysis files or viewpoly object or example dataset (check `Input data` tab)", br(),
"* Selection of QTL/s (triangle/s at the bottom of QTL profile graphic)"
)
),
column(12,
actionBttn(ns("haplo_update"), style = "jelly", color = "royal", size = "sm", label = "update available haplotypes", icon = icon("refresh", verify_fa = FALSE)),
br(), br(),
pickerInput(ns("haplo"),
label = h6("Select haplotypes*"),
choices = "Click on `update available haplotype` to update",
selected = "Click on `update available haplotype` to update",
options = pickerOptions(
size = 15,
`selected-text-format` = "count > 3",
`live-search`=TRUE,
actionsBox = TRUE,
dropupAuto = FALSE,
dropdownAlignRight = TRUE
),
multiple = TRUE), br(),
pickerInput(ns("haplo_exclude"),
label = h6("Exclude haplotypes (optional)"),
choices = "Click on `update available haplotype` to update",
selected = "Click on `update available haplotype` to update",
options = pickerOptions(
size = 15,
`selected-text-format` = "count > 3",
`live-search`=TRUE,
actionsBox = TRUE,
dropupAuto = FALSE,
dropdownAlignRight = TRUE
),
multiple = TRUE), br(),
actionBttn(ns("haplo_submit"), style = "jelly", color = "royal", size = "sm", label = "submit selected haplotypes*", icon = icon("share-square", verify_fa = FALSE)),
br(), hr()),
column(3,
downloadButton(ns('bn_download_haplo'), "Download", class = "butt")
),
column(3,
radioButtons(ns("fformat_haplo"), "File type", choices=c("png","tiff","jpeg","pdf", "RData"), selected = "png", inline = T)
),
column(2,
numericInput(ns("width_haplo"), "Width (mm)", value = 180),
),
column(2,
numericInput(ns("height_haplo"), "Height (mm)", value = 120),
),
column(2,
numericInput(ns("dpi_haplo"), "DPI", value = 300)
), br(),
column(12,
hr(),
htmlOutput(ns("ind_names")), hr(),
uiOutput(ns("plot_haplo.ui"))
)
),
box(id = ns("box_bree"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("breeID"), label = "Breeding values"),
column(12,
box(
background = "light-blue",
"* QTLpoly analysis files or viewpoly object or example dataset (check `Input data` tab)", br(),
"* Selection of QTL/s (triangle/s at the bottom of QTL profile graphic)"
)
),
column(12,
DT::dataTableOutput(ns("breeding_values"))
)
), br(), br(),
box(id = ns("box_summary"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("summaryID"), label = "QTL summary"),
column(12,
box(
background = "light-blue",
"* QTL analysis files or viewpoly object or example dataset (check `Input data` tab)", br(),
"* Selection of QTL/s (triangle/s at the bottom of QTL profile graphic)"
)
),
column(12,
DT::dataTableOutput(ns("info"))
)
)
)
)
)
)
)
)
}
#' qtl_view Server Functions
#'
#' @importFrom ggpubr ggarrange
#' @import shinydashboard
#' @importFrom shinyjs js
#'
#' @noRd
mod_qtl_view_server <- function(input, output, session,
loadMap, loadQTL,
parent_session){
ns <- session$ns
#Collapse boxes
observeEvent(input$profileID, {
js$collapse(ns("box_profile"))
})
observeEvent(input$effectsID, {
js$collapse(ns("box_effects"))
})
observeEvent(input$haploID, {
js$collapse(ns("box_haplo"))
})
observeEvent(input$breeID, {
js$collapse(ns("box_bree"))
})
observeEvent(input$summaryID, {
js$collapse(ns("box_summary"))
})
observe({
# Dynamic linkage group number
if(!is.null(loadMap())){
group_choices <- as.list(1:length(loadMap()$d.p1))
names(group_choices) <- 1:length(loadMap()$d.p1)
} else if(!is.null(loadQTL())){
group_choices <- as.list(1:length(unique(loadQTL()$selected_mks$LG)))
names(group_choices) <- 1:length(unique(loadQTL()$selected_mks$LG))
} else {
group_choices <- as.list("Upload map or QTL data in `upload` session.")
names(group_choices) <- "Upload map or QTL data in `upload` session."
}
if(length(group_choices) < 5) the_choice <- group_choices[[1]] else the_choice <- group_choices[[5]]
updatePickerInput(session, "group",
label="Linkage group/s:",
choices = group_choices,
selected= the_choice)
# Dynamic QTL
if(!is.null(loadQTL())){
pheno_choices <- as.list(unique(loadQTL()$profile$pheno))
names(pheno_choices) <- unique(loadQTL()$profile$pheno)
updatePickerInput(session, "phenotypes",
label = "Phenotype/s:",
choices = pheno_choices,
selected=unlist(pheno_choices)[1])
} else {
updatePickerInput(session, "phenotypes",
label = "Phenotype/s:",
choices = "Upload QTL information to update",
selected= "Upload QTL information to update")
}
})
observeEvent(input$goGenes, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "genes")
})
observeEvent(input$goUploads, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "upload")
})
qtl.data <- reactive({
validate(
need(length(input$phenotypes) != 0 & input$phenotypes != "Upload QTL information to update", "Select at least one phenotype"),
need(length(input$group) != 0 & input$group != "Upload map or QTL data in `upload` session.", "Select at least one linkage group"),
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature.")
)
idx <- which(unique(loadQTL()$profile$pheno) %in% input$phenotypes)
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("building graphic..."))
pl <- plot_profile(profile = loadQTL()$profile,
qtl_info = loadQTL()$qtl_info,
selected_mks = loadQTL()$selected_mks,
pheno.col = idx,
lgs.id = as.numeric(input$group),
by_range=F, plot = F)
})
})
output$plot_qtl <- renderPlot({
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("building graphic..."))
only_plot_profile(pl.in = qtl.data())
})
})
effects.data <- reactive({
validate(
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature."),
need(!is.null(input$plot_brush), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
df <- try(brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat"))
validate(
need(dim(df)[1] > 0, "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
parents <- unlist(strsplit(input$parents_name, ","))
parents <- gsub(" ", "", parents)
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("Getting data..."))
data <- data_effects(qtl_info = loadQTL()$qtl_info,
effects = loadQTL()$effects,
pheno.col = as.character(df$Trait),
lgs = df$LG,
position = df$`Position (cM)`,
groups = as.numeric(input$group),
software = loadQTL()$software,
design = input$effects_design,
parents = parents)
})
})
output$effects <- renderPlot({
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("building graphic..."))
plot_effects(effects.data(), software = loadQTL()$software, design = input$effects_design)
})
})
plotHeight <- reactive({
validate(
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature."),
need(!is.null(input$plot_brush), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
dframe <- try(brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat"))
validate(
need(!inherits(dframe, "try-error"), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
counts <- nrow(dframe)
counts <- ceiling(counts/4)
if(counts == 0) counts <- 1
if(loadQTL()$software == "polyqtlR") {
size <- counts*650
} else if(input$effects_design == "bar" | input$effects_design == "digenic"){
size <- counts*350
} else if(input$effects_design == "circle"){
counts <- length(unique(dframe$LG))
counts <- ceiling(counts/2)
if(counts == 0) counts <- 1
size <- counts*500
}
size
})
output$plot.ui <- renderUI({
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("building graphic..."))
plotOutput(ns("effects"), height = plotHeight())
})
})
observeEvent(input$haplo_update,{
if(!is.null(loadQTL())){
if(loadQTL()$software == "polyqtlR" | loadQTL()$software == "diaQTL") {
dframe <- NULL
updatePickerInput(session, "haplo",
label = "Select haplotypes",
choices = paste0("Feature not implemented for software: ", loadQTL()$software),
selected= paste0("Feature not implemented for software: ", loadQTL()$software))
updatePickerInput(session, "haplo_exclude",
label = "Exclude haplotypes (optional)",
choices = paste0("Feature not implemented for software: ", loadQTL()$software),
selected= paste0("Feature not implemented for software: ", loadQTL()$software))
} else if(!is.null(input$plot_brush)){
dframe <- brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat")
} else {
dframe <- NULL
updatePickerInput(session, "haplo",
label = "Select haplotypes",
choices = "Select QTL in the profile graphic to update",
selected= "Select QTL in the profile graphic to update")
updatePickerInput(session, "haplo_exclude",
label = "Exclude haplotypes (optional)",
choices = "Select QTL in the profile graphic to update",
selected= "Select QTL in the profile graphic to update")
}
} else {
dframe <- NULL
updatePickerInput(session, "haplo",
label = "Select haplotypes",
choices = "Upload the QTL information in upload session to access this feature.",
selected= "Upload the QTL information in upload session to access this feature.")
updatePickerInput(session, "haplo_exclude",
label = "Exclude haplotypes (optional)",
choices = "Upload the QTL information in upload session to access this feature.",
selected= "Upload the QTL information in upload session to access this feature.")
}
if(!is.null(dframe)){
if(input$effects_design == "digenic" | input$effects_design == "circle") {
updatePickerInput(session, "haplo",
label = "Select haplotypes",
choices = "Select `bar` design to access this feature.",
selected= "Select `bar` design to access this feature.")
updatePickerInput(session, "haplo_exclude",
label = "Exclude haplotypes (optional)",
choices = "Select `bar` design to access this feature.",
selected= "Select `bar` design to access this feature.")
} else {
haplo_choices <- paste0("Trait:", dframe$Trait, "_LG:", dframe$LG, "_Pos:", dframe$`Position (cM)`)
alleles <- effects.data()[[1]]$data$Alleles[!grepl("_",effects.data()[[1]]$data$Alleles)]
alleles <- rep(alleles, length(haplo_choices))
haplo_choices <- rep(haplo_choices, each = length(alleles)/length(haplo_choices))
haplo_choices <- paste0(haplo_choices, "_homolog:", alleles)
haplo_choices <- as.list(haplo_choices)
names(haplo_choices) <- unlist(haplo_choices)
updatePickerInput(session, "haplo",
label = "Select haplotypes",
choices = haplo_choices,
selected= haplo_choices[1:3])
updatePickerInput(session, "haplo_exclude",
label = "Exclude haplotypes (optional)",
choices = haplo_choices,
selected= NULL)
}
}
})
haplo_data <- eventReactive(input$haplo_submit, {
validate(
need(all(input$haplo != paste0("Feature not implemented for software: ", loadQTL()$software)), paste0("Feature not implemented for software: ", loadQTL()$software)),
need(all(input$haplo != "Click on `update available haplotype` to update"), "Click on `update available haplotype` to update"),
need(all(input$haplo != "Select QTL in the profile graphic to update"), "Select QTL in the profile graphic to update"),
need(all(input$haplo != "Select `bar` design to access this feature."), "Select `bar` design to access this feature.")
)
list.p <- select_haplo(input.haplo = as.list(input$haplo),
exclude.haplo = as.list(input$haplo_exclude),
probs = loadQTL()$probs,
selected_mks = loadQTL()$selected_mks,
effects.data = effects.data())
p <- list.p[[1]]
inds <- list.p[[2]]
counts <- ceiling(length(p)/3)
if(counts == 0) counts <- 1
size <- counts*450
list(p, size, inds)
})
output$haplotypes <- renderPlot({
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("building graphic..."))
nrow.lst <- ceiling(length(haplo_data()[[1]])/3)
if(nrow.lst == 0) nrow.lst <- 1
p.all <- ggarrange(plotlist = haplo_data()[[1]], ncol = 3, nrow = nrow.lst, common.legend = TRUE)
})
p.all
})
output$plot_haplo.ui <- renderUI({
plotOutput(ns("haplotypes"), height = haplo_data()[[2]])
})
output$ind_names <- renderUI({
x <- paste0("<strong>Number of individuals with selected haplotypes: ",length(haplo_data()[[3]])," ","<br><strong>Individual's ID </strong>: ", paste(haplo_data()[[3]], collapse = ", "))
HTML(x)
})
output$info <- DT::renderDataTable(server = FALSE, {
validate(
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature."),
need(!is.null(input$plot_brush), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
dframe <- try(brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat"))
validate(
need(!inherits(dframe, "try-error"), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
dframe <- dframe[,-c(dim(dframe)[2]-1,dim(dframe)[2])]
if(loadQTL()$software == "QTLpoly"){
colnames(dframe)[c(2,4,5,6,7)] <- c("Linkage group", "Lower interval (cM)", "Upper interval (cM)", "p-value", "h2")
} else if(loadQTL()$software == "diaQTL") {
colnames(dframe)[c(2,4,5,6)] <- c("Linkage group", "Lower interval (cM)", "Upper interval (cM)", "LL")
} else if(loadQTL()$software == "polyqtlR"){
dframe <- dframe[,-c(4,5)]
colnames(dframe)[c(2,4)] <- c("Linkage group", "Threshold")
}
DT::datatable(dframe, extensions = 'Buttons',
options = list(
dom = 'Bfrtlp',
buttons = c('copy', 'csv', 'excel', 'pdf')
),
class = "display")
})
# Breeding values
output$breeding_values <- DT::renderDataTable(server = FALSE, {
validate(
need(!is.null(loadQTL()), "Upload the QTL information in upload session to access this feature."),
need(loadQTL()$software == "QTLpoly", paste("Feature not implemented for software:",loadQTL()$software)),
need(!is.null(input$plot_brush), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
dframe <- try(brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat"))
validate(
need(!inherits(dframe, "try-error"), "Select at least one triangle on the bottom of the QTL profile graphic. The triangles refer to QTL peaks detected. You can click and brush your cursor to select more than one.")
)
pos <- split(dframe$`Position (cM)`, dframe$Trait)
dt <- breeding_values(loadQTL()$qtl_info, loadQTL()$probs,
loadQTL()$selected_mks, loadQTL()$blups,
loadQTL()$beta.hat, pos)
rownames(dt) <- NULL
DT::datatable(dt, extensions = 'Buttons',
options = list(
dom = 'Bfrtlp',
buttons = c('copy', 'csv', 'excel', 'pdf')
),
class = "display")
})
# Download profile
# create filename
fn_downloadname <- reactive({
seed <- sample(1:1000,1)
if(input$fformat=="png") filename <- paste0("profile","_",seed,".png")
if(input$fformat=="tiff") filename <- paste0("profile","_",seed,".tiff")
if(input$fformat=="jpeg") filename <- paste0("profile","_",seed,".jpg")
if(input$fformat=="pdf") filename <- paste0("profile","_",seed,".pdf")
if(input$fformat=="RData") filename <- paste0("profile","_",seed,".RData")
return(filename)
})
# download profile
fn_download <- function()
{
p <- only_plot_profile(pl.in = qtl.data())
if(input$fformat!="RData"){
ggsave(p, filename = fn_downloadname(),
width = input$width_profile, height = input$height_profile, units = "mm", dpi = input$dpi_profile)
} else save(p, file = fn_downloadname())
}
observe({
if (!is.null(loadQTL()) & input$width_profile > 1 & input$height_profile > 1 & input$dpi_profile > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download")
} else {
shinyjs::disable("bn_download")
}
})
# download handler
output$bn_download <- downloadHandler(
filename = fn_downloadname,
content = function(file) {
fn_download()
file.copy(fn_downloadname(), file, overwrite=T)
file.remove(fn_downloadname())
}
)
# Download effects
# create filename
fn_downloadname_effects <- reactive({
seed <- sample(1:1000,1)
if(input$fformat_effects=="png") filename <- paste0("effects","_",seed,".png")
if(input$fformat_effects=="tiff") filename <- paste0("effects","_",seed,".tiff")
if(input$fformat_effects=="jpeg") filename <- paste0("effects","_",seed,".jpg")
if(input$fformat_effects=="pdf") filename <- paste0("effects","_",seed,".pdf")
if(input$fformat_effects=="RData") filename <- paste0("effects","_",seed,".RData")
return(filename)
})
# download
fn_download_effects <- function()
{
validate(
need(!is.null(input$plot_brush), "Select a point or region on QTL profile graphic.")
)
df <- brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat")
parents <- unlist(strsplit(input$parents_name, ","))
parents <- gsub(" ", "", parents)
data <- data_effects(qtl_info = loadQTL()$qtl_info,
effects = loadQTL()$effects,
pheno.col = as.character(df$Trait),
lgs = df$LG,
parents = parents,
position = df$`Position (cM)`,
groups = as.numeric(input$group),
software = loadQTL()$software,
design = input$effects_design)
plots <- plot_effects(data, software = loadQTL()$software, design = input$effects_design)
if(input$fformat_effects!="RData"){
ggsave(plots, filename = fn_downloadname_effects(), height = input$height_effects,
width = input$width_effects, units = "mm", bg = "white", dpi = input$dpi_effects)
} else save(data, file = fn_downloadname_effects())
}
shinyjs::disable("bn_download_effects")
# To make observeEvent watch more than one input
toListen <- reactive({
list(input$plot_brush, input$plot_brush, input$width_effects, input$height_effects, input$dpi_effects)
})
observeEvent(toListen(),{
df <- brushedPoints(qtl.data()[[2]], input$plot_brush, xvar = "x", yvar = "y.dat")
if (dim(df)[1] > 0 & !is.null(loadQTL()) & !is.null(input$plot_brush) & input$width_effects > 1 & input$height_effects > 1 & input$dpi_effects > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download_effects")
} else {
shinyjs::disable("bn_download_effects")
}
})
# download handler
output$bn_download_effects <- downloadHandler(
filename = fn_downloadname_effects,
content = function(file) {
fn_download_effects()
file.copy(fn_downloadname_effects(), file, overwrite=T)
file.remove(fn_downloadname_effects())
}
)
# Download haplotypes
shinyjs::disable("bn_download_haplo")
# create filename
fn_downloadname_haplo <- reactive({
seed <- sample(1:1000,1)
if(input$fformat_haplo=="png") filename <- paste0("haplotypes","_",seed,".png")
if(input$fformat_haplo=="tiff") filename <- paste0("haplotypes","_",seed,".tiff")
if(input$fformat_haplo=="jpeg") filename <- paste0("haplotypes","_",seed,".jpg")
if(input$fformat_haplo=="pdf") filename <- paste0("haplotypes","_",seed,".pdf")
if(input$fformat_haplo=="RData") filename <- paste0("haplotypes","_",seed,".RData")
return(filename)
})
# download
fn_download_haplo <- function()
{
p <- select_haplo(input$haplo, loadQTL()$probs, loadQTL()$selected_mks, effects.data())
plots <- ggarrange(plotlist = p, ncol = 3, common.legend = TRUE)
if(input$fformat_haplo!="RData"){
ggsave(plots, filename = fn_downloadname_haplo(), height = input$height_haplo,
width = input$width_haplo, units = "mm", bg = "white", dpi = input$dpi_haplo)
} else save(p, file = fn_downloadname_haplo())
}
observe({
if (input$haplo_submit & length(grep("Trait",input$haplo)) > 0 & !is.null(input$plot_brush) & input$height_haplo > 1 & input$width_haplo > 1 & input$dpi_haplo > 1) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("bn_download_haplo")
} else {
shinyjs::disable("bn_download_haplo")
}
})
# download handler
output$bn_download_haplo <- downloadHandler(
filename = fn_downloadname_haplo,
content = function(file) {
fn_download_haplo()
file.copy(fn_downloadname_haplo(), file, overwrite=T)
file.remove(fn_downloadname_haplo())
}
)
}
## To be copied in the UI
# mod_qtl_view_ui("qtl_view_ui_1")
## To be copied in the server
# mod_qtl_view_server("qtl_view_ui_1")
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/mod_qtl_view.R
|
#' upload UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shinyjs inlineCSS useShinyjs
#' @importFrom hidecan get_example_data GWAS_data_from_gwaspoly GWAS_data DE_data CAN_data
#' @importFrom shiny NS tagList
mod_upload_ui <- function(id){
ns <- NS(id)
tagList(
fluidRow(
column(width = 12,
div(style = "position:absolute;right:1em;",
div(
actionButton(ns("goAbout"), "Go to About",icon("arrow-circle-left", verify_fa = FALSE), class = "btn btn-primary"),
actionButton(ns("goQTL"), label = div("Go to QTL", icon("arrow-circle-right", verify_fa = FALSE)), class = "btn btn-primary")), br(),
div(style = "position:absolute;right:0em;",
actionButton(ns("reset_all"), "Reset all",icon("undo-alt", verify_fa = FALSE), class = "btn btn-danger"))
),
tags$h2(tags$b("Input data")), br(),
"Use this module to select an example dataset or to upload yours.", br(), br()
), br(),
column(width = 12,
fluidPage(
box(id= ns("box_example"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="primary", title = actionLink(inputId = ns("exampleID"), label = tags$b("Available example datasets")),
radioButtons(ns("example_map"), label = p("They contain a results for a subset of tetraploid potato linkage map and QTL analysis."),
choices = c("Potato - Atlantic x B1829-5" = "tetra_map"),
selected = "tetra_map"), br(), br(), hr(),
tags$p("Access complete example datasets ",
tags$a(href= "https://www.polyploids.org/input-tests","here"))
)
)
), br(),
column(width = 12,
fluidPage(
box(id = ns("box_map"), width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("mapID"), label = tags$b("Upload linkage map files")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("reset_map"), style = "jelly", color = "royal", size = "sm", label = "reset", icon = icon("undo-alt", verify_fa = FALSE))
), br(), br(),
box(id = ns("box_mappoly"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("mappolyID"), label = tags$b("Upload MAPpoly output")),
tags$p("Access further information about how to build a linkage maps with MAPpoly ",
tags$a(href= "https://rpubs.com/mmollin/tetra_mappoly_vignette","here")), br(),
tags$p("Access a example code of how to obtain these inputs using MAPpoly functions ",
tags$a(href= "https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html#Upload_linkage_map_files","here")),
hr(),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_mappoly"), style = "jelly", color = "royal", size = "sm", label = "submit MAPpoly", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
tags$p("Object of class `mappoly.map`."),
fileInput(ns("mappoly_in"), label = h6("File: my_mappoly_list.RData"), multiple = F),
),
box(id = ns("box_polymap"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary",title = actionLink(inputId = ns("polymapID"), label = tags$b("Upload polymapR output")),
tags$p("Access further information about how to build a linkage maps with polymapR ",
tags$a(href= "https://cran.r-project.org/web/packages/polymapR/vignettes/Vignette_polymapR.html","here")), br(),
tags$p("Access a example code of how to obtain these inputs using polymapR functions ",
tags$a(href= "https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html#Upload_linkage_map_files","here")),
hr(),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_polymapR"), style = "jelly", color = "royal", size = "sm", label = "submit polymapR", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
p("Indicates whether the genotype input is discrete or probabilistic."),
prettyRadioButtons(
inputId = ns("input.type"),
label = "Data type:",
choices = c("discrete" = "discrete", "probabilistic" = "probabilistic"),
selected = "discrete",
inline = TRUE,
status = "info",
fill = TRUE
), br(),
p("Indicates the dataset specie ploidy."),
prettyRadioButtons(
inputId = ns("ploidy"),
label = "Ploidy:",
choices = c(4, 6),
selected = 4,
inline = TRUE,
status = "info",
fill = TRUE
), br(),
fileInput(ns("polymapR.dataset"), label = h6("File: polymapR.dataset.RData"), multiple = F),
fileInput(ns("polymapR.map"), label = h6("File: polymapR.map.RData"), multiple = F),
),
box(id = ns("box_onemap"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("onemapID"), label = tags$b("Upload OneMap output")),
tags$p("Access further information about how to build a linkage maps for diploid outcrossing populations with OneMap ",
tags$a(href= "https://cristianetaniguti.github.io/Tutorials/onemap/vignettes_highres/Outcrossing_Populations.html","here")), br(),
tags$p("Access further information about how to build a linkage maps for diploid inbred based populations with OneMap ",
tags$a(href= "https://cristianetaniguti.github.io/Tutorials/onemap/vignettes_highres/Inbred_Based_Populations.html","here")), br(),
tags$p("Access a example code of how to obtain these inputs using OneMap functions ",
tags$a(href= "https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html#Upload_linkage_map_files","here")),
hr(),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_onemap"), style = "jelly", color = "royal", size = "sm", label = "submit OneMap", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
tags$p("Object of class `viewmap`."),
fileInput(ns("onemap_in"), label = h6("File: my_onemap_map.RData"), multiple = F),
),
box(id = ns("box_mapst"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary",title = actionLink(inputId = ns("mapstID"), label = tags$b("Upload linkage map files with standard format (.csv, .tsv or .tsv.gz)")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_map_custom"), style = "jelly", color = "royal", size = "sm", label = "submit map custom", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
fileInput(ns("dosages"), label = h6("File: dosages.tsv"), multiple = F),
fileInput(ns("genetic_map"), label = h6("File: genetic_map.tsv"), multiple = F),
fileInput(ns("phases"), label = h6("File: phases.tsv"), multiple = F),
p("Upload here an TSV file with table with three columns: 1) marker ID; 2) genome position; 3) chromosome"),
fileInput(ns("mks.pos"), label = h6("File: marker information"), multiple = F),
"Check the input file formats with the example files:", br(),
radioButtons(ns("downloadType_map"), "",
choices = c("dosages.tsv" = "dosages",
"genetic_map.tsv" = "genetic_map",
"phases.tsv" = "phases"),
inline = TRUE), br(), br(),
downloadButton(ns("downloadData_map"), "Download"),
),
)
)
), br(),
column(width = 12,
fluidPage(
box(id = ns("box_qtl"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("qtlID"), label = tags$b("Upload QTL analysis files")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("reset_qtl"), style = "jelly", color = "royal", size = "sm", label = "reset", icon = icon("undo-alt", verify_fa = FALSE))
), br(), br(),
box(id= ns("box_qtlpoly"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("qtlpolyID"), label = tags$b("Upload QTLpoly output")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_qtlpoly"), style = "jelly", color = "royal", size = "sm", label = "submit QTLpoly", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
tags$p("Access further information about how to perform QTL analysis with QTLpoly ",
tags$a(href= "https://guilherme-pereira.github.io/QTLpoly/1-tutorial","here")), br(),
tags$p("Access a example code of how to obtain these inputs using QTLpoly functions ",
tags$a(href= "https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html#Upload_QTL_analysis_files","here")),
hr(),
fileInput(ns("qtlpoly_data"), label = h6("File: QTLpoly_data.RData", br(), br(),"Object of class: qtlpoly.data"), multiple = F),
fileInput(ns("qtlpoly_remim.mod"), label = h6("File: QTLpoly_remim.mod.RData", br(), br(), "Object of class: qtlpoly.remim"), multiple = F),
fileInput(ns("qtlpoly_est.effects"), label = h6("File: QTLpoly_est.effects.RData", br(), br(),"Object of class: qtlpoly.effects"), multiple = F),
fileInput(ns("qtlpoly_fitted.mod"), label = h6("File: QTLpoly_fitted.mod.RData", br(), br(), "Object of class: qtlpoly.fitted"), multiple = F),
),
box(id = ns("box_diaqtl"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("diaqtlID"), label = tags$b("Upload diaQTL output")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_diaQTL"), style = "jelly", color = "royal", size = "sm", label = "submit diaQTL", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
tags$p("Access further information about how to perform QTL analysis with diaQTL ",
tags$a(href= "https://jendelman.github.io/diaQTL/diaQTL_Vignette.html","here")), br(),
tags$p("Access a example code of how to obtain these inputs using diaQTL functions ",
tags$a(href= "https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html#Upload_QTL_analysis_files","here")),
hr(),
fileInput(ns("diaQTL_scan1"), label = h6("File: diaQTL_scan1_list.RData"), multiple = F),
fileInput(ns("diaQTL_scan1.summaries"), label = h6("File: diaQTL_scan1.summaries_list.RData"), multiple = F),
fileInput(ns("diaQTL_BayesCI"), label = h6("File: diaQTL_BayesCI_list.RData"), multiple = F),
fileInput(ns("diaQTL_fitQTL"), label = h6("File: diaQTL_fitQTL_list.RData"), multiple = F),
),
box(id = ns("box_polyqtl"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("polyqtlID"), label = tags$b("Upload polyqtlR output")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_polyqtlR"), style = "jelly", color = "royal", size = "sm", label = "submit polyqtlR", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
tags$p("Access further information about how to perform QTL analysis with polyqtlR ",
tags$a(href= "https://cran.r-project.org/web/packages/polyqtlR/vignettes/polyqtlR_vignette.html","here")), br(),
tags$p("Access a example code of how to obtain these inputs using polyqtlR functions ",
tags$a(href= "https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html#Upload_QTL_analysis_files","here")),
hr(),
fileInput(ns("polyqtlR_effects"), label = h6("File: polyqtlR_effects.RData"), multiple = F), hr(),
fileInput(ns("polyqtlR_qtl_info"), label = h6("File: polyqtlR_qtl_info.RData"), multiple = F),
fileInput(ns("polyqtlR_QTLscan_list"), label = h6("File: polyqtlR_QTLscan_list.RData"), multiple = F),
),
box(id = ns("box_qtlst"),width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("qtlstID"), label = tags$b("Upload QTL analysis results with standard format (.csv, .tsv or .tsv.gz)")),
div(style = "position:absolute;right:1em;",
actionBttn(ns("submit_qtl_custom"), style = "jelly", color = "royal", size = "sm", label = "submit QTL custom", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
fileInput(ns("selected_mks"), label = h6("File: selected_mks.tsv"), multiple = F),
fileInput(ns("qtl_info"), label = h6("File: qtl_info.tsv"), multiple = F),
fileInput(ns("blups"), label = h6("File: blups.tsv"), multiple = F),
fileInput(ns("beta.hat"), label = h6("File: beta.hat.tsv"), multiple = F),
fileInput(ns("profile"), label = h6("File: profile.tsv"), multiple = F),
fileInput(ns("effects"), label = h6("File: effects.tsv"), multiple = F),
fileInput(ns("probs"), label = h6("File: probs.tsv"), multiple = F),
"Check the input format with the example file:", br(), br(),
radioButtons(ns("downloadType_qtl"), "",
choices = c("selected_mks.tsv" = "selected_mks",
"qtl_info.tsv" = "qtl_info",
"blups.tsv" = "blups",
"beta.hat.tsv" = "beta.hat",
"profile.tsv" = "profile",
"effects.tsv" = "effects",
"probs.tsv" = "probs"),
inline = TRUE), br(), br(),
downloadButton(ns("downloadData_qtl"), "Download"),
)
)
)
), br(),
column(width = 12,
fluidPage(
box(id = ns("box_genome"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("genomeID"), label = tags$b("Upload Genome Browser files")),
tags$p("Access further information about the files expected in this section ",
tags$a(href= "https://gmod.github.io/JBrowseR/articles/creating-urls.html","here")), br(),
column(6,
tags$h5(tags$b("Upload genome information")),
box(
width = NULL, background = "red",
"Warning! The uploaded .fasta, .gff3, .vcf, .bam, .cram, .wig genome version must be the same one used to build the genetic map"
)
),
column(6,
div(style = "position:absolute;right:1em;",
actionBttn(ns("reset_genome"), style = "jelly", color = "royal", size = "sm", label = "reset", icon = icon("undo-alt", verify_fa = FALSE)), br(),br(),
actionBttn(ns("submit_genome"), style = "jelly", color = "royal", size = "sm", label = "submit", icon = icon("share-square", verify_fa = FALSE)), br(),br(),
),
), br(), br(),
column(12,
br(),
tags$h5(tags$b("Upload .fasta/.fasta.gz and .fasta.fai/.fasta.gz.fai,.fasta.gz.gzi file with assembly information. Using this option, a local HTTP server will be generated.")),
fileInput(ns("fasta"), label = h6("Files: genome_v2.fasta.gz, genome_v2.fasta.gz.fai, genome_v2.fasta.gz.gzi"), multiple = T),
p("or"),
tags$h5(tags$b("Add the URL of the hosted FASTA file location. The loading procedure is more efficient using this option.")),
textInput(ns("fasta_server"), label = h6("https://jbrowse.org/genomes/sars-cov2/fasta/sars-cov2.fa.gz"), value = NULL),
br(), hr(),
tags$h5(tags$b("Upload .gff3/.gff3.gz and .gff3.tbi/.gff3.gz.tbi file with annotation information")),
fileInput(ns("gff3"), label = h6("Files: genome_v2.gff3.gz, genome_v2.gff3.gz.tbi"), multiple = T),
p("or"),
tags$h5(tags$b("Add the URL of the hosted GFF3 file location. The loading procedure is more efficient using this option.")),
textInput(ns("gff3_server"), label = h6("https://jbrowse.org/genomes/sars-cov2/sars-cov2-annotations.sorted.gff.gz"), value = NULL),
br(), hr(),
tags$h5(tags$b("Upload VCF file with variants information")),
fileInput(ns("vcf"), label = h6("Files: markers.vcf, markers.vcf.tbi"), multiple = T),
p("or"),
tags$h5(tags$b("Add the URL of the hosted VCF file location. The loading procedure is more efficient using this option.")),
textInput(ns("vcf_server"), label = h6("https://some/path/file.vcf"), value = NULL),
br(), hr(),
tags$h5(tags$b("Upload .bam and .bam.bai or .cram and .cram.crai file with alignment information")),
fileInput(ns("align"), label = h6("Files: all_ind.bam, all_ind.bam.bai"), multiple = T),
p("or"),
tags$h5(tags$b("Add the URL of the hosted BAM or CRAM file location. The loading procedure is more efficient using this option.")),
textInput(ns("align_server"), label = h6("https://some/path/file.bam"), value = NULL),
br(), hr(),
tags$h5(tags$b("Upload .wig file with bigWig information")),
fileInput(ns("wig"), label = h6("File: data.wig"), multiple = F),
p("or"),
tags$h5(tags$b("Add the URL of the hosted WIG file location. The loading procedure is more efficient using this option.")),
textInput(ns("wig_server"), label = h6("https://some/path/file.wig"), value = NULL),
)
)
)
),
column(width = 12,
fluidPage(
box(id = ns("box_hidecan"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("hidecanID"), label = tags$b("Upload Hidecan files")),
tags$p("Access further information about the files expected in this section ",
tags$a(href= "https://plantandfoodresearch.github.io/hidecan/","here")), br(),
div(style = "position:absolute;right:1em;",
actionBttn(ns("reset_hidecan"), style = "jelly", color = "royal", size = "sm", label = "reset", icon = icon("undo-alt", verify_fa = FALSE)),
actionBttn(ns("submit_hidecan"), style = "jelly", color = "royal", size = "sm", label = "submit HIDECAN", icon = icon("share-square", verify_fa = FALSE)),
), br(), br(),
box(id= ns("box_gwaspoly"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("gwaspolyID"), label = tags$b("Upload GWAS output")),
div(style = "position:absolute;right:1em;",
), br(), br(),
p("Object of class GWASpoly.thresh obtained with the GWASpoly::set.threshold():"), br(),
fileInput(ns("gwaspoly"), label = h6("File: gwaspoly_res_thr.rda"), multiple = F),
p("or"),
fileInput(ns("gwas"), label = h6("File: gwas.csv"), multiple = T)
),
box(id= ns("box_gwas_de"), width = 12, solidHeader = FALSE, collapsible = TRUE, collapsed = TRUE, status="primary", title = actionLink(inputId = ns("gwasID"), label = tags$b("Upload differential expression (DE) and candidate genes (CAN) files")),
div(style = "position:absolute;right:1em;",
), br(), br(),
fileInput(ns("de"), label = h6("File: DE.csv"), multiple = T),
fileInput(ns("can"), label = h6("File: CAN.csv"), multiple = T)
)
)
)
),
column(width = 12,
fluidPage(
box(id = ns("box_viewpoly"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = TRUE, status="info", title = actionLink(inputId = ns("viewpolyID"), label = tags$b("Download VIEWpoly dataset")),
p("The uploaded data are converted to the viewpoly format. It keeps the map and the QTL information. Genome information is not stored."), br(),
textInput(ns("data.name"), label = p("Define the dataset name. Do not use spaces between words."), value = "dataset_name"), br(),
tags$head(tags$style(".butt{background-color:#add8e6; border-color: #add8e6; color: #337ab7;}")),
useShinyjs(),
downloadButton(ns('export_viewpoly'), "Download", class = "butt")
)
)
),
column(width = 12,
fluidPage(
box(id = ns("box_viewpolyup"),width = 12, solidHeader = TRUE, collapsible = TRUE, collapsed = FALSE, status="info", title = actionLink(inputId = ns("viewpolyupID"), label = tags$b("Upload VIEWpoly dataset")),
column(8,
radioButtons(ns("viewpoly_env"), width = 500, label = "Check one of the availables datasets:",
choices = "There is no VIEWpoly object in your R environment. Load VIEWpoly object or convert other formats below.",
selected = "There is no VIEWpoly object in your R environment. Load VIEWpoly object or convert other formats below."), br(),
),
column(4,
div(style = "position:absolute;right:1em;",
actionBttn(ns("reset_viewpoly"), style = "jelly", color = "royal", size = "sm", label = "reset", icon = icon("undo-alt", verify_fa = FALSE)), br(), br(),
actionBttn(ns("submit_viewpoly"), style = "jelly", color = "royal", size = "sm", label = "submit VIEWpoly file", icon = icon("share-square", verify_fa = FALSE))
)
),
column(12,
br(), br(), hr(),
p("Upload VIEWpoly RData file here:"),
fileInput(ns("viewpoly_input"), label = h6("File: dataset_name.RData"), multiple = F)
)
)
)
)
)
)
}
#' upload Server Functions
#'
#' @import vroom
#' @importFrom shinyjs js
#' @importFrom utils packageVersion
#'
#' @noRd
mod_upload_server <- function(input, output, session, parent_session){
ns <- session$ns
#Collapse boxes
observeEvent(input$exampleID, {
js$collapse(ns("box_example"))
})
observeEvent(input$mapID, {
js$collapse(ns("box_map"))
})
observeEvent(input$mappolyID, {
js$collapse(ns("box_mappoly"))
})
observeEvent(input$onemapID, {
js$collapse(ns("box_onemap"))
})
observeEvent(input$polymapID, {
js$collapse(ns("box_polymap"))
})
observeEvent(input$mapstID, {
js$collapse(ns("box_mapst"))
})
observeEvent(input$qtlID, {
js$collapse(ns("box_qtl"))
})
observeEvent(input$qtlpolyID, {
js$collapse(ns("box_qtlpoly"))
})
observeEvent(input$diaqtlID, {
js$collapse(ns("box_diaqtl"))
})
observeEvent(input$polyqtlID, {
js$collapse(ns("box_polyqtl"))
})
observeEvent(input$qtlstID, {
js$collapse(ns("box_qtlst"))
})
observeEvent(input$genomeID, {
js$collapse(ns("box_genome"))
})
observeEvent(input$hidecanID, {
js$collapse(ns("box_hidecan"))
})
observeEvent(input$viewpolyID, {
js$collapse(ns("box_viewpoly"))
})
observeEvent(input$viewpolyupID, {
js$collapse(ns("box_viewpolyup"))
})
# Check environment
observe({
Objs <- Filter(function(x) inherits(get(x), 'viewpoly' ), ls(envir = .GlobalEnv) )
if(length(Objs) > 0){
dataset_choices <- as.list(Objs)
names(dataset_choices) <- Objs
updateRadioButtons(session, "viewpoly_env",
label="Check one of the availables datasets:",
choices = dataset_choices,
selected= character(0))
} else {
updateRadioButtons(session, "viewpoly_env",
label="Check one of the availables datasets:",
choices = "There is no viewpoly object in your R environment. Load view viewpoly object or convert formats below",
selected= character(0))
}
})
# Format examples
output$downloadData_map <- downloadHandler(
filename = function() {
paste0(input$downloadType_map, ".tsv")
},
content = function(file) {
if(input$downloadType_map == "dosages") {
filetemp <- vroom(system.file("ext/dosage.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_map == "phases") {
filetemp <- vroom(system.file("ext/phases.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_map == "genetic_map") {
filetemp <- vroom(system.file("ext/map.tsv.gz", package = "viewpoly"))
}
vroom_write(filetemp, file = file)
}
)
output$downloadData_qtl <- downloadHandler(
filename = function() {
paste0(input$downloadType_qtl, ".tsv")
},
content = function(file) {
if(input$downloadType_qtl == "qtl_info") {
filetemp <- vroom(system.file("ext/qtl_info.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_qtl == "blups") {
filetemp <- vroom(system.file("ext/blups.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_qtl == "beta.hat") {
filetemp <- vroom(system.file("ext/beta.hat.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_qtl == "profile.hat") {
filetemp <- vroom(system.file("ext/profile.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_qtl == "effects.hat") {
filetemp <- vroom(system.file("ext/effects.tsv.gz", package = "viewpoly"))
} else if(input$downloadType_qtl == "probs") {
filetemp <- vroom(system.file("ext/probs.tsv.gz", package = "viewpoly"))
}
vroom_write(filetemp, file = file)
}
)
observeEvent(input$goQTL, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "qtl")
})
observeEvent(input$goAbout, {
updateTabsetPanel(session = parent_session, inputId = "viewpoly",
selected = "about")
})
# Reset buttons
values <- reactiveValues(
upload_state_map = 0,
upload_state_mappoly = 0,
upload_state_onemap = 0,
upload_state_polymapR = 0,
upload_state_map_custom = 0,
upload_state_qtl = 0,
upload_state_qtlpoly = 0,
upload_state_diaQTL = 0,
upload_state_polyqtlR = 0,
upload_state_qtl_custom = 0,
upload_state_genome = 0,
upload_state_hidecan = 0
)
observeEvent(input$reset_all, {
values$upload_state_viewpoly <- 'reset'
values$upload_state_map <- 'reset'
values$upload_state_mappoly = 0
values$upload_state_onemap = 0
values$upload_state_polymapR = 0
values$upload_state_map_custom = 0
values$upload_state_qtl <- 'reset'
values$upload_state_qtlpoly = 0
values$upload_state_diaQTL = 0
values$upload_state_polyqtlR = 0
values$upload_state_qtl_custom = 0
values$upload_state_genome <- 'reset'
values$upload_state_hidecan <- 'reset'
})
observeEvent(input$reset_viewpoly, {
values$upload_state_viewpoly <- 'reset'
})
observeEvent(input$reset_map, {
values$upload_state_map <- 'reset'
values$upload_state_mappoly = 0
values$upload_state_onemap = 0
values$upload_state_polymapR = 0
values$upload_state_map_custom = 0
})
observeEvent(input$reset_qtl, {
values$upload_state_qtl <- 'reset'
values$upload_state_qtlpoly = 0
values$upload_state_diaQTL = 0
values$upload_state_polyqtlR = 0
values$upload_state_qtl_custom = 0
})
observeEvent(input$reset_genome, {
values$upload_state_genome <- 'reset'
})
observeEvent(input$reset_hidecan, {
values$upload_state_hidecan <- 'reset'
})
observeEvent(input$submit_viewpoly, {
values$upload_state_viewpoly <- 'uploaded'
})
observeEvent(input$submit_mappoly, {
values$upload_state_mappoly <- 'uploaded'
values$upload_state_map <- 0
})
observeEvent(input$submit_onemap, {
values$upload_state_onemap <- 'uploaded'
values$upload_state_map <- 0
})
observeEvent(input$submit_polymapR, {
values$upload_state_polymapR <- 'uploaded'
values$upload_state_map <- 0
})
observeEvent(input$submit_map_custom, {
values$upload_state_map_custom <- 'uploaded'
values$upload_state_map <- 0
})
observeEvent(input$submit_qtlpoly, {
values$upload_state_qtlpoly <- 'uploaded'
values$upload_state_qtl = 0
})
observeEvent(input$submit_diaQTL, {
values$upload_state_diaQTL <- 'uploaded'
values$upload_state_qtl = 0
})
observeEvent(input$submit_polyqtlR, {
values$upload_state_polyqtlR <- 'uploaded'
values$upload_state_qtl = 0
})
observeEvent(input$submit_qtl_custom, {
values$upload_state_qtl_custom <- 'uploaded'
values$upload_state_qtl = 0
})
observeEvent(input$submit_genome, {
values$upload_state_genome <- 'uploaded'
})
observeEvent(input$submit_hidecan, {
values$upload_state_hidecan <- 'uploaded'
})
input_map <- reactive({
if (values$upload_state_map == 0 &
values$upload_state_mappoly == 0 &
values$upload_state_onemap == 0 &
values$upload_state_polymapR == 0 &
values$upload_state_map_custom == 0) {
return(NULL)
} else if (values$upload_state_map == 'reset') {
return(NULL)
} else if(values$upload_state_mappoly == "uploaded"){
validate(
need(!is.null(input$mappoly_in), "Upload mappoly file before submit")
)
return(list(mappoly_in = input$mappoly_in))
} else if(values$upload_state_onemap == "uploaded"){
validate(
need(!is.null(input$onemap_in), "Upload onemap file before submit")
)
return(list(onemap_in = input$onemap_in))
} else if(values$upload_state_polymapR == "uploaded"){
validate(
need(!is.null(input$polymapR.dataset), "Upload polymapR dataset file before submit"),
need(!is.null(input$polymapR.map), "Upload polymapR map file before submit")
)
return(list(polymapR.dataset = input$polymapR.dataset,
polymapR.map = input$polymapR.map,
input.type = input$input.type,
ploidy = as.numeric(input$ploidy)))
} else if(values$upload_state_map_custom == "uploaded"){
validate(
need(!is.null(input$dosages), "Upload custom dosages file before submit"),
need(!is.null(input$phases), "Upload custom phases file before submit"),
need(!is.null(input$genetic_map), "Upload custom genetic map file before submit")
)
return(list(dosages = input$dosages,
phases = input$phases,
genetic_map = input$genetic_map))
}
})
input_qtl <- reactive({
if (values$upload_state_qtl == 0 &
values$upload_state_qtlpoly == 0 &
values$upload_state_diaQTL == 0 &
values$upload_state_polyqtlR == 0 &
values$upload_state_qtl_custom == 0) {
return(NULL)
} else if (values$upload_state_qtl == 'reset') {
return(NULL)
} else if(values$upload_state_qtl_custom == "uploaded"){
validate(
need(!is.null(input$dosages), "Upload custom selected markers file before submit"),
need(!is.null(input$phases), "Upload custom QTL info file before submit"),
need(!is.null(input$blups), "Upload custom BLUPs file before submit"),
need(!is.null(input$beta.hat), "Upload custom beta hat file before submit"),
need(!is.null(input$profile), "Upload custom QTL profile file before submit"),
need(!is.null(input$effects), "Upload custom effects file before submit"),
need(!is.null(input$probs), "Upload custom genotype probabilities file before submit")
)
return(list(selected_mks = input$selected_mks,
qtl_info = input$qtl_info,
blups = input$blups,
beta.hat = input$beta.hat,
profile = input$profile,
effects = input$effects,
probs = input$probs))
} else if(values$upload_state_qtlpoly == "uploaded"){
validate(
need(!is.null(input$qtlpoly_data), "Upload QTLpoly data file before submit"),
need(!is.null(input$qtlpoly_remim.mod), "Upload QTLpoly remim.mod file before submit"),
need(!is.null(input$qtlpoly_est.effects), "Upload QTLpoly estimated effects file before submit"),
need(!is.null(input$qtlpoly_fitted.mod), "Upload QTLpoly fitted.mod file before submit")
)
return(list(
qtlpoly_data = input$qtlpoly_data,
qtlpoly_remim.mod = input$qtlpoly_remim.mod,
qtlpoly_est.effects = input$qtlpoly_est.effects,
qtlpoly_fitted.mod = input$qtlpoly_fitted.mod))
} else if(values$upload_state_diaQTL == "uploaded"){
validate(
need(!is.null(input$diaQTL_scan1), "Upload diaQTL scan1 file before submit"),
need(!is.null(input$diaQTL_scan1.summaries), "Upload diaQTL scan1.summaries file before submit"),
need(!is.null(input$diaQTL_fitQTL), "Upload diaQTL fitQTL file before submit"),
need(!is.null(input$diaQTL_BayesCI), "Upload diaQTL BayesCI file before submit")
)
return(list(
diaQTL_scan1 = input$diaQTL_scan1,
diaQTL_scan1.summaries = input$diaQTL_scan1.summaries,
diaQTL_fitQTL = input$diaQTL_fitQTL,
diaQTL_BayesCI = input$diaQTL_BayesCI
))
} else if(values$upload_state_polyqtlR == "uploaded"){
validate(
need(!is.null(input$qtlpoly_data), "Upload polyqtlR scan list file before submit"),
need(!is.null(input$qtlpoly_remim.mod), "Upload polyqtlR QTL info file before submit"),
need(!is.null(input$qtlpoly_est.effects), "Upload polyqtlR estimated effects file before submit")
)
return(list(
polyqtlR_QTLscan_list = input$polyqtlR_QTLscan_list,
polyqtlR_qtl_info = input$polyqtlR_qtl_info,
polyqtlR_effects = input$polyqtlR_effects
))
}
})
input_genome <- reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading fasta path..."))
if (is.null(values$upload_state_genome)) {
return(NULL)
} else if (values$upload_state_genome == 'reset') {
return(NULL)
} else if(values$upload_state_genome == "uploaded"){
validate(
need(!is.null(input$fasta) | !is.null(input$fasta_server), "Upload reference genome (FASTA) file before submit.")
)
return(list(fasta = input$fasta,
fasta_server = input$fasta_server,
gff3 = input$gff3,
gff3_server = input$gff3_server,
vcf = input$vcf,
vcf_server = input$vcf_server,
align = input$align,
align_server = input$align_server,
wig = input$wig,
wig_server = input$wig_server))
}
})
})
input_hidecan <- reactive({
if (values$upload_state_hidecan == 0) {
return(NULL)
} else if (values$upload_state_hidecan == 'reset') {
return(NULL)
} else if(values$upload_state_hidecan == "uploaded"){
validate(
need(!all(c(is.null(input$gwas),is.null(input$gwaspoly))), "Upload GWAS results file before submit")
)
if(!is.null(input$gwaspoly)) {
temp <- load(input$gwaspoly$datapath)
gwaspoly <- get(temp)
gwaspoly <- GWAS_data_from_gwaspoly(gwaspoly)
} else gwaspoly <- NULL
return(list(GWASpoly = gwaspoly,
GWAS = {if(!is.null(input$gwas)) read_input_hidecan(input$gwas, GWAS_data) else list()},
DE = {if(!is.null(input$de)) read_input_hidecan(input$de, DE_data) else list()},
CAN = {if(!is.null(input$can)) read_input_hidecan(input$can, CAN_data) else list()}))
}
})
# Wait system for the uploads
loadExample = reactive({
if(is.null(input_map()$dosages) & is.null(input_map()$phases) & is.null(input_map()$genetic_map) &
is.null(input_map()$mappoly_in) &
is.null(input_map()$onemap_in) &
is.null(input_map()$polymapR.dataset) &
is.null(input_map()$polymapR.map) &
is.null(input_qtl()$selected_mks) &
is.null(input_qtl()$qtl_info) &
is.null(input_qtl()$blups) &
is.null(input_qtl()$beta.hat) &
is.null(input_qtl()$profile) &
is.null(input_qtl()$effects) &
is.null(input_qtl()$probs) &
is.null(input_qtl()$qtlpoly_data) &
is.null(input_qtl()$qtlpoly_remim.mod) &
is.null(input_qtl()$qtlpoly_est.effects) &
is.null(input_qtl()$qtlpoly_fitted.mod) &
is.null(input_qtl()$diaQTL_data) &
is.null(input_qtl()$diaQTL_scan1) &
is.null(input_qtl()$diaQTL_scan1.summaries) &
is.null(input_qtl()$diaQTL_fitQTL) &
is.null(input_qtl()$diaQTL_BayesCI) &
is.null(input_qtl()$polyqtlR_QTLscan_list) &
is.null(input_qtl()$polyqtlR_qtl_info) &
is.null(input_qtl()$polyqtlR_effects) &
is.null(input_genome()$fasta) &
is.null(input_genome()$fasta_server) &
is.null(input_genome()$gff3) &
is.null(input_genome()$gff3_server) &
is.null(input_genome()$vcf) &
is.null(input_genome()$vcf_server) &
is.null(input_genome()$align) &
is.null(input_genome()$align_server) &
is.null(input_genome()$wig) &
is.null(input_genome()$wig_server) &
is.null(input$viewpoly_input) &
is.null(input$viewpoly_env))
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("Uploading example map data..."))
prepare_examples(input$example_map)
})
else NULL
})
# Load hidecan example
loadHidecanExample = reactive({
if(is.null(input_hidecan()$gwas) & is.null(input_hidecan()$de) & is.null(input_hidecan()$can))
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("Uploading example map data..."))
x <- get_example_data()
list("GWAS" = list(GWAS_data(x[["GWAS"]])),
"DE" = list(DE_data(x[["DE"]])),
"CAN" = list(CAN_data(x[["CAN"]])))
})
else NULL
})
loadViewpoly = reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading viewpoly file..."))
if (is.null(values$upload_state_viewpoly)) {
return(NULL)
} else if (values$upload_state_viewpoly == 'reset') {
return(NULL)
} else if(values$upload_state_viewpoly == "uploaded"){
if(is.null(input$viewpoly_input) & is.null(input$viewpoly_env)){
warning("Upload a viewpoly dataset or select one available in your R environment before submit.")
viewpoly.obj <- NULL
} else if(!is.null(input$viewpoly_input)){
temp <- load(input$viewpoly_input$datapath)
viewpoly.obj <- get(temp)
} else if(!is.null(input$viewpoly_env)) {
viewpoly.obj = get(input$viewpoly_env)
}
return(viewpoly.obj)
}
})
})
loadMap_custom = reactive({
if(!(is.null(input_map()$dosages) & is.null(input_map()$phases) & is.null(input_map()$genetic_map))){
req(input_map()$dosages, input_map()$phases, input_map()$genetic_map)
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("Uploading custom map data..."))
prepare_map_custom_files(input_map()$dosages,
input_map()$phases,
input_map()$genetic_map)
})
} else NULL
})
loadMap_mappoly = reactive({
if(!is.null(input_map()$mappoly_in)){
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("Uploading MAPpoly data..."))
prepare_MAPpoly(input_map()$mappoly_in)
})
} else NULL
})
loadMap_onemap = reactive({
if(!is.null(input_map()$onemap_in)){
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("Uploading OneMap data..."))
temp <- load(input_map()$onemap_in$datapath)
viewmap <- get(temp)
viewmap
})
} else NULL
})
loadMap_polymapR = reactive({
if(!(is.null(input_map()$polymapR.dataset) &
is.null(input_map()$polymapR.map))) {
req(input_map()$polymapR.dataset, input_map()$polymapR.map)
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading polymapR data..."))
prepare_polymapR(input_map()$polymapR.dataset, input_map()$polymapR.map,
input$input.type, as.numeric(input$ploidy))
})
} else NULL
})
loadQTL_custom = reactive({
if(!(is.null(input_qtl()$selected_mks) &
is.null(input_qtl()$qtl_info) &
is.null(input_qtl()$blups) &
is.null(input_qtl()$beta.hat) &
is.null(input_qtl()$profile) &
is.null(input_qtl()$effects) &
is.null(input_qtl()$probs))) {
req(input_qtl()$selected_mks, input_qtl()$qtl_info, input_qtl()$blups,
input_qtl()$beta.hat, input_qtl()$profile, input_qtl()$effects,
input_qtl()$probs)
withProgress(message = 'Working:', value = 0, {
incProgress(0.5, detail = paste("Uploading custom QTL data..."))
prepare_qtl_custom_files(input_qtl()$selected_mks,
input_qtl()$qtl_info,
input_qtl()$blups,
input_qtl()$beta.hat,
input_qtl()$profile,
input_qtl()$effects,
input_qtl()$probs)
})
} else NULL
})
loadQTL_qtlpoly = reactive({
if(!(is.null(input_qtl()$qtlpoly_data) &
is.null(input_qtl()$qtlpoly_remim.mod) &
is.null(input_qtl()$qtlpoly_est.effects) &
is.null(input_qtl()$qtlpoly_fitted.mod))) {
req(input_qtl()$qtlpoly_data,
input_qtl()$qtlpoly_remim.mod,
input_qtl()$qtlpoly_est.effects,
input_qtl()$qtlpoly_fitted.mod)
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("Uploading QTLpoly data..."))
prepare_QTLpoly(input_qtl()$qtlpoly_data,
input_qtl()$qtlpoly_remim.mod,
input_qtl()$qtlpoly_est.effects,
input_qtl()$qtlpoly_fitted.mod)
})
} else NULL
})
loadQTL_diaQTL = reactive({
if(!(is.null(input_qtl()$diaQTL_scan1) &
is.null(input_qtl()$diaQTL_scan1.summaries) &
is.null(input_qtl()$diaQTL_fitQTL) &
is.null(input_qtl()$diaQTL_BayesCI))) {
req(input_qtl()$diaQTL_scan1,
input_qtl()$diaQTL_scan1.summaries,
input_qtl()$diaQTL_fitQTL,
input_qtl()$diaQTL_BayesCI)
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("Uploading diaQTL data..."))
prepare_diaQTL(input_qtl()$diaQTL_scan1,
input_qtl()$diaQTL_scan1.summaries,
input_qtl()$diaQTL_fitQTL,
input_qtl()$diaQTL_BayesCI)
})
} else NULL
})
loadQTL_polyqtlR = reactive({
if(!(is.null(input_qtl()$polyqtlR_QTLscan_list) &
is.null(input_qtl()$polyqtlR_qtl_info) &
is.null(input_qtl()$polyqtlR_effects))) {
req(input_qtl()$polyqtlR_QTLscan_list,
input_qtl()$polyqtlR_qtl_info,
input_qtl()$polyqtlR_effects)
withProgress(message = 'Working:', value = 0, {
incProgress(0.3, detail = paste("Uploading polyqtlR data..."))
prepare_polyqtlR(input_qtl()$polyqtlR_QTLscan_list,
input_qtl()$polyqtlR_qtl_info,
input_qtl()$polyqtlR_effects)
})
} else NULL
})
temp_dir <- reactive(tempdir())
loadJBrowse_fasta = reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading fasta path..."))
if(!is.null(input_genome()$fasta) & !is.null(loadMap())){
# keep fasta name
for(i in 1:length(input_genome()$fasta$datapath)){
print(file.path(temp_dir(), input_genome()$fasta$name))
file.rename(input_genome()$fasta$datapath[i],
file.path(temp_dir(), input_genome()$fasta$name[i]))
}
file.path(temp_dir(), sort(input_genome()$fasta$name)[1])
} else if(!is.null(input_genome()$fasta_server) & !is.null(loadMap())) {
input_genome()$fasta_server
} else if(!is.null(input_genome()$fasta) | !is.null(input_genome()$fasta_server)) {
warning("Load map data first to use this feature.")
} else if(!is.null(loadExample())){
loadExample()$fasta
} else NULL
})
})
loadJBrowse_gff3 = reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading gff3 path..."))
if(!is.null(input_genome()$gff3)){
for(i in 1:length(input_genome()$gff3$datapath)){
file.rename(input_genome()$gff3$datapath[i],
file.path(temp_dir(), input_genome()$gff3$name[i]))
}
file.path(temp_dir(), input_genome()$gff3$name[1])
} else if(!is.null(input_genome()$gff3_server)) {
input_genome()$gff3_server
} else if(!is.null(loadExample())){
loadExample()$gff3
} else NULL
})
})
loadJBrowse_vcf = reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading VCF path..."))
if(!is.null(input_genome()$vcf)) {
for(i in 1:length(input_genome()$vcf$datapath)){
file.rename(input_genome()$vcf$datapath[i],
file.path(temp_dir(), input_genome()$vcf$name[i]))
}
file.path(temp_dir(), input_genome()$vcf$name[1])
} else if(!is.null(input_genome()$vcf_server)) {
input_genome()$vcf_server
} else NULL
})
})
loadJBrowse_align = reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading BAM or CRAM alignment data path..."))
if(!is.null(input_genome()$align)) {
for(i in 1:length(input_genome()$align$datapath)){
file.rename(input_genome()$align$datapath[i],
file.path(temp_dir(), input_genome()$align$name[i]))
}
file.path(temp_dir(), input_genome()$align$name[1])
} else if(!is.null(input_genome()$align_server)) {
input_genome()$align_server
} else NULL
})
})
loadJBrowse_wig = reactive({
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Uploading bigWig data path..."))
if(!is.null(input_genome()$wig)) {
for(i in 1:length(input_genome()$wig$datapath)){
file.rename(input_genome()$wig$datapath[i],
file.path(temp_dir(), input_genome()$wig$name[i]))
}
file.path(temp_dir(), input_genome()$wig$name[1])
} else if(!is.null(input_genome()$wig_server)) {
input_genome()$wig_server
} else NULL
})
})
loadMap = reactive({
if(is.null(loadExample()) &
is.null(loadMap_custom()) &
is.null(loadMap_mappoly()) &
is.null(loadMap_onemap()) &
is.null(loadMap_polymapR()) &
is.null(loadViewpoly())){
warning("Select one of the options in `upload` session")
return(NULL)
} else if(!is.null(loadViewpoly())){
return(loadViewpoly()$map)
} else if(!is.null(loadMap_custom())){
return(loadMap_custom())
} else if(!is.null(loadMap_mappoly())){
return(loadMap_mappoly())
} else if(!is.null(loadMap_onemap())){
return(loadMap_onemap())
} else if(!is.null(loadMap_polymapR())){
return(loadMap_polymapR())
} else if(!is.null(loadExample())){
return(loadExample()$map)
}
})
loadQTL = reactive({
if(is.null(loadExample()) &
is.null(loadQTL_custom()) &
is.null(loadQTL_qtlpoly()) &
is.null(loadQTL_diaQTL()) &
is.null(loadQTL_polyqtlR()) &
is.null(loadViewpoly())){
warning("Select one of the options in `upload` session")
return(NULL)
} else if(!is.null(loadViewpoly())){
return(loadViewpoly()$qtl)
} else if(!is.null(loadQTL_custom())){
return(loadQTL_custom())
} else if(!is.null(loadQTL_qtlpoly())){
return(loadQTL_qtlpoly())
} else if(!is.null(loadQTL_diaQTL())){
return(loadQTL_diaQTL())
} else if(!is.null(loadQTL_polyqtlR())){
return(loadQTL_polyqtlR())
} else if(!is.null(loadExample())){
return(loadExample()$qtl)
}
})
loadHidecan = reactive({
if(is.null(loadHidecanExample()) &
is.null(input_hidecan()) &
is.null(loadViewpoly())){
warning("Select one of the options in `upload` session")
return(NULL)
} else if(!is.null(loadViewpoly())){
return(loadViewpoly()$hidecan)
} else if(!is.null(input_hidecan())){
return(input_hidecan())
} else if(!is.null(loadHidecanExample())){
return(loadHidecanExample())
}
})
observe({
if (!is.null(loadMap()) | !is.null(loadQTL())) {
Sys.sleep(1)
# enable the download button
shinyjs::enable("export_viewpoly")
} else {
shinyjs::disable("export_viewpoly")
}
})
output$export_viewpoly <- downloadHandler(
filename = function() {
paste0("viewpoly.RData")
},
content = function(file) {
withProgress(message = 'Working:', value = 0, {
incProgress(0.1, detail = paste("Saving viewpoly object..."))
validate(
need(!is.null(loadMap()) | !is.null(loadQTL()), "For exporting VIEWpoly dataset it is required to load
linkage map or QTL data in the above boxes."),
)
obj <- structure(list(map = loadMap(),
qtl = loadQTL(),
fasta = NULL, # It would save only the temporary path
gff3 = NULL,
vcf = NULL,
align = NULL,
wig = NULL,
hidecan = loadHidecan(),
version = packageVersion("viewpoly")),
class = "viewpoly")
assign(input$data.name, obj)
incProgress(0.5, detail = paste("Saving viewpoly object..."))
})
save(list = input$data.name, file = file)
}
)
return(list(loadMap = reactive(loadMap()),
loadQTL = reactive(loadQTL()),
loadJBrowse_fasta = reactive(loadJBrowse_fasta()),
loadJBrowse_gff3 = reactive(loadJBrowse_gff3()),
loadJBrowse_vcf = reactive(loadJBrowse_vcf()),
loadJBrowse_align = reactive(loadJBrowse_align()),
loadJBrowse_wig = reactive(loadJBrowse_wig()),
loadHidecan = reactive(loadHidecan())))
}
## To be copied in the UI
# mod_upload_ui("upload_ui_1")
## To be copied in the server
# mod_upload_server("upload_ui_1")
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/mod_upload.R
|
#' Run the Shiny Application
#'
#' @param ... arguments to pass to golem_opts.
#' See `?golem::get_golem_options` for more details.
#' @inheritParams shiny::shinyApp
#'
#' @export
#' @importFrom shiny shinyApp
#' @importFrom golem with_golem_options
run_app <- function(
onStart = NULL,
options = list(),
enableBookmarking = NULL,
uiPattern = "/",
...
) {
with_golem_options(
app = shinyApp(
ui = app_ui,
server = app_server,
onStart = onStart,
options = options,
enableBookmarking = enableBookmarking,
uiPattern = uiPattern
),
golem_opts = list(...)
)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/run_app.R
|
#' Import data from polymapR
#'
#' Function to import datasets from polymapR. Function from MAPpoly.
#'
#' See examples at \url{https://rpubs.com/mmollin/tetra_mappoly_vignette}.
#'
#' @param input.data a \code{polymapR} dataset
#' @param ploidy the ploidy level
#' @param parent1 a character string containing the name (or pattern of genotype IDs) of parent 1
#' @param parent2 a character string containing the name (or pattern of genotype IDs) of parent 2
#' @param input.type Indicates whether the input is discrete ("disc") or probabilistic ("prob")
#' @param prob.thres threshold probability to assign a dosage to offspring. If the probability
#' is smaller than \code{thresh.parent.geno}, the data point is converted to 'NA'.
#' @param pardose matrix of dimensions (n.mrk x 3) containing the name of the markers in the first column, and the
#' dosage of parents 1 and 2 in columns 2 and 3. (see polymapR vignette)
#' @param offspring a character string containing the name (or pattern of genotype IDs) of the offspring
#' individuals. If \code{NULL} (default) it considers all individuals as offsprings, except
#' \code{parent1} and \code{parent2}.
#' @param filter.non.conforming if \code{TRUE} exclude samples with non
#' expected genotypes under no double reduction. Since markers were already filtered in polymapR, the default is
#' \code{FALSE}.
#' @param verbose if \code{TRUE} (default), the current progress is shown; if
#' \code{FALSE}, no output is produced
#'
#'
#' @return object of class \code{mappoly.data}
#'
#' @author Marcelo Mollinari \email{[email protected]}
#'
#' @references
#' Bourke PM et al: (2019) PolymapR — linkage analysis and genetic map
#' construction from F1 populations of outcrossing polyploids.
#' _Bioinformatics_ 34:3496–3502.
#' \doi{10.1093/bioinformatics/bty1002}
#'
#' Mollinari, M., and Garcia, A. A. F. (2019) Linkage
#' analysis and haplotype phasing in experimental autopolyploid
#' populations with high ploidy level using hidden Markov
#' models, _G3: Genes, Genomes, Genetics_.
#' \doi{10.1534/g3.119.400378}
#'
#'
#' @keywords internal
#'
#' @importFrom reshape2 acast
#' @importFrom dplyr filter arrange
#'
import_data_from_polymapR <- function(input.data,
ploidy,
parent1 = "P1",
parent2 = "P2",
input.type = c("discrete", "probabilistic"),
prob.thres = 0.95,
pardose = NULL,
offspring = NULL,
filter.non.conforming = TRUE,
verbose = TRUE){
input.type <- match.arg(input.type)
if(input.type == "discrete"){
geno.dose <- input.data[,-match(c(parent1, parent2), colnames(input.data)), drop = FALSE]
dosage.p1 <- input.data[,parent1]
dosage.p2 <- input.data[,parent2]
names(dosage.p1) <- names(dosage.p2) <- rownames(input.data)
mappoly.data <- structure(list(ploidy = ploidy,
n.ind = ncol(geno.dose),
n.mrk = nrow(geno.dose),
ind.names = colnames(geno.dose),
mrk.names = rownames(geno.dose),
dosage.p1 = dosage.p1,
dosage.p2 = dosage.p2,
chrom = NA,
genome.pos = NA,
seq.ref = NULL,
seq.alt = NULL,
all.mrk.depth = NULL,
prob.thres = NULL,
geno.dose = geno.dose,
nphen = 0,
phen = NULL,
kept = NULL,
elim.correspondence = NULL),
class = "mappoly.data")
}
else {
if(is.null(pardose))
stop(safeError("provide parental dosage."))
rownames(pardose) <- pardose$MarkerName
dat <- input.data[,c("MarkerName", "SampleName",paste0("P", 0:ploidy))]
p1 <- unique(sapply(parent1, function(x) unique(grep(pattern = x, dat[,"SampleName"], value = TRUE))))
p2 <- unique(sapply(parent2, function(x) unique(grep(pattern = x, dat[,"SampleName"], value = TRUE))))
if(is.null(offspring)){
offspring <- setdiff(as.character(unique(dat[,"SampleName"])), c(p1, p2))
} else {
offspring <- unique(grep(pattern = offspring, dat[,"SampleName"], value = TRUE))
}
d1 <- input.data[,c("MarkerName", "SampleName", "geno")]
geno.dose <- acast(d1, MarkerName ~ SampleName, value.var = "geno")
## get marker names ----------------------
mrk.names <- rownames(geno.dose)
## get number of individuals -------------
n.ind <- length(offspring)
## get number of markers -----------------
n.mrk <- length(mrk.names)
## get individual names ------------------
ind.names <- offspring
## get dosage in parent P ----------------
dosage.p1 <- as.integer(pardose[mrk.names,"parent1"])
names(dosage.p1) <- mrk.names
## get dosage in parent Q ----------------
dosage.p2 <- as.integer(pardose[mrk.names,"parent2"])
names(dosage.p2) <- mrk.names
## monomorphic markers
d.p1 <- abs(abs(dosage.p1-(ploidy/2))-(ploidy/2))
d.p2 <- abs(abs(dosage.p2-(ploidy/2))-(ploidy/2))
mrk.names <- names(which(d.p1+d.p2 != 0))
dosage.p1 <- dosage.p1[mrk.names]
dosage.p2 <- dosage.p2[mrk.names]
nphen <- 0
phen <- NULL
if (verbose){
cat("Importing the following data:")
cat("\n Ploidy level:", ploidy)
cat("\n No. individuals: ", n.ind)
cat("\n No. markers: ", n.mrk)
cat("\n No. informative markers: ", length(mrk.names), " (", round(100*length(mrk.names)/n.mrk,1), "%)", sep = "")
cat("\n ...")
}
## get genotypic info --------------------
MarkerName <- SampleName <- NULL
geno <- dat %>%
filter(SampleName %in% offspring) %>%
filter(MarkerName %in% mrk.names) %>%
arrange(SampleName, MarkerName)
colnames(geno) <- c("mrk", "ind", as.character(0:ploidy))
ind.names <- unique(geno$ind)
mrk.names <- unique(geno$mrk)
dosage.p1 <- dosage.p1[mrk.names]
dosage.p2 <- dosage.p2[mrk.names]
## transforming na's in expected genotypes using Mendelian segregation
i.na <- which(apply(geno, 1, function(x) any(is.na(x))))
if (length(i.na) > 0) {
m.na <- match(geno[i.na, 1], mrk.names)
d.p1.na <- dosage.p1[m.na]
d.p2.na <- dosage.p2[m.na]
for (i in 1:length(m.na)) geno[i.na[i], -c(1, 2)] <- segreg_poly(ploidy, d.p1.na[i], d.p2.na[i])
}
## dosage info
if(filter.non.conforming){
geno.dose <- geno.dose[mrk.names,offspring]
} else {
geno.dose <- dist_prob_to_class(geno = geno, prob.thres = prob.thres)
if(geno.dose$flag)
{
geno <- geno.dose$geno
geno.dose <- geno.dose$geno.dose
n.ind <- ncol(geno.dose)
ind.names <- colnames(geno.dose)
} else {
geno.dose <- geno.dose$geno.dose
}
geno.dose[is.na(geno.dose)] <- ploidy + 1
}
## returning the 'mappoly.data' object
if (verbose) cat("\n Done with reading.\n")
mappoly.data <- structure(list(ploidy = ploidy,
n.ind = n.ind,
n.mrk = length(mrk.names),
ind.names = ind.names,
mrk.names = mrk.names,
dosage.p1 = dosage.p1,
dosage.p2 = dosage.p2,
chrom = rep(NA, length(mrk.names)),
genome.pos = rep(NA, length(mrk.names)),
seq.ref = NULL,
seq.alt = NULL,
all.mrk.depth = NULL,
prob.thres = prob.thres,
geno = geno,
geno.dose = geno.dose,
nphen = nphen,
phen = phen,
chisq.pval = NULL,
kept = NULL,
elim.correspondence = NULL),
class = "mappoly.data")
}
if(filter.non.conforming){
mappoly.data <- filter_non_conforming_classes(mappoly.data)
Ds <- array(NA, dim = c(ploidy+1, ploidy+1, ploidy+1))
for(i in 0:ploidy)
for(j in 0:ploidy)
Ds[i+1,j+1,] <- segreg_poly(ploidy = ploidy, d.p1 = i, d.p2 = j)
d.p1op <- cbind(mappoly.data$dosage.p1, mappoly.data$dosage.p2)
M <- t(apply(d.p1op, 1, function(x) Ds[x[1]+1, x[2]+1,]))
dimnames(M) <- list(mappoly.data$mrk.names, c(0:ploidy))
M <- cbind(M, mappoly.data$geno.dose)
mappoly.data$chisq.pval <- apply(M, 1, mrk_chisq_test, ploidy = ploidy)
}
mappoly.data
}
#' Filter non-conforming classes in F1, non double reduced population.
#' Function from MAPpoly.
#'
#' @param input.data object of class mappoly
#' @param prob.thres threshold for filtering genotypes by genotype probability values. If NULL, the filter is not applied.
#'
#' @return filtered \code{mappoly.data} object
#'
#'
#' @keywords internal
filter_non_conforming_classes <- function(input.data, prob.thres = NULL)
{
if (!inherits(input.data, "mappoly.data")) {
stop(deparse(substitute(input.data)), " is not an object of class 'mappoly.data'")
}
ploidy <- input.data$ploidy
dp <- input.data$dosage.p1
dq <- input.data$dosage.p2
Ds <- array(NA, dim = c(ploidy+1, ploidy+1, ploidy+1))
for(i in 0:ploidy)
for(j in 0:ploidy)
Ds[i+1,j+1,] <- segreg_poly(ploidy = ploidy, d.p1 = i, d.p2 = j)
Dpop <- cbind(dp,dq)
#Gathering segregation probabilities given parental dosages
M <- t(apply(Dpop, 1, function(x) Ds[x[1]+1, x[2]+1,]))
M[M != 0] <- 1
dimnames(M) <- list(input.data$mrk.names, 0:ploidy)
##if no prior probabilities
if(!is.prob.data(input.data)){
for(i in 1:nrow(M)){
id0 <- !as.numeric(input.data$geno.dose[i,])%in%(which(M[i,] == 1)-1)
if(any(id0))
input.data$geno.dose[i,id0] <- (ploidy+1)
}
return(input.data)
}
## 1 represents conforming classes/ 0 represents non-conforming classes
dp <- rep(dp, input.data$n.ind)
dq <- rep(dq, input.data$n.ind)
M <- M[rep(seq_len(nrow(M)), input.data$n.ind),]
R <- input.data$geno[,-c(1:2)] - input.data$geno[,-c(1:2)]*M
id1 <- apply(R, 1, function(x) abs(sum(x))) > 0.3 # if the sum of the excluded classes is greater than 0.3, use segreg_poly
N <- matrix(NA, sum(id1), input.data$ploidy+1)
ct <- 1
for(i in which(id1)){
N[ct,] <- Ds[dp[i]+1, dq[i]+1, ]
ct <- ct+1
}
input.data$geno[id1,-c(1:2)] <- N
# if the sum of the excluded classes is greater than zero
# and smaller than 0.3, assign zero to those classes and normalize the vector
input.data$geno[,-c(1:2)][R > 0] <- 0
input.data$geno[,-c(1:2)] <- sweep(input.data$geno[,-c(1:2)], 1, rowSums(input.data$geno[,-c(1:2)]), FUN = "/")
if(is.null(prob.thres))
prob.thres <- input.data$prob.thres
geno.dose <- dist_prob_to_class(geno = input.data$geno, prob.thres = prob.thres)
if(geno.dose$flag)
{
input.data$geno <- geno.dose$geno
input.data$geno.dose <- geno.dose$geno.dose
} else {
input.data$geno.dose <- geno.dose$geno.dose
}
input.data$geno.dose[is.na(input.data$geno.dose)] <- ploidy + 1
input.data$n.ind <- ncol(input.data$geno.dose)
input.data$ind.names <- colnames(input.data$geno.dose)
return(input.data)
}
#' Linkage phase format conversion: matrix to list. Function from MAPpoly.
#'
#' This function converts linkage phase configurations from matrix
#' form to list
#'
#' @param M matrix whose columns represent homologous chromosomes and
#' the rows represent markers
#'
#' @return a list of linkage phase configurations
#'
#'
#' @keywords internal
ph_matrix_to_list <- function(M) {
w <- lapply(split(M, seq(NROW(M))), function(x, M) which(x == 1))
w[sapply(w, function(x) length(x) == 0)] <- 0
w
}
#' Is it a probability dataset? Function from MAPpoly.
#'
#' @param x object of class \code{mappoly.data}
#'
#' @return TRUE/FALSE indicating if genotype probability information is present
#'
#'
#' @keywords internal
is.prob.data <- function(x){
exists('geno', where = x)
}
#' Haldane map function. From MAPpoly
#'
#' @param d vector containing recombination fraction values
#'
#' @return vector with genetic distances estimated with Haldane function
#'
#'
#' @keywords internal
mf_h <- function(d) 0.5 * (1 - exp(-d/50))
#' Chi-square test. Function from MAPpoly.
#'
#' @param x data.frame containing dosage information
#' @param ploidy integer defining the specie ploidy
#'
#' @return vector with p-values for each marker
#'
#' @importFrom stats chisq.test
#'
#' @keywords internal
mrk_chisq_test <- function(x, ploidy){
y <- x[-c(1:(ploidy+1))]
y[y == ploidy+1] <- NA
y <- table(y, useNA = "always")
names(y) <- c(names(y)[-length(y)], "NA")
seg.exp <- x[0:(ploidy+1)]
seg.exp <- seg.exp[seg.exp != 0]
seg.obs <- seg.exp
seg.obs[names(y)[-length(y)]] <- y[-length(y)]
pval <- suppressWarnings(chisq.test(x = seg.obs, p = seg.exp[names(seg.obs)])$p.value)
pval
}
#' Returns the class with the highest probability in
#' a genotype probability distribution. Function from MAPpoly.
#'
#' @param geno the probabilistic genotypes contained in the object
#' \code{'mappoly.data'}
#' @param prob.thres probability threshold to select the genotype.
#' Values below this genotype are assumed as missing data
#' @return a matrix containing the doses of each genotype and
#' marker. Markers are disposed in rows and individuals are
#' disposed in columns. Missing data are represented by NAs
#'
#' @importFrom reshape2 melt dcast
#' @importFrom dplyr group_by filter arrange `%>%`
#'
#'
#' @keywords internal
dist_prob_to_class <- function(geno, prob.thres = 0.9) {
a <- melt(geno, id.vars = c("mrk", "ind"))
mrk <- ind <- value <- variable <- NULL # Setting the variables to NULL first
a$variable <- as.numeric(levels(a$variable))[a$variable]
b <- a %>%
group_by(mrk, ind) %>%
filter(value > prob.thres) %>%
arrange(mrk, ind, variable)
z <- dcast(data = b[,1:3], formula = mrk ~ ind, value.var = "variable")
rownames(z) <- z[,"mrk"]
z <- data.matrix(frame = z[,-1])
n <- setdiff(unique(geno$mrk), rownames(z))
if(length(n) > 0)
{
ploidy <- matrix(NA, nrow = length(n), ncol = ncol(z), dimnames = list(n, colnames(z)))
z <- rbind(z,ploidy)
}
rm.ind <- setdiff(unique(geno$ind), colnames(z))
flag <- FALSE
if(length(rm.ind) > 0){
flag <- TRUE
warning("Inividual(s) ", paste(rm.ind, collapse = " "),
"\n did not meet the 'prob.thres' criteria for any of\n the markers and was (were) removed.")
geno <- geno %>% filter(ind %in% colnames(z))
}
z <- z[as.character(unique(geno$mrk)), as.character(unique(geno$ind))]
list(geno.dose = z, geno = geno, flag = flag)
}
#' Polysomic segregation frequency - Function from MAPpoly
#'
#' Computes the polysomic segregation frequency given a ploidy level
#' and the dosage of the locus in both parents. It does not consider
#' double reduction.
#'
#' @param ploidy the ploidy level
#'
#' @param d.p1 the dosage in parent P
#'
#' @param d.p2 the dosage in parent Q
#'
#' @return a vector containing the expected segregation frequency for
#' all possible genotypic classes.
#'
#'
#' @author Marcelo Mollinari, \email{[email protected]}
#'
#' @references
#' Mollinari, M., and Garcia, A. A. F. (2019) Linkage
#' analysis and haplotype phasing in experimental autopolyploid
#' populations with high ploidy level using hidden Markov
#' models, _G3: Genes, Genomes, Genetics_.
#' \doi{10.1534/g3.119.400378}
#'
#' Serang O, Mollinari M, Garcia AAF (2012) Efficient Exact
#' Maximum a Posteriori Computation for Bayesian SNP
#' Genotyping in Polyploids. _PLoS ONE_ 7(2):
#' e30906.
#'
#' @importFrom stats dhyper
#'
#' @keywords internal
segreg_poly <- function(ploidy, d.p1, d.p2) {
if (ploidy%%2 != 0)
stop(safeError("m must be an even number"))
p.dose <- numeric((ploidy + 1))
p.names <- character((ploidy + 1))
seg.p1 <- dhyper(x = c(0:(ploidy + 1)), m = d.p1, n = (ploidy - d.p1), k = ploidy/2)
seg.p2 <- dhyper(x = c(0:(ploidy + 1)), m = d.p2, n = (ploidy - d.p2), k = ploidy/2)
M <- tcrossprod(seg.p1, seg.p2)
for (i in 1:nrow(M)) {
for (j in 1:ncol(M)) {
p.dose[i + j - 1] <- p.dose[i + j - 1] + M[i, j]
}
}
p.dose <- p.dose[!is.na(p.dose)]
for (i in 0:ploidy) p.names[i + 1] <- paste(paste(rep("A", i), collapse = ""), paste(rep("a", (ploidy - i)), collapse = ""), sep = "")
names(p.dose) <- p.names
return(p.dose)
}
#' Import phased map list from polymapR
#'
#' Function to import phased map lists from polymapR. Function from MAPpoly.
#'
#' See examples at \url{https://rpubs.com/mmollin/tetra_mappoly_vignette}.
#'
#' @param maplist a list of phased maps obtained using function
#' \code{create_phased_maplist} from package \code{polymapR}
#' @param mappoly.data a dataset used to obtain \code{maplist},
#' converted into class \code{mappoly.data}
#' @param ploidy the ploidy level
#'
#' @return object of class \code{mappoly.map}
#'
#' @author Marcelo Mollinari \email{[email protected]}
#'
#' @references
#' Bourke PM et al: (2019) PolymapR — linkage analysis and genetic map
#' construction from F1 populations of outcrossing polyploids.
#' _Bioinformatics_ 34:3496–3502.
#' \doi{10.1093/bioinformatics/bty1002}
#'
#' Mollinari, M., and Garcia, A. A. F. (2019) Linkage
#' analysis and haplotype phasing in experimental autopolyploid
#' populations with high ploidy level using hidden Markov
#' models, _G3: Genes, Genomes, Genetics_.
#' \doi{10.1534/g3.119.400378}
#'
#'
#' @keywords internal
import_phased_maplist_from_polymapR <- function(maplist,
mappoly.data,
ploidy = NULL){
input_classes <- c("list")
if (!inherits(maplist, input_classes)) {
stop(deparse(substitute(maplist)), " is not a list of phased maps.")
}
X <- maplist[[1]]
if(is.null(ploidy))
ploidy <- (ncol(X)-2)/2
MAPs <- vector("list", length(maplist))
for(i in 1:length(MAPs)){
X <- maplist[[i]]
seq.num <- match(X$marker, mappoly.data$mrk.names)
seq.rf <- mf_h(diff(X$position)) ## Using haldane
seq.rf[seq.rf <= 1e-05] <- 1e-4
P = ph_matrix_to_list(X[,3:(ploidy+2)])
Q = ph_matrix_to_list(X[,3:(ploidy+2) + ploidy])
names(P) <- names(Q) <- seq.num
seq.ph <- list(P = P, Q = Q)
maps <- vector("list", 1)
maps[[1]] <- list(seq.num = seq.num, seq.rf = seq.rf, seq.ph = seq.ph, loglike = 0)
MAPs[[i]] <- structure(list(info = list(ploidy = (ncol(X)-2)/2,
n.mrk = nrow(X),
seq.num = seq.num,
mrk.names = as.character(X$marker),
seq.dose.p1 = mappoly.data$dosage.p1[as.character(X$marker)],
seq.dose.p2 = mappoly.data$dosage.p2[as.character(X$marker)],
chrom = rep(i, nrow(X)),
genome.pos = NULL,
seq.ref = NULL,
seq.alt = NULL,
chisq.pval = mappoly.data$chisq.pval[as.character(X$marker)],
data.name = as.character(sys.call())[3],
ph.thresh = NULL),
maps = maps),
class = "mappoly.map")
#MAPs[[i]] <- loglike_hmm(MAPs[[i]], mappoly.data)
}
MAPs
}
#' prepare maps for plot - from MAPpoly
#' @param input.map object of class \code{mappoly.map}
#' @param config choose between 'best', 'all' or provide vector with defined configuration.
#' 'best' provide just the best estimated configuration. 'all' provides all possibles.
#'
#' @return list containing phase and dosage information
#'
#'
#' @keywords internal
prepare_map <- function(input.map, config = "best"){
if (!inherits(input.map, "mappoly.map")) {
stop(deparse(substitute(input.map)), " is not an object of class 'mappoly.map'")
}
## Choosing the linkage phase configuration
LOD.conf <- get_LOD(input.map, sorted = FALSE)
if(config == "best") {
i.lpc <- which.min(LOD.conf)
} else if(config == "all"){
i.lpc <- seq_along(LOD.conf) } else if (config > length(LOD.conf)) {
stop(safeError("invalid linkage phase configuration"))
} else i.lpc <- config
## Gathering marker positions
map <- data.frame(mk.names = input.map$info$mrk.names,
l.dist = cumsum(imf_h(c(0, input.map$maps[[i.lpc]]$seq.rf))),
g.chr = input.map$info$chrom,
g.dist = if(!is.null(input.map$info$genome.pos)) input.map$info$genome.pos else NA ,
alt = if(!is.null(input.map$info$seq.alt)) input.map$info$seq.alt else NA , # get this info from VCF if it is inputted
ref = if(!is.null(input.map$info$seq.ref)) input.map$info$seq.ref else NA)
##
ph.p1 <- ph_list_to_matrix(input.map$maps[[i.lpc]]$seq.ph$P, input.map$info$ploidy)
ph.p2 <- ph_list_to_matrix(input.map$maps[[i.lpc]]$seq.ph$Q, input.map$info$ploidy)
dimnames(ph.p1) <- list(map$mk.names, letters[1:input.map$info$ploidy])
dimnames(ph.p2) <- list(map$mk.names, letters[(1+input.map$info$ploidy):(2*input.map$info$ploidy)])
if(is.null(input.map$info$seq.alt))
{
ph.p1[ph.p1 == 1] <- ph.p2[ph.p2 == 1] <- "A"
ph.p1[ph.p1 == 0] <- ph.p2[ph.p2 == 0] <- "B"
} else {
for(i in input.map$info$mrk.names){
ph.p1[i, ph.p1[i,] == 1] <- input.map$info$seq.alt[i]
ph.p1[i, ph.p1[i,] == 0] <- input.map$info$seq.ref[i]
ph.p2[i, ph.p2[i,] == 1] <- input.map$info$seq.alt[i]
ph.p2[i, ph.p2[i,] == 0] <- input.map$info$seq.ref[i]
}
}
d.p1 <- input.map$info$seq.dose.p1
d.p2 <- input.map$info$seq.dose.p2
list(ploidy = input.map$info$ploidy, map = map, ph.p1 = ph.p1, ph.p2 = ph.p2, d.p1 = d.p1, d.p2 = d.p2)
}
#' Map functions - from MAPpoly
#'
#' @param r vector with recombination fraction values
#'
#' @keywords internal
#'
#' @return vector with genetic distances
#'
#'
#' @keywords internal
imf_h <- function(r) {
r[r >= 0.5] <- 0.5 - 1e-14
-50 * log(1 - 2 * r)
}
#' Extract the LOD Scores in a \code{'mappoly.map'} object
#' Function from MAPpoly.
#'
#' @param x an object of class \code{mappoly.map}
#' @param sorted logical. if \code{TRUE}, the LOD Scores are displayed
#' in a decreasing order
#'
#' @return a numeric vector containing the LOD Scores
#' @keywords internal
#'
#'
#' @keywords internal
get_LOD <- function(x, sorted = TRUE) {
w <- sapply(x$maps, function(x) x$loglike)
LOD <- w - max(w)
if (sorted)
LOD <- sort(LOD, decreasing = TRUE)
abs(LOD)
}
#' Linkage phase format conversion: list to matrix.
#' Function from MAPpoly.
#'
#' This function converts linkage phase configurations from list
#' to matrix form
#'
#' @param L a list of configuration phases
#'
#' @param ploidy ploidy level
#'
#'
#' @return a matrix whose columns represent homologous chromosomes and
#' the rows represent markers
#'
#'
#' @keywords internal
ph_list_to_matrix <- function(L, ploidy) {
M <- matrix(0, nrow = length(L), ncol = ploidy)
for (i in 1:nrow(M)) if (all(L[[i]] != 0))
M[i, L[[i]]] <- 1
M
}
#' Viewmap object sanity check
#'
#'
#' @param viewmap_obj an object of class \code{viewmap}
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewmap <- function(viewmap_obj){
test <- logical(7L)
names(test) <- 1:7
test[1] <- length(viewmap_obj) != 6
test[2] <- any(names(viewmap_obj) != c("d.p1", "d.p2", "ph.p1", "ph.p2", "maps", "software"))
test[3] <- is.null(names(viewmap_obj$d.p1[[1]]))
test[4] <- is.null(rownames(viewmap_obj$ph.p1[[1]]))
test[5] <- any(sapply(viewmap_obj$maps, length) != 6)
test[6] <- is.null(viewmap_obj$software)
test[7] <- !inherits(viewmap_obj, "viewmap")
if(any(as.logical(test)))
return(test)
else return(0)
}
#' viewqtl object sanity check
#'
#'
#' @param viewqtl_obj an object of class \code{viewqtl}
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewqtl <- function(viewqtl_obj){
test <- logical(10L)
names(test) <- 1:10
test[3] <- any(names(viewqtl_obj$selected_mks) != c("LG", "mk", "pos"))
if(viewqtl_obj$software == "QTLpoly") {
test[1] <- length(viewqtl_obj) != 8
test[2] <- any(names(viewqtl_obj) != c("selected_mks", "qtl_info", "blups", "beta.hat", "profile", "effects", "probs", "software"))
test[4] <- any(names(viewqtl_obj$qtl_info) != c("LG", "Pos", "pheno", "Pos_lower", "Pos_upper", "Pval", "h2"))
test[5] <- any(names(viewqtl_obj$blups) != c("haplo", "pheno", "qtl", "u.hat"))
test[6] <- any(names(viewqtl_obj$beta.hat) != c("pheno", "beta.hat"))
test[7] <- any(names(viewqtl_obj$profile) != c("pheno", "LOP"))
test[8] <- any(names(viewqtl_obj$effects) != c("pheno", "qtl.id", "haplo", "effect"))
test[9] <- length(dim(viewqtl_obj$probs)) != 3
} else if(viewqtl_obj$software == "diaQTL") {
test[1] <- length(viewqtl_obj) != 5
test[2] <- any(names(viewqtl_obj) != c("selected_mks", "qtl_info", "profile", "effects", "software"))
test[4] <- any(names(viewqtl_obj$qtl_info) != c("LG", "Pos", "pheno", "Pos_lower", "Pos_upper", "LL"))
test[7] <- any(names(viewqtl_obj$profile) != c("pheno", "deltaDIC"))
test[8] <- any(names(viewqtl_obj$effects) != c("pheno", "haplo", "qtl.id", "effect", "type", "CI.lower", "CI.upper"))
test[5] <- test[6] <- test[9] <- FALSE
} else if(viewqtl_obj$software == "polyqtlR"){
test[1] <- length(viewqtl_obj) != 5
test[2] <- any(names(viewqtl_obj) != c("selected_mks", "qtl_info", "profile", "effects", "software"))
test[4] <- any(names(viewqtl_obj$qtl_info) != c("LG", "Pos", "pheno", "Pos_lower", "Pos_upper", "thresh"))
test[7] <- any(names(viewqtl_obj$profile) != c("pheno", "LOD"))
test[8] <- any(names(viewqtl_obj$effects)[1:3] != c("pos", "pheno", "LG"))
test[5] <- test[6] <- test[9] <- FALSE
}
test[10] <- !inherits(viewqtl_obj, "viewqtl")
if(any(as.logical(test)))
return(test)
else return(0)
}
#' Viewpoly object sanity check
#'
#'
#' @param viewpoly_obj an object of class \code{viewpoly}
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewpoly <- function(viewpoly_obj){
test <- logical(19L)
names(test) <- 1:19
test[1] <- length(viewpoly_obj) != 8
test[2] <- any(names(viewpoly_obj) != c("map", "qtl", "fasta", "gff3", "vcf", "align", "wig", "version"))
test[3] <- !inherits(viewpoly_obj, "viewpoly")
test[4] <- is.null(viewpoly_obj$version)
if(is.null(viewpoly_obj$map)) test[5:10] <- FALSE else test[5:10] <- check_viewmap(viewpoly_obj$map)
if(is.null(viewpoly_obj$qtl)) test[11:19] <- FALSE else test[11:19] <- check_viewqtl(viewpoly_obj$qtl)
if(any(as.logical(test)))
return(test)
else return(0)
}
# Collapse box
jscode <- "
shinyjs.collapse = function(boxid) {
$('#' + boxid).closest('.box').find('[data-widget=collapse]').click();
}
"
# Global variables to avoid NOTE
globalVariables("js")
# Check internet connection
havingIP <- function() {
if (.Platform$OS.type == "windows") {
ipmessage <- system("ipconfig", intern = TRUE)
} else {
ipmessage <- system("ifconfig", intern = TRUE)
}
validIP <- "((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)[.]){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
any(grep(validIP, ipmessage))
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/R/utils_helpers.R
|
options( "golem.app.prod" = TRUE)
viewpoly::run_app()
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/inst/app.R
|
---
title: "About"
output: html_document
---
### VIEWpoly
<br />
<div><img src="www/flow_chart.png" alt="alt" align="center" style="width:800px;height:830px;"></div>
<br />
`VIEWpoly` is a shiny app and R package for visualizing and exploring results from [polyploid computational tools](https://www.polyploids.org/) using an interactive graphical user interface. The package allows users to directly upload output files from [polymapR](https://cran.r-project.org/web/packages/polymapR/index.html), [MAPpoly](https://cran.r-project.org/web/packages/mappoly/index.html) , [polyqtlR](https://cran.r-project.org/web/packages/polyqtlR/index.html) , [QTLpoly](https://cran.r-project.org/web/packages/qtlpoly/index.html),
[diaQTL](https://github.com/jendelman/diaQTL), [GWASpoly](https://github.com/jendelman/GWASpoly), [HIDECAN](https://cran.r-project.org/web/packages/hidecan/index.html), and genomic assembly, variants, annotation and alignment files. VIEWpoly uses [shiny](https://cran.r-project.org/web/packages/shiny/index.html), [golem](https://cran.r-project.org/web/packages/golem/index.html), [ggplot2](https://cran.r-project.org/web/packages/ggplot2/index.html), [plotly](https://cran.r-project.org/web/packages/plotly/index.html), and [JBrowseR](https://cran.r-project.org/web/packages/JBrowseR/index.html) libraries to graphically display the QTL profiles, positions, alleles estimated effects, progeny individuals containing specific haplotypes and their breeding values. It is also possible to access marker dosage and parental phase from the linkage map. If genomic information is available, the corresponding QTL positions are interactively explored using JBrowseR interface, allowing the search for candidate genes. It also provides features to download specific information into comprehensive tables and images for further analysis and presentation.
### Quick Start
You can run `VIEWpoly` locally installing the package and accessing the graphical interface through a web browser. To use the stable version, please install the package from CRAN:
```{r}
install.packages("viewpoly")
viewpoly::run_app()
```
If you want to use the latest development version, go ahead and install `VIEWpoly` from our Github repository:
```{r}
# install.packages("devtools")
devtools::install_github("mmollina/viewpoly")
viewpoly::run_app()
```
NOTE: Windows users may need to install the `Rtools` before compiling the package from source (development version).
The `Input data` tab has options for diverse types of inputs. You can upload directly outputs from:
* [MAPpoly](https://cran.r-project.org/web/packages/mappoly/index.html)
* [polymapR](https://cran.r-project.org/web/packages/polymapR/index.html)
* [polyqtlR](https://cran.r-project.org/web/packages/polyqtlR/index.html)
* [QTLpoly](https://cran.r-project.org/web/packages/qtlpoly/index.html)
* [diaQTL](https://github.com/jendelman/diaQTL)
* [GWASpoly](https://github.com/jendelman/GWASpoly)
* [HIDECAN](https://cran.r-project.org/web/packages/hidecan/index.html)
* CSV, TSV or TSV.GZ standard formats
To relate the genetic maps and QTL with genomic information, it is also required:
* FASTA reference genome
It is optional to upload also:
* GFF3 annotation file
* BAM or CRAM alignment file
* VCF file
* bigWig file
### Documentation
* Access VIEWpoly tutorial [here](https://cristianetaniguti.github.io/viewpoly_vignettes/VIEWpoly_tutorial.html).
* We present the app main features in the video bellow:
<iframe width="600" height="315"
src="https://www.youtube.com/embed/OBt_jebhfeY">
</iframe>
* Access more information about how to make your data sets available through VIEWpoly [here](https://cristianetaniguti.github.io/viewpoly_vignettes/Publish_data_VIEWpoly.html).
* If you would like to contribute to develop `VIEWpoly`, please check our [Contributing Guidelines](https://cristianetaniguti.github.io/viewpoly_vignettes/Contributing_guidelines.html).
### References
Taniguti CH, Gesteira GS, Lau J, Pereira GS, Zeng ZB, Byrne D, Riera-Lizarazu O, Mollinari M. VIEWpoly: a visualization tool to integrate and explore results of polyploid genetic analysis. Submitted.
Mollinari M, Garcia AAF. 2019. “Linkage analysis and haplotype phasing in experimental autopolyploid populations with high ploidy level using hidden Markov models.” G3: Genes, Genomes, Genetics 9 (10): 3297-3314. doi:10.1534/g3.119.400378.
Pereira GS, Gemenet DC, Mollinari M, Olukolu BA, Wood JC, Mosquera V, Gruneberg WJ, Khan A, Buell CR, Yencho GC, Zeng ZB. 2020. “Multiple QTL mapping in autopolyploids: a random-effect model approach with application in a hexaploid sweetpotato full-sib population.” Genetics 215 (3): 579-595. doi:10.1534/genetics.120.303080.
Amadeu RR, Muñoz PR , Zheng C, Endelman JB. 2021."QTL mapping in outbred tetraploid (and diploid) diallel populations." Genetics 219 (3), iyab124, https://doi.org/10.1093/genetics/iyab124
Bourke PM , van Geest G, Voorrips RE, Jansen J, Kranenburg T, Shahin A, Visser RGF , Arens P, Smulders MJM , Maliepaard C. 2018."polymapR—linkage analysis and genetic map construction from F1 populations of outcrossing polyploids." Bioinformatics, 34 (20): 3496–3502, https://doi.org/10.1093/bioinformatics/bty371
Bourke PM, Voorrips RE, Hackett CA, van Geest G, Willemsen JH, Arens P, Smulders MJM, Visser RGF, Maliepaard C. 2021."Detecting quantitative trait loci and exploring chromosomal pairing in autopolyploids using polyqtlR." Bioinformatics, 37 (21): 3822–3829, https://doi.org/10.1093/bioinformatics/btab574
### Acknowledgment
VIEWpoly project is supported by the USDA, National Institute of Food and Agriculture (NIFA), Specialty Crop Research Initiative (SCRI) project [‘‘Tools for Genomics-Assisted Breeding in Polyploids: Development of a Community Resource’’](https://www.polyploids.org/) and by the Bill & Melinda Gates Foundation under the Genetic Advances and [Innovative Seed Systems for Sweetpotato project (SweetGAINS)](https://cgspace.cgiar.org/handle/10568/106838).
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/inst/ext/about.Rmd
|
#' Viewpoly object sanity check
#'
#'
#' @param viewpoly_obj an object of class \code{viewpoly}
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#' @importFrom testthat expect_equal
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewmap_values <- function(viewmap_obj, doses, phases, maps){
expect_equal(as.vector(table(viewmap_obj$d.p1[[1]])), doses)
expect_equal(as.vector(table(viewmap_obj$ph.p1[[1]][,3])), phases)
expect_equal(as.vector(sum(viewmap_obj$maps[[1]][,2])), maps, tolerance = 0.001)
}
#' Check viewqtl object for QTLpoly uploaded files
#'
#' @param viewqtl_obj an object of class \code{viewqtl}
#' @param pos sum of the position values to be matched
#' @param h2 sum of the log herdability values to be matched
#' @param u.hat sum of the estimated u values to be matched
#' @param beta.hat sum of the estimated beta values to be matched
#' @param lop sum of the LOP values to be matched
#' @param effect sum of the effects values to be matched
#' @param probs sum of the genotype probabilties values to be matched
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#' @importFrom testthat expect_equal
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewqtl_qtlpoly_values <- function(viewqtl_obj, pos, h2, u.hat, beta.hat, lop, effect, probs){
expect_equal(sum(viewqtl_obj$selected_mks$pos), pos, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$qtl_info$h2), h2, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$blups$u.hat), u.hat, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$beta.hat$beta.hat), beta.hat, tolerance = 0.001)
expect_equal(min(viewqtl_obj$profile$LOP), lop, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$effects$effect), effect, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$probs[,1,2]), probs, tolerance = 0.001)
}
#' Check viewqtl object for diaQTL uploaded files
#'
#' @param viewqtl_obj an object of class \code{viewqtl}
#' @param pos sum of the position values to be matched
#' @param ll sum of the log likelihood values to be matched
#' @param DIC sum of the deltaDIC values to be matched
#' @param effect sum of the effects values to be matched
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#' @importFrom testthat expect_equal
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewqtl_diaqtl_values <- function(viewqtl_obj, pos, ll, DIC, effect){
expect_equal(sum(viewqtl_obj$selected_mks$pos), pos, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$qtl_info$LL), ll, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$profile$deltaDIC), DIC, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$effects$effect, na.rm = T), effect, tolerance = 0.001)
}
#' Check viewqtl object for polyqtlR uploaded files
#'
#' @param viewqtl_obj an object of class \code{viewqtl}
#' @param pos sum of the position values to be matched
#' @param thre sum of the threshold values to be matched
#' @param lod sum of the LOD values to be matched
#' @param effect sum of the effects values to be matched
#'
#' @return if consistent, returns 0. If not consistent, returns a
#' vector with a number of tests, where \code{TRUE} indicates
#' a failed test.
#'
#' @importFrom testthat expect_equal
#'
#' @author Cristiane Taniguti, \email{[email protected]}
#' @keywords internal
check_viewqtl_polyqtlr_values <- function(viewqtl_obj, pos, thre, lod, effect){
expect_equal(sum(viewqtl_obj$selected_mks$pos), pos, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$qtl_info$thresh), thre, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$profile$LOD), lod, tolerance = 0.001)
expect_equal(sum(viewqtl_obj$effects[,4], na.rm = T), effect, tolerance = 0.001)
}
|
/scratch/gouwar.j/cran-all/cranData/viewpoly/inst/ext/functions4tests.R
|
#' An S4 class to represent the viewshed
#'
#' A `viewshed` object contains a 'matrix' of visible and invisible area,
#' resolution, extent, and crs
#' @slot visible matrix
#' @slot resolution vector
#' @slot extent numeric
#' @slot crs character
#' @import methods
#' @importFrom terra geom
#' @md
setClass(
Class = "Viewshed",
representation(viewpoint = "numeric",
visible = "matrix",
resolution = "numeric",
extent = "numeric",
crs = "character")
)
setGeneric("filter_invisible", function(object, ifRaster){
standardGeneric("filter_invisible")
})
setMethod("filter_invisible", signature(object="Viewshed", ifRaster="logical"),
function(object, ifRaster)
{
raster_data <- terra::rast(object@visible,
crs = terra::crs(object@crs),
extent = terra::ext(object@extent, xy = TRUE))
if (ifRaster) {
return(raster_data)
} else {
pointsData <- terra::as.points(raster_data)
coords <- terra::geom(pointsData)
x <- coords[,3]
y <- coords[,4]
z <- terra::values(pointsData)[,1]
pt <- cbind(x, y, z)
pt <- pt[pt[,3] == 1,]
pt <- pt[,-3]
return(pt)
}
})
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/Class-Viewshed.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
get_depths <- function(px, py, x, y, num) {
.Call('_viewscape_get_depths', PACKAGE = 'viewscape', px, py, x, y, num)
}
visibleLabel <- function(viewpoint, dsm, h, max_dis) {
.Call('_viewscape_visibleLabel', PACKAGE = 'viewscape', viewpoint, dsm, h, max_dis)
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/RcppExports.R
|
#' calculate_diversity
#' @description The calculate_diversity function is designed to calculate landscape
#' diversity metrics within a viewshed. It takes as input a land cover raster,
#' a viewshed object representing the observer's line of sight, and an optional
#' parameter to compute class proportions.
#'
#' @param viewshed Viewshed object.
#' @param land Raster. The raster of land use/land cover representing different
#' land use/cover classes.
#' @param proportion logical (Optional), indicating whether to return class
#' proportions along with the Shannon Diversity Index (SDI). (default is FALSE).
#' @return List. a list containing the Shannon Diversity Index (SDI) and,
#' if the proportion parameter is set to TRUE, a table of class proportions
#' within the viewshed.
#' @import terra
#' @importFrom dplyr count
#' @importFrom dplyr %>%
#' @importFrom rlang .data
#'
#' @export
#'
#' @examples
#' # Load a viewpoint
#' test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp", package = "viewscape"))
#' # load dsm raster
#' dsm <- terra::rast(system.file("test_dsm.tif", package ="viewscape"))
#' #Compute viewshed
#' output <- compute_viewshed(dsm = dsm,
#' viewpoints = test_viewpoint,
#' offset_viewpoint = 6)
#' # load landuse raster
#' test_landuse <- terra::rast(system.file("test_landuse.tif",
#' package ="viewscape"))
#' diversity <- calculate_diversity(output,
#' test_landuse)
#'
calculate_diversity <- function(viewshed,
land,
proportion = FALSE){
if (isFALSE(terra::crs(land, proj = TRUE) == viewshed@crs)) {
land <- terra::project(land, y=terra::crs(viewshed@crs))
}
pt <- filter_invisible(viewshed, FALSE)
land <- terra::crop(land, terra::ext(viewshed@extent, xy = TRUE))
# calculate the proportion of each class
land_class <- terra::extract(land, pt)[,1]
land_class <- as.data.frame(land_class)
colnames(land_class)[1] <- "type"
land_class <- dplyr::count(land_class, .data$type)
total <- sum(land_class$n)
land_class$proportion <- land_class$n/total
# calculate Shannon diversity index
p <- land_class$proportion
sdi <- sd_index(p)
if (proportion) {
# sub_land_class <- subset(land_class,
# select = c(type, proportion))
sub_land_class <- land_class %>%
dplyr::select(.data$type, proportion)
return(list(SDI=sdi,Proportion=t(sub_land_class)))
} else {
return(sdi)
}
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/calculate_diversity.R
|
#' calculate_feature
#' @description The calculate_feature function is designed to extract specific
#' feature-related information within a viewshed. It allows you to compute
#' the proportion of the feature that is present in the viewshed.
#'
#' @param viewshed Viewshed object.
#' @param feature Raster. Land cover or land use
#' @param type Numeric. The input type of land cover raster.
#' type=1: percentage raster (that represents the percentage of
#' area in each cell).
#' type=2: binary raster (that only uses two values to represent whether
#' the feature exists in each cell).
#' @param exclude_value Numeric. the value of those cells need to be excluded
#' in the analysis. If type = 2, exclude_value is reqired.
#'
#' @return Numeric. The canopy area in the viewshed.
#' @export
#'
#' @examples
#' # Load a viewpoint
#' test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp", package = "viewscape"))
#' # load dsm raster
#' dsm <- terra::rast(system.file("test_dsm.tif", package ="viewscape"))
#' #Compute viewshed
#' viewshed <- compute_viewshed(dsm = dsm,
#' viewpoints = test_viewpoint,
#' offset_viewpoint = 6)
#' # load canopy raster
#' test_canopy <- terra::rast(system.file("test_canopy.tif",
#' package ="viewscape"))
#' # calculate the percentage of canopy coverage
#' test_canopy_proportion <- viewscape::calculate_feature(viewshed = viewshed,
#' feature = test_canopy,
#' type = 2,
#' exclude_value = 0)
calculate_feature <- function(viewshed,
feature,
type,
exclude_value){
if (isFALSE(terra::crs(feature, proj = TRUE) == viewshed@crs)) {
feature <- terra::project(feature, y=terra::crs(viewshed@crs))
}
if (missing(type)) {
stop("type is missing")
}
if (type == 2 & missing(exclude_value)) {
stop("please specify exclude_value")
}
pt <- filter_invisible(viewshed, FALSE)
feature_df <- terra::extract(feature, pt)[,1]
# colnames(feature_df)[2] <- 'value'
# feature_df <- subset(feature_df, value != exclude_value)
if(type == 1){
output <- sum((viewshed@resolution[1])^2*feature_)/
(viewshed@resolution[1])^2*length(pt[,1])
}
else if(type == 2){
feature_ <- feature_df[feature_df!=exclude_value]
output <- length(feature_)/length(pt[,1])
}
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/calculate_feature.R
|
#' calculate_viewmetrics
#' @description The calculate_viewmetrics function is designed to compute a set of
#' configuration metrics based on a given viewshed object and optionally, digital surface
#' models (DSM) and digital terrain models (DTM) for terrain analysis.
#' The function calculates various metrics that describe the visibility characteristics
#' of a landscape from a specific viewpoint.The metrics include:
#' 1. Extent: The total area of the viewshed, calculated as the number of visible grid
#' cells multiplied by the grid resolution
#' 2. Depth: The furthest visible distance within the viewshed from the viewpoint
#' 3. Vdepth: The standard deviation of distances to visible points,
#' providing a measure of the variation in visible distances
#' 4. Horizontal: The total visible horizontal or terrestrial area within the viewshed
#' 5. Relief: The standard deviation of elevations of the visible ground surface
#' 6. Skyline: The standard deviation of the vertical viewscape, including visible
#' canopy and buildings, when specified
#' 7. Number of patches: Visible fragmentation measured by total visible patches
#' with the viewscape
#' 8. Mean shape index: Visible patchiness based on average perimeter-to-area ratio
#' for all viewscape patches (vegetation and building)
#' 9. Edge density: A measure of visible complexity based on the length of
#' patch edges per unit area
#' 10. Patch size: Total average size of a patches over the entire viewscape area
#' 11. Patch density: Visible landscape granularity based on measuring patch density
#' 12. Shannon diversity index: The abundance and evenness of land cover/use in a viewshed
#' 13. Proportion of object: Proportion of a single type of land use or cover in a viewshed
#'
#' @param viewshed Viewshed object.
#' @param dsm Raster, Digital Surface Model for the calculation of
#' @param dtm Raster, Digital Terrain Model
#' @param masks List, a list including rasters of canopy and building footprints.
#' For example of canopy raster, the value for cells without canopy should be 0 and
#' the value for cells with canopy can be any number.
#' @return List
#' @references Tabrizian, P., Baran, P.K., Berkel, D.B., Mitásová, H., & Meentemeyer, R.K. (2020).
#' Modeling restorative potential of urban environments by coupling viewscape analysis of lidar
#' data with experiments in immersive virtual environments. Landscape and Urban Planning, 195, 103704.
#' @import terra
#' @importFrom terra patches
#' @importFrom terra as.polygons
#' @importFrom terra mask
#' @importFrom terra crop
#' @importFrom ForestTools vwf
#' @importFrom ForestTools mcws
#'
#' @examples
#' \donttest{
#' # Load in DSM
#' test_dsm <- terra::rast(system.file("test_dsm.tif",
#' package ="viewscape"))
#' # Load DTM
#' test_dtm <- terra::rast(system.file("test_dtm.tif",
#' package ="viewscape"))
#'
#' # Load canopy raster
#' test_canopy <- terra::rast(system.file("test_canopy.tif",
#' package ="viewscape"))
#'
#' # Load building footprints raster
#' test_building <- terra::rast(system.file("test_building.tif",
#' package ="viewscape"))
#'
#' # Load in the viewpoint
#' test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp",
#' package = "viewscape"))
#'
#' # Compute viewshed
#' output <- viewscape::compute_viewshed(dsm = test_dsm,
#' viewpoints = test_viewpoint,
#' offset_viewpoint = 6)
#'
#' # calculate metrics given the viewshed, canopy, and building footprints
#' test_metrics <- viewscape::calculate_viewmetrics(output,
#' test_dsm,
#' test_dtm,
#' list(test_canopy, test_building))
#' }
#'
#' @export
calculate_viewmetrics <- function(viewshed, dsm, dtm, masks = list()) {
if (missing(viewshed)) {
stop("Viewshed object is missing")
}
if (missing(dsm) || missing(dtm)) {
stop("DSM or DTM is missing")
}
units <- sf::st_crs(viewshed@crs)$units
if (units == "ft") {
error <- 1.6
minHeight <- 10
} else if (units == "m") {
error <- 0.5
minHeight <- 3
}
if (isFALSE(terra::crs(dsm, proj = TRUE) == viewshed@crs)) {
dsm <- terra::project(dsm, y=terra::crs(viewshed@crs))
}
if (isFALSE(terra::crs(dtm, proj = TRUE) == viewshed@crs)) {
dsm <- terra::project(dtm, y=terra::crs(viewshed@crs))
}
output <- list()
visiblepoints <- filter_invisible(viewshed, FALSE)
# viewshed raster
m <- terra::vect(sp::SpatialPoints(visiblepoints))
terra::crs(m) <- viewshed@crs
mask_ <- terra::mask(filter_invisible(viewshed, TRUE), m)
# get subdsm/dtm
subdsm <- terra::crop(dsm, terra::ext(mask_))
subdtm <- terra::crop(dtm, terra::ext(mask_))
submodel <- subdsm - subdtm
ttops <- ForestTools::vwf(CHM = submodel,
winFun = function(x){x * 0.05 + 0.6},
minHeight = minHeight)
crowns <- ForestTools::mcws(treetops = ttops,
CHM = submodel,
minHeight = minHeight)
crowns <- terra::patches(crowns, directions=4)
crowns <- terra::as.polygons(crowns)
# viewshed patch parameters
patch_paras <- patch_p(mask_, crowns)
x <- patch_paras[[6]][,1]
y <- patch_paras[[6]][,2]
pointnumber <- length(visiblepoints[,1])
resolution <- viewshed@resolution[1]
# Number of patches
# Mean shape index
# Edge density
# Patch size
for (i in 1:5) {
output[[length(output)+1]] <- patch_paras[[i]]
}
names(output) <- c("Nump", "MSI", "ED", "PS", "PD")
# extent - Total area of the viewshed
extent <- pointnumber * resolution^2
output[[length(output)+1]] <- extent
# depth - Furthest visible distance given the viewscape
depths <- get_depths(viewshed@viewpoint[1],
viewshed@viewpoint[2],
x,
y,
length(x))
depths <- depths[!is.na(depths)]
# depth
output[[length(output)+1]] <- max(depths)
# vdepth
output[[length(output)+1]] <- sd(depths)
names(output) <- c("Nump", "MSI", "ED", "PS", "PD",
"extent", "depth", "vdepth")
# horizontal - Total visible horizontal or terrestrial area
# relief - Variation (Standard deviation) in elevation of the visible ground surface.
# dsm <- terra::crop(subdsm, terra::ext(viewshed@extent, xy = TRUE))
# dtm <- terra::crop(subdtm, terra::ext(viewshed@extent, xy = TRUE))
dtm_z <- terra::extract(subdtm, visiblepoints)[,1]
dsm_z <- terra::extract(subdsm, visiblepoints)[,1]
delta_z <- dsm_z - dtm_z
z <- cbind(dtm_z, dsm_z, delta_z)
z <- z[which(z[,3]<=error),]
# horizontal
output[[length(output)+1]] <- length(z[,3]) * resolution^2
# relief
output[[length(output)+1]] <- sd(z[,1])
names(output) <- c("Nump", "MSI", "ED", "PS", "PD",
"extent", "depth", "vdepth",
"horizontal", "relief")
# skyline - Variation of (Standard deviation) of the vertical viewscape
# (visible canopy and buildings)
if (length(masks) == 2) {
if (isFALSE(terra::crs(masks[[1]], proj = TRUE) == viewshed@crs)) {
masks[[1]] <- terra::project(masks[[1]], y=terra::crs(viewshed@crs))
}
if (isFALSE(terra::crs(masks[[2]], proj = TRUE) == viewshed@crs)) {
masks[[2]] <- terra::project(masks[[2]], y=terra::crs(viewshed@crs))
}
masks_1 <- terra::crop(masks[[1]], terra::ext(viewshed@extent, xy = TRUE))
masks_2 <- terra::crop(masks[[2]], terra::ext(viewshed@extent, xy = TRUE))
dsm_z <- terra::extract(dsm, visiblepoints)[,1]
masks_1 <- terra::extract(masks_1, visiblepoints)[,1]
masks_2 <- terra::extract(masks_2, visiblepoints)[,1]
mask_df <- cbind(masks_1, masks_2, masks_1+masks_2, dsm_z)
mask_ <- mask_df[which(mask_df[,3] != 0),]
if (length(mask_df[,4]) > 1) {
output[[length(output)+1]] <- sd(na.omit(mask_df[,4]))
} else {
output[[length(output)+1]] <- 0
}
names(output) <- c("Nump", "MSI", "ED", "PS", "PD",
"extent", "depth", "vdepth",
"horizontal", "relief", "skyline")
}
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/calculate_viewmetrics.R
|
#' compute_viewshed
#' @description The compute_viewshed function is designed for computing viewsheds,
#' which are areas visible from specific viewpoints, based on a Digital Surface
#' Model (DSM). It provides flexibility for single or multi-viewpoint analyses
#' and allows options for parallel processing, raster output, and plotting.
#'
#' @param dsm Raster, the digital surface model/digital elevation model
#' @param viewpoints sf point(s) or vector including x,y coordinates of a viewpoint
#' or a matrix including several viewpoints with x,y coordinates
#' @param offset_viewpoint numeric, setting the height of the viewpoint.
#' (default is 1.7 meters).
#' @param offset_height numeric, setting the height of positions that a given
#' viewpoint will look at. (defaut is 0)
#' @param r Numeric (optional), setting the radius for viewshed analysis.
#' (it is defaulted as NULL)
#' @param parallel Logical, (default is FALSE) indicating if parallel computing
#' should be used to compute viewsheds of multiview points. When it is TRUE,
#' arguements 'raster' and 'plot' are ignored
#' @param workers Numeric, indicating the number of CPU cores that will be used
#' for parallel computing. It is required if 'parallel' is 'TRUE'.
#' @param raster Logical, (default is FALSE) if it is TRUE, the raster of
#' viewshed will be returned.
#' The default is FALSE
#' @param plot Logical, (default is FALSE) if it is TRUE, the raster of
#' viewshed will be displayed
#'
#' @return Raster or list. For single-viewpoint analysis, the function returns
#' either a raster (raster is TRUE) or a viewshed object. Value 1 means visible while
#' value 0 means invisible. For multi-viewpoint analysis, a list of viewsheds
#' is returned.
#'
#'
#' @useDynLib viewscape
#' @import pbmcapply
#' @importFrom Rcpp sourceCpp
#' @importFrom Rcpp evalCpp
#' @importFrom terra ext
#' @importFrom terra res
#' @importFrom terra plot
#' @importFrom parallel detectCores
#' @export
#'
#' @examples
#' # Load a viewpoint
#' test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp", package = "viewscape"))
#' # load dsm raster
#' dsm <- terra::rast(system.file("test_dsm.tif", package ="viewscape"))
#' #Compute viewshed
#' output <- compute_viewshed(dsm = dsm,
#' viewpoints = test_viewpoint,
#' offset_viewpoint = 6)
compute_viewshed <- function(dsm,
viewpoints,
offset_viewpoint=1.7,
offset_height = 0,
r = NULL,
parallel = FALSE,
workers = 0,
raster = FALSE,
plot = FALSE){
multiviewpoints <- FALSE
if (missing(dsm)) {
stop("DSM is missing!")
} else if (missing(viewpoints)) {
stop("viewpoint(s) is missing!")
}
dsm_units <- sf::st_crs(dsm)$units
if(is.null(r) == TRUE){
if (dsm_units == "ft") {
r <- 3281
} else if (dsm_units == "m") {
r <- 1000
}
}
# if (dsm_units == "ft" && r > 3281) {
# r <- 3281
# } else if (dsm_units == "m" && r > 1000) {
# r <- 1000
# }
if (plot) {
raster <- TRUE
}
if (!(class(viewpoints)[1] == "numeric")) {
if (class(viewpoints)[1] == "sf") {
viewpoints <- sf::st_coordinates(viewpoints)
} else {
stop("If input viewpoints is not a vector or matrix, it has to be sf point(s)")
}
}
if (is.vector(viewpoints)){
viewpoints <- matrix(data = viewpoints,
nrow = 1,
ncol = 2)
} else {
if (length(viewpoints[,1]) > 1) {
multiviewpoints <- TRUE
}
}
if (multiviewpoints == FALSE){
viewpoints <- c(viewpoints[,1], viewpoints[,2])
# compute viewshed
output <- radius_viewshed(dsm, r, viewpoints, offset_viewpoint, offset_height)
if (raster) {
raster_data <- filter_invisible(output, raster)
if (plot) {
terra::plot(raster_data,
axes=FALSE,
box=FALSE,
legend = FALSE)
v<- matrix(0,1,3)
v[1,1] <- viewpoints[1]
v[1,2] <- viewpoints[2]
terra::plot(sp::SpatialPoints(v),
add=TRUE,
col="red",
axes=FALSE,
box=FALSE,
legend=FALSE)
}
return(raster_data)
} else {
return(output)
}
}else if (multiviewpoints){
inputs <- split(viewpoints,seq(nrow(viewpoints)))
if (parallel == TRUE){
if (workers == 0) {
workers <- 2
} else if (workers > parallel::detectCores()) {
workers <- parallel::detectCores()
}
# inputs <- split(viewpoints,seq(nrow(viewpoints)))
if (isTRUE(Sys.info()[1]=="Windows") == TRUE){
workers = 1
}
suppressWarnings(
viewsheds <- paral_nix(X = inputs,
dsm = dsm,
r = r,
offset = offset_viewpoint,
workers = workers)
)
} else {
suppressWarnings(
viewsheds <- paral_nix(X = inputs,
dsm = dsm,
r = r,
offset = offset_viewpoint,
workers = 1)
)
}
return(viewsheds)
}
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/compute_viewshed.R
|
#' @import sf
#' @importFrom graphics par
#' @importFrom grDevices rgb
#' @importFrom methods new
#' @importFrom stats sd
#' @importFrom sp SpatialPoints
#' @noMd
radius_viewshed <- function(dsm, r, viewPt, offset, offset2 = 0) {
resolution <- terra::res(dsm)
distance <- round(r/resolution[1])
projection <- terra::crs(dsm, proj = TRUE)
# create an extent to crop input raster
subarea <- get_buffer(viewPt[1], viewPt[2], r)
subdsm <- terra::crop(dsm, terra::ext(subarea))
dsm <- subdsm
# setup the view point
col <- terra::colFromX(dsm, viewPt[1])
row <- terra::rowFromY(dsm, viewPt[2])
z_viewpoint = terra::extract(dsm,cbind(viewPt[1],viewPt[2]))[1,1]+offset
viewpoint <- matrix(0,1,3)
viewpoint[1,1] <- col
viewpoint[1,2] <- row
viewpoint[1,3] <- z_viewpoint
# get raster information
dsm_matrix <- terra::as.matrix(dsm, wide=TRUE)
# compute viewshed
label_matrix <- visibleLabel(viewpoint, dsm_matrix, offset2, distance)
output <- new("Viewshed",
viewpoint = viewPt,
visible = label_matrix,
resolution = resolution,
extent = as.vector(sf::st_bbox(dsm)),
crs = projection)
return(output)
}
#' @noMd
paral_nix <- function(X, dsm, r, offset, workers){
results <- pbmcapply::pbmclapply(X = X,
FUN=radius_viewshed,
dsm=dsm,
r=r,
offset=offset,
mc.cores=workers)
return(results)
}
#' @noMd
# H=−∑[(pi)×ln(pi)]
sd_index <- function(p) {
out <- sum(log(p) * p) * -1
return(round(out, digits = 3))
}
#' @noMd
# create a buffer based on a given point
get_buffer <- function(x, y, r) {
pdf <- data.frame(row.names = 1)
pdf[1,"x"] <- x
pdf[1,"y"] <- y
p <- sp::SpatialPoints(pdf)
p <- sf::st_as_sf(p)
subarea <- sf::st_buffer(p, r)
return(subarea)
}
#' @noMd
# get patches
get_patch <- function(viewshed){
vpt <- filter_invisible(viewshed, FALSE)
m <- terra::vect(sp::SpatialPoints(vpt))
terra::crs(m) <- viewshed@crs
mask_ <- terra::mask(filter_invisible(viewshed, TRUE), m)
return(mask_)
}
#' @noMd
# get patches parameter
patch_p <- function(m, patchpoly){
clusters <- terra::patches(m, directions=4)
ptc <- terra::as.polygons(clusters)
patchpoly <- terra::mask(patchpoly, ptc)
ptc_lines <- m %>%
terra::as.polygons() %>%
terra::as.lines() %>%
sf::st_as_sf()
perimeters <- terra::perim(patchpoly)
viewshed_areas <- terra::expanse(ptc)
areas <- terra::expanse(patchpoly)
total_perimeters <- sum(perimeters)
total_areas <- sum(areas)
total_viewshed_areas <- sum(viewshed_areas)
if (sf::st_crs(m)$units == "ft") {
num_pt <- round(total_perimeters/3.281)
} else {
num_pt <- round(total_perimeters)
}
# Number of patches
Nump <- length(areas)
# Mean shape index
MSI <- mean(perimeters/areas)
# Edge density
ED <- total_perimeters/total_viewshed_areas
# Patch size
PS <- total_areas/Nump
# Patch density
PD <- Nump/total_viewshed_areas
# sample points along the edge of patches
samples <- sf::st_sample(sf::st_cast(ptc_lines$geometry,
"MULTILINESTRING"),
num_pt)
samples <- sf::st_coordinates(samples)[,-3]
return(list(Nump, MSI, ED, PS, PD, samples))
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/utils.R
|
#' visualize_viewshed
#' @description The visualize_viewshed function is designed for the visualization
#' of a viewshed analysis, providing users with various options for visualizing
#' the results. The function works with a viewshed object and offers multiple
#' plotting and output types.
#'
#' @param viewshed Viewshed object
#' @param plottype Character, specifying the type of visualization ("polygon" or
#' "raster").
#' @param outputtype Character, specifying the type of output object ("raster"
#' or "polygon").
#' @return Visualized viewshed as either a raster or polygon object,
#' depending on the outputtype specified.
#'
#' @export
#'
#' @examples
#' \donttest{
#' # Load a viewpoint
#' test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp", package = "viewscape"))
#' # load dsm raster
#' dsm <- terra::rast(system.file("test_dsm.tif", package ="viewscape"))
#' #Compute viewshed
#' viewshed <- compute_viewshed(dsm = dsm,
#' viewpoints = test_viewpoint,
#' offset_viewpoint = 6)
#' # Visualize the viewshed as polygons
#' visualize_viewshed(viewshed, plottype = "polygon")
#' # Visualize the viewshed as a raster
#' visualize_viewshed(viewshed, plottype = "raster")
#' # Get the visualized viewshed as a polygon object
#' polygon_viewshed <- visualize_viewshed(viewshed,
#' plottype = "polygon",
#' outputtype = "polygon")
#'}
visualize_viewshed <- function(viewshed,
plottype = "",
outputtype = "") {
if (missing(viewshed)){
stop("Viewshed object is missing")
}
# vectorize the viewshed
mask_v <- get_patch(viewshed)
if (plottype == "polygon"){
polygon_v <- terra::as.polygons(mask_v)
#polygon_v <- terra::buffer(polygon_v, width = 0.0001)
terra::plot(polygon_v, col = rgb(0, 1, 0, 0.3), border = NA)
}else if (plottype == "raster"){
terra::plot(mask_v)
}
if (outputtype == "raster" || outputtype == "polygon"){
if (outputtype == "raster"){
out <- mask_v
}else if (outputtype == "polygon"){
polygon_v <- terra::as.polygons(mask_v)
out <- sf::st_as_sf(polygon_v)
}
return(out)
}
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/visualize_viewshed.R
|
#' @import Rcpp
.onLoad <- function(libname, pkgname) {
# suppressWarnings(
# Rcpp::sourceCpp(system.file("extdata/visibleLabel.cpp", package ="viewscape"),
# env = asNamespace("viewscape"))
# )
# cat("████████████████████████████████████████████████████████████████████████████████████████████████████\n")
# cat("████████████████████████████████████████████████████████████████████████████████████████████████████\n")
# cat("██████████████████████████████████████████████MM/MMM/MMM/M///M██████████████████████████████████████\n")
# cat("███████████████████████████████████████/M////MM/M/MM//////MM//MM///MM///M███████████████████████████\n")
# cat("███████████████████████████████/M////MM//MM/MM/MMMM//MMM///MM///MM/////MM//MM/██████████████████████\n")
# cat("██████████████████████████/////MM/MM/M/MMM/mn=======nmMMMM/M/MM/////MMM///MMM/M█████████████████████\n")
# cat("█████████████████████////MM/MMMMMMmn-=====nMMMMMMMMMMn=====nmMMMMMM///MMM//MMM////M/████████████████\n")
# cat("██████████████████///M/MMMMmm-=====nmMMMMm::::...::::NMMMn-=======nmMM██████████████████████████████\n")
# cat("███████████████//MMMMMMm-====nMMMN:. ..:::::MMMMn-======nmMM████████████████████████\n")
# cat("████████████///MMMM-===nMMN:::.. ////XXXooo ...::::::::NMMn-====nmMM████████████████████\n")
# cat("███████████///MM-==MMn:::.. ////XX??-||-oo ...:::::::::NMM-===MM████████████████\n")
# cat("████████████-===MNn:.. ///XXXX??------oo ..........::::MMM==MM███████████████\n")
# cat("█████████████==:... iiii'''ii??----oooo ........::::MMM-===MM████████████████\n")
# cat("██████████████==:. iii'''''iiiii////oooo ........:::MMMM-===MM██████████████████\n")
# cat("████████████████==:.. iii''''''iii///oooo .......:::::MMM-===MM█████████████████████\n")
# cat("██████████████████==::. iii'''''ooo//ooooo ...::::::MMMM-==MM█████████████████████████\n")
# cat("█████████████████████==::.. iioo'''''//ooooo ..::::::MMMM-==MM██████████████████████████████\n")
# cat("█████████████████████████==:::......ioooooooooo....:::::MMMMMM==MM██████████████████████████████████\n")
# cat("███████████████████████████████████MMMmmmmmmmmmmMMM█████████████████████████████████████████████████\n")
# cat("████████████████████████████████████████████████████████████████████████████████████████████████████\n")
# cat("████████████████████████████████████████████████████████████████████████████████████████████████████\n")
# cat("█::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::█\n")
# cat("█::██:::::::::██::██::██████::██::::██::::██::████████:::██████:::::███::::::::█████████:::██████::█\n")
# cat("█:::██:::::::██::██::██::::::██:::████:::██::██:::::::::██::::██:::██:██::::::██::::::██::██:::::::█\n")
# cat("█::::██:::::██::██::██::::::██::██::██::██::████████:::██:::::::::██::██:::::██::::::██::██::::::::█\n")
# cat("█:::::██:::██::██::██████::██:██::::██:██:::::::::██::██:::::::::███████::::█████████::███████:::::█\n")
# cat("█::::::██:██::██::██::::::███:::::::███:::██::::███::██:::::██::██::::██:::██:::::::::██:::::::::::█\n")
# cat("█:::::::███::██::██████::██:::::::::██:::█████████:::████████::██:::::██::██:::::::::███████:::::::█\n")
# cat("█::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::█\n")
# cat("████████████████████████████████████████████████████████████████████████████████████████████████████\n")
# cat("VIEWSCAPE_1.0.0\n")
invisible()
}
.onUnload <- function(libpath)
{
library.dynam.unload("viewscape", libpath)
}
|
/scratch/gouwar.j/cran-all/cranData/viewscape/R/zzz.R
|
## -----------------------------------------------------------------------------
library(viewscape)
## -----------------------------------------------------------------------------
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
#Load in the viewpoint
test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp",
package = "viewscape"))
#Compute viewshed
output <- viewscape::compute_viewshed(dsm = test_dsm,
viewpoints = test_viewpoint,
offset_viewpoint = 6)
## ----eval=FALSE---------------------------------------------------------------
# # overlap viewshed on DSM
# output_r <- viewscape::visualize_viewshed(output, outputtype = 'raster')
# terra::plot(test_dsm, axes=FALSE, box=FALSE, legend = FALSE)
# terra::plot(output_r, add=TRUE, col = "red", axes=FALSE, box=FALSE, legend = FALSE)
# terra::plot(test_viewpoint, add = TRUE, col = "blue", axes=FALSE, box=FALSE, legend = FALSE)
## -----------------------------------------------------------------------------
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
# Load points (.shp file)
test_viewpoints <- sf::read_sf(system.file("test_viewpoints.shp",
package = "viewscape"))
# Compute viewsheds
output <- viewscape::compute_viewshed(dsm = test_dsm,
viewpoints = test_viewpoints,
offset_viewpoint = 6,
parallel = TRUE,
workers = 1)
## ----eval = FALSE-------------------------------------------------------------
# # Use plot all viewsheds on DSM
# par(mfrow=c(3,3))
# for(i in 1:length(output)) {
# each <- output[[i]]
# raster_data <- viewscape::visualize_viewshed(each, outputtype="raster")
# terra::plot(test_dsm, axes=FALSE, box=FALSE, legend = FALSE)
# terra::plot(raster_data, add=TRUE, col = "red", axes=FALSE, box=FALSE, legend = FALSE)
# }
## -----------------------------------------------------------------------------
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
# Load DTM
test_dtm <- terra::rast(system.file("test_dtm.tif",
package ="viewscape"))
# Load canopy raster
test_canopy <- terra::rast(system.file("test_canopy.tif",
package ="viewscape"))
# Load building footprints raster
test_building <- terra::rast(system.file("test_building.tif",
package ="viewscape"))
## -----------------------------------------------------------------------------
# calculate metrics given the viewshed, canopy, and building footprints
test_metrics <- viewscape::calculate_viewmetrics(output[[1]],
test_dsm,
test_dtm,
list(test_canopy, test_building))
test_metrics
## -----------------------------------------------------------------------------
# load landuse raster
test_landuse <- terra::rast(system.file("test_landuse.tif",
package ="viewscape"))
## -----------------------------------------------------------------------------
# the Shannon Diversity Index (SDI)
test_diversity <- viewscape::calculate_diversity(output[[1]],
test_landuse,
proportion = TRUE)
# SDI and The proportion of each type of land use
test_diversity
## -----------------------------------------------------------------------------
# load canopy raster
test_canopy <- terra::rast(system.file("test_canopy.tif",
package ="viewscape"))
# calculate the percentage of canopy coverage
test_canopy_proportion <- viewscape::calculate_feature(viewshed = output[[1]],
feature = test_canopy,
type = 2,
exclude_value=0)
test_canopy_proportion
|
/scratch/gouwar.j/cran-all/cranData/viewscape/inst/doc/viewscape.R
|
---
title: "viewscape"
author: "Xiaohao Yang"
date: "2023-12-18"
vignette: >
%\VignetteIndexEntry{viewscape}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
# Viewscape
This vignette provides a basic overview of the functions in R package `viewscape`.
The basic viewshed analysis can be accessed through calling the `compute_viewshed`. The two needed objects to compute the viewshed are a digital surface model (DSM) and a viewpoint.
## 1. Compute viewshed
```{r}
library(viewscape)
```
### 1.1 Compute single viewshed
```{r}
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
#Load in the viewpoint
test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp",
package = "viewscape"))
#Compute viewshed
output <- viewscape::compute_viewshed(dsm = test_dsm,
viewpoints = test_viewpoint,
offset_viewpoint = 6)
```
```{r eval=FALSE}
# overlap viewshed on DSM
output_r <- viewscape::visualize_viewshed(output, outputtype = 'raster')
terra::plot(test_dsm, axes=FALSE, box=FALSE, legend = FALSE)
terra::plot(output_r, add=TRUE, col = "red", axes=FALSE, box=FALSE, legend = FALSE)
terra::plot(test_viewpoint, add = TRUE, col = "blue", axes=FALSE, box=FALSE, legend = FALSE)
```
### 1.2 Compute the viewshed for multiple viewpoints
```{r}
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
# Load points (.shp file)
test_viewpoints <- sf::read_sf(system.file("test_viewpoints.shp",
package = "viewscape"))
# Compute viewsheds
output <- viewscape::compute_viewshed(dsm = test_dsm,
viewpoints = test_viewpoints,
offset_viewpoint = 6,
parallel = TRUE,
workers = 1)
```
```{r eval = FALSE}
# Use plot all viewsheds on DSM
par(mfrow=c(3,3))
for(i in 1:length(output)) {
each <- output[[i]]
raster_data <- viewscape::visualize_viewshed(each, outputtype="raster")
terra::plot(test_dsm, axes=FALSE, box=FALSE, legend = FALSE)
terra::plot(raster_data, add=TRUE, col = "red", axes=FALSE, box=FALSE, legend = FALSE)
}
```
## 2. Calculate viewscape metrics
### 2.1 Calculate the metrics of viewshed
The function of view depth analysis can calculate two different metrics: the furthest distance and standard deviation of distances. To calculate view depth, there are two needed objects: the DSM that was used to get viewshed and result from viewshed analysis.
The function of extent analysis can calculate the total area of viewshed and needs the DSM that was used to get viewshed and result from viewshed analysis.
The following function can calculate the area of ground surface and standard deviation of elevations within a viewshed. The function needs a DSM and a DEM/DTM to calculate the metrics.
```{r}
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
# Load DTM
test_dtm <- terra::rast(system.file("test_dtm.tif",
package ="viewscape"))
# Load canopy raster
test_canopy <- terra::rast(system.file("test_canopy.tif",
package ="viewscape"))
# Load building footprints raster
test_building <- terra::rast(system.file("test_building.tif",
package ="viewscape"))
```
```{r}
# calculate metrics given the viewshed, canopy, and building footprints
test_metrics <- viewscape::calculate_viewmetrics(output[[1]],
test_dsm,
test_dtm,
list(test_canopy, test_building))
test_metrics
```
### 2.2 Calculate land use/cover diversity
calculate_diversity() calculates the proportion of each type of land use/ cover within a viewshed to get the Shannon Diversity Index.
```{r}
# load landuse raster
test_landuse <- terra::rast(system.file("test_landuse.tif",
package ="viewscape"))
```
```{r}
# the Shannon Diversity Index (SDI)
test_diversity <- viewscape::calculate_diversity(output[[1]],
test_landuse,
proportion = TRUE)
# SDI and The proportion of each type of land use
test_diversity
```
### 2.3 calculate a single feature
calculate_feature is to calculate the proportion of a feature (including trees, buildings, parking, and roads) within the viewshed. This function can be applied to
```{r}
# load canopy raster
test_canopy <- terra::rast(system.file("test_canopy.tif",
package ="viewscape"))
# calculate the percentage of canopy coverage
test_canopy_proportion <- viewscape::calculate_feature(viewshed = output[[1]],
feature = test_canopy,
type = 2,
exclude_value=0)
test_canopy_proportion
```
|
/scratch/gouwar.j/cran-all/cranData/viewscape/inst/doc/viewscape.Rmd
|
---
title: "viewscape"
author: "Xiaohao Yang"
date: "2023-12-18"
vignette: >
%\VignetteIndexEntry{viewscape}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
# Viewscape
This vignette provides a basic overview of the functions in R package `viewscape`.
The basic viewshed analysis can be accessed through calling the `compute_viewshed`. The two needed objects to compute the viewshed are a digital surface model (DSM) and a viewpoint.
## 1. Compute viewshed
```{r}
library(viewscape)
```
### 1.1 Compute single viewshed
```{r}
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
#Load in the viewpoint
test_viewpoint <- sf::read_sf(system.file("test_viewpoint.shp",
package = "viewscape"))
#Compute viewshed
output <- viewscape::compute_viewshed(dsm = test_dsm,
viewpoints = test_viewpoint,
offset_viewpoint = 6)
```
```{r eval=FALSE}
# overlap viewshed on DSM
output_r <- viewscape::visualize_viewshed(output, outputtype = 'raster')
terra::plot(test_dsm, axes=FALSE, box=FALSE, legend = FALSE)
terra::plot(output_r, add=TRUE, col = "red", axes=FALSE, box=FALSE, legend = FALSE)
terra::plot(test_viewpoint, add = TRUE, col = "blue", axes=FALSE, box=FALSE, legend = FALSE)
```
### 1.2 Compute the viewshed for multiple viewpoints
```{r}
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
# Load points (.shp file)
test_viewpoints <- sf::read_sf(system.file("test_viewpoints.shp",
package = "viewscape"))
# Compute viewsheds
output <- viewscape::compute_viewshed(dsm = test_dsm,
viewpoints = test_viewpoints,
offset_viewpoint = 6,
parallel = TRUE,
workers = 1)
```
```{r eval = FALSE}
# Use plot all viewsheds on DSM
par(mfrow=c(3,3))
for(i in 1:length(output)) {
each <- output[[i]]
raster_data <- viewscape::visualize_viewshed(each, outputtype="raster")
terra::plot(test_dsm, axes=FALSE, box=FALSE, legend = FALSE)
terra::plot(raster_data, add=TRUE, col = "red", axes=FALSE, box=FALSE, legend = FALSE)
}
```
## 2. Calculate viewscape metrics
### 2.1 Calculate the metrics of viewshed
The function of view depth analysis can calculate two different metrics: the furthest distance and standard deviation of distances. To calculate view depth, there are two needed objects: the DSM that was used to get viewshed and result from viewshed analysis.
The function of extent analysis can calculate the total area of viewshed and needs the DSM that was used to get viewshed and result from viewshed analysis.
The following function can calculate the area of ground surface and standard deviation of elevations within a viewshed. The function needs a DSM and a DEM/DTM to calculate the metrics.
```{r}
#Load in DSM
test_dsm <- terra::rast(system.file("test_dsm.tif",
package ="viewscape"))
# Load DTM
test_dtm <- terra::rast(system.file("test_dtm.tif",
package ="viewscape"))
# Load canopy raster
test_canopy <- terra::rast(system.file("test_canopy.tif",
package ="viewscape"))
# Load building footprints raster
test_building <- terra::rast(system.file("test_building.tif",
package ="viewscape"))
```
```{r}
# calculate metrics given the viewshed, canopy, and building footprints
test_metrics <- viewscape::calculate_viewmetrics(output[[1]],
test_dsm,
test_dtm,
list(test_canopy, test_building))
test_metrics
```
### 2.2 Calculate land use/cover diversity
calculate_diversity() calculates the proportion of each type of land use/ cover within a viewshed to get the Shannon Diversity Index.
```{r}
# load landuse raster
test_landuse <- terra::rast(system.file("test_landuse.tif",
package ="viewscape"))
```
```{r}
# the Shannon Diversity Index (SDI)
test_diversity <- viewscape::calculate_diversity(output[[1]],
test_landuse,
proportion = TRUE)
# SDI and The proportion of each type of land use
test_diversity
```
### 2.3 calculate a single feature
calculate_feature is to calculate the proportion of a feature (including trees, buildings, parking, and roads) within the viewshed. This function can be applied to
```{r}
# load canopy raster
test_canopy <- terra::rast(system.file("test_canopy.tif",
package ="viewscape"))
# calculate the percentage of canopy coverage
test_canopy_proportion <- viewscape::calculate_feature(viewshed = output[[1]],
feature = test_canopy,
type = 2,
exclude_value=0)
test_canopy_proportion
```
|
/scratch/gouwar.j/cran-all/cranData/viewscape/vignettes/viewscape.Rmd
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Expectation-Maximization
#'
#' \code{expectation_maximization} is a method to choose hyper-parameters of the
#' linear Gaussian State-Space Model with time-invariant variances relying on the
#' Expectation-Maximization algorithm.
#'
#' E-step is realized through recursive Kalman formulae (filtering then smoothing).\cr
#' M-step is the maximization of the expected complete likelihood with respect to the
#' hyper-parameters.\cr
#' We only have the guarantee of convergence to a LOCAL optimum.
#' We fix P1 = p1 I (by default p1 = 0). We optimize theta1, sig, Q.
#'
#' @param X explanatory variables
#' @param y time series
#' @param n_iter number of iterations of the EM algorithm
#' @param Q_init initial covariance matrix on the state noise
#' @param sig_init (optional, default \code{1}) initial value of the standard deviation
#' of the observation noise
#' @param verbose (optional, default \code{1000}) frequency for prints
#' @param lambda (optional, default \code{10^-9}) regularization parameter to avoid singularity
#' @param mode_diag (optional, default \code{FALSE}) if \code{TRUE} then we restrict the
#' search to diagonal matrices for \code{Q}
#' @param p1 (optional, default \code{0}) deterministic value of \code{P1 = p1 I}
#'
#' @return a list containing values for \code{P,theta,sig,Q}, and two vectors
#' \code{DIFF, LOGLIK} assessing the convergence of the algorithm.
#' @export
#'
#' @example tests/example_em.R
expectation_maximization <- function(X, y, n_iter, Q_init, sig_init = 1, verbose = 1000,
lambda = 10^-9, mode_diag = FALSE, p1 = 0) {
n <- dim(X)[1]
d <- dim(X)[2]
sig2 <- sig_init^2
theta1 <- matrix(0,d,1)
Q <- Q_init
DIFF <- numeric(n_iter)
LOGLIK <- rep(- 0.5 * log(2 * pi), n_iter)
flag_decrease <- FALSE
init_time <- Sys.time()
for (i in 1:n_iter) {
theta_arr <- array(0,dim=c(n,d))
theta_arr2 <- array(0,dim=c(n,d))
theta <- matrix(0,d,1)
P_arr <- array(0,dim=c(n,d,d))
P <- diag(d,x=p1)
C_arr <- array(0,dim=c(n,d,d))
C_arr2 <- array(0,dim=c(n,d,d))
C <- diag(d)
##################################################################################
# E-step
# filtering
for (t in 1:n) {
theta_arr2[t,] <- theta
C_arr2[t,,] <- C
Xt <- X[t,]
LOGLIK[i] <- LOGLIK[i] - 0.5 * log(sig2 + crossprod(Xt, P %*% Xt)[1]) / n -
0.5 * (y[t] - crossprod(theta + C %*% theta1, Xt)[1])^2 /
(sig2 + crossprod(Xt, P %*% Xt)[1]) / n
P <- P - tcrossprod(P%*%Xt) / (sig2 + crossprod(Xt, P%*%Xt)[1])
P_arr[t,,] <- P
theta <- theta + P %*% Xt * (y[t] - crossprod(theta, Xt)[1]) / sig2
theta_arr[t,] <- theta
C <- (diag(d) - tcrossprod(P%*%Xt, Xt) / sig2) %*% C
C_arr[t,,] <- C
P <- P + Q
}
Pn <- P - Q
# smoothing
Q_inv <- solve(Q)
Q_new <- diag(d,x=0)
sig2_new <- crossprod(X[n,], Pn %*% X[n,])[1] / n
for (t in (n-1):1) {
Pt <- P_arr[t,,]
Pt_inv <- solve(Pt+Q)
Q_new <- Q_new + (Pn - Pn %*% Pt_inv %*% Pt - t(Pn %*% Pt_inv %*% Pt)) / (n-1)
thetat <- matrix(theta_arr[t,],d,1)
theta_arr[t,] <- thetat + Pt %*% Pt_inv %*% (theta_arr[t+1,] - thetat)
Pn <- Pt + Pt %*% Pt_inv %*% (Pn - Pt - Q) %*% Pt_inv %*% Pt
Q_new <- Q_new + Pn / (n-1)
sig2_new <- sig2_new + crossprod(X[t,], Pn %*% X[t,])[1] / n
C <- C_arr[t,,] + Pt %*% Pt_inv %*% (C - C_arr[t,,])
C_arr[t,,] <- C
}
##################################################################################
# M-step
# update theta1
A <- diag(d,x=lambda)
b <- matrix(0,d,1)
for (t in 1:n) {
Xt <- X[t,]
Ct <- C_arr2[t,,]
A <- A + tcrossprod(crossprod(Ct, Xt))
b <- b + (y[t] - crossprod(theta_arr2[t,], Xt)[1]) * crossprod(Ct, Xt)
}
theta1 <- solve(A,b)
for (t in 1:n)
theta_arr[t,] <- theta_arr[t,] + C_arr[t,,] %*% theta1
# update Q
Q_last <- Q
Q <- Q_new
for (t in 1:(n-1))
Q <- Q + tcrossprod(theta_arr[t+1,] - theta_arr[t,]) / (n-1)
Q <- (Q + t(Q)) / 2 # force symmetry to avoid approx error propagation
if (mode_diag)
Q <- diag(diag(Q))
# update sig2
sig2 <- sig2_new
for (t in 1:n)
sig2 <- sig2 + (y[t] - crossprod(theta_arr[t,], X[t,])[1])^2 / n
DIFF[i] <- sqrt(sum(diag(crossprod(Q-Q_last))) / sum(diag(crossprod(Q_last))))
if (verbose > 0) {
if (i %% verbose == 0) {
print(paste('Iteration:',i,
'| Relative variation of Q:', format(DIFF[i], scientific=TRUE, digits=3),
'| Log-likelihood:', round(LOGLIK[i], digits=6)))
flag_decrease <- FALSE
}
if (sum(sum(abs(Q-t(Q)))>10^-6)>0)
print('Q not symmetric')
if (min(eigen(Q, symmetric=TRUE)$values) < 0)
print('Q is not positive')
}
if (i > 1) {
if (LOGLIK[i] < LOGLIK[i-1] & flag_decrease == FALSE) {
warning('WARNING: log-likelihood decreased')
flag_decrease <- TRUE
}
}
}
if (verbose > 0)
print(c('Computation time of the expectation-maximization:', Sys.time()-init_time))
list(P = diag(d,x=p1), theta = theta1, Q = Q, sig = sqrt(sig2), DIFF = DIFF, LOGLIK = LOGLIK)
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/expectation-maximization.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Iterative Grid Search
#'
#' \code{iterative_grid_search} is an iterative method to choose hyper-parameters of
#' the linear Gaussian State-Space Model with time-invariant variances.
#'
#' We restrict ourselves to a diagonal matrix \code{Q} and we optimize \code{Q / sig^2} on
#' a grid. Each diagonal coefficient is assumed to belong to a pre-defined \code{q_list}.\cr
#' We maximize the log-likelihood on that region of search in an iterative fashion.
#' At each step, we change the diagonal coefficient improving the most the log-likelihood.
#' We stop when there is no possible improvement. This doesn't guarantee an optimal point
#' on the grid, but the computational time is much lower.
#'
#' @param X the explanatory variables
#' @param y the observations
#' @param q_list the possible values of \code{diag(Q) / sig^2}
#' @param Q_init (default \code{NULL}) initial value of \code{Q / sig^2},
#' if \code{NULL} it is set to 0
#' @param max_iter (optional 0) maximal number of iterations. If 0 then optimization is
#' done as long as we can improve the log-likelihood
#' @param delay (optional, default 1) to predict \code{y[t]} we have access to \code{y[t-delay]}
#' @param use (optional, default \code{NULL}) the availability variable
#' @param restrict (optional, default \code{NULL}) if not \code{NULL} it allows to specify the
#' indices of the diagonal coefficient to optimize
#' @param mode (optional, default \code{gaussian})
#' @param p1 (optional, default \code{0}) coefficient for \code{P1/sig^2 = p1 I}
#' @param ncores (optional, default \code{1}) number of available cores for computation
#' @param train_theta1 (optional, default \code{NULL}) training set for estimation of \code{theta1}
#' @param train_Q (optional, default \code{NULL}) time steps on which the log-likelihood is computed
#' @param verbose (optional, default \code{TRUE}) whether to print intermediate progress
#'
#' @return a list containing values for \code{P,theta,sig,Q}, as well as \code{LOGLIK},
#' the evolution of the log-likelihood during the search.
#' @export
#'
#' @example tests/example_igd.R
iterative_grid_search <- function(X, y, q_list, Q_init = NULL, max_iter = 0, delay = 1,
use = NULL, restrict = NULL, mode = 'gaussian', p1 = 0,
ncores = 1, train_theta1 = NULL, train_Q = NULL, verbose=TRUE) {
n <- dim(X)[1]
d <- dim(X)[2]
train_theta1 <- filter_null(train_theta1, 1:n)
train_Q <- filter_null(train_Q, 1:n)
Qstar <- if(is.null(Q_init)) diag(d,x=0) else diag(diag(Q_init))
use <- filter_null(use, sapply(1:n - delay, function(x) max(x,0)))
search_dimensions <- filter_null(restrict, 1:d)
b <- TRUE
l_opt <- loglik(X, y, Qstar, use, p1, train_theta1, train_Q, mode = mode)
n_iter <- 0
LOGLIK <- c()
init_time <- Sys.time()
while (b) {
n_iter <- n_iter + 1
b <- n_iter < max_iter | max_iter == 0
i <- -1
q_i <- 0
for (k in search_dimensions) {
q_prev <- Qstar[k,k]
l_arr <- unlist(parallel::mclapply(q_list,function(q) {
Qstar[k,k] <- q
loglik(X, y, Qstar, use, p1, train_theta1, train_Q, mode = mode)
}, mc.cores=ncores))
if (max(l_arr) > l_opt) {
l_opt <- max(l_arr)
i <- k
q_i <- q_list[which.max(l_arr)]
}
Qstar[k,k] <- q_prev
}
LOGLIK <- c(LOGLIK, l_opt)
if (i == -1)
b <- FALSE
else {
Qstar[i,i] <- q_i
if (verbose) {
if (q_i %in% c(min(q_list),max(q_list)))
warning(c('Values may not be suited',q_i))
print(paste('Iteration:', n_iter,
' | Log-likelihood:', round(l_opt, digits=6),
' | Diagonal of Q/sigma^2:'))
print(diag(Qstar))
}
}
}
par <- parameters_star(X, y, Qstar, p1)
sig <- get_sig(X, y, par, Qstar, use)
if (verbose)
print(paste('Computation time of the iterative grid search:',
format(difftime(Sys.time(),init_time))))
list(theta = get_theta1(X, y, par, Qstar, use, mode = mode), P = diag(d,x=p1*sig^2),
sig = sig, Q = Qstar*sig^2, LOGLIK = LOGLIK)
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/iterative_grid_search.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' @title Kalman Filtering
#' @description Compute the filtered estimation of the parameters \code{theta} and \code{P}.
#'
#' @param X the explanatory variables
#' @param y the time series
#' @param theta1 initial \code{theta}
#' @param P1 initial \code{P}
#' @param Q (optional, default \code{0}) covariance matrix of the state noise
#' @param sig (optional, default \code{1}) variance of the spate noise
#'
#' @return a list containing \code{theta_arr} and \code{P_arr}, the filtered estimation of
#' the parameters \code{theta} and \code{P}.
#' @export
kalman_filtering <- function(X, y, theta1, P1, Q = 0, sig = 1) {
n <- nrow(X)
d <- ncol(X)
theta_arr <- array(0,dim=c(n,d))
theta <- theta1
P_arr <- array(0,dim=c(n,d,d))
P <- P1
# filtering
for (t in 1:n) {
theta_arr[t,] <- theta
P_arr[t,,] <- P
Xt <- X[t,]
if (sum(is.na(c(X[t,],y[t]))) == 0) {
P <- P - tcrossprod(P %*% Xt) / (sig^2 + crossprod(Xt, P %*% Xt)[1])
theta <- theta + P %*% Xt * (y[t] - crossprod(theta, Xt)[1]) / sig^2
P <- P + Q
}
}
list(theta_arr = theta_arr, P_arr = P_arr, theta = theta, P = P)
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/kalman_filtering.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' @title Kalman Smoothing
#' @description Compute the smoothed estimation of the parameters \code{theta} and \code{P}.
#'
#' @param X the explanatory variables
#' @param y the time series
#' @param theta1 initial \code{theta}
#' @param P1 initial \code{P}
#' @param Q (optional, default \code{0}) covariance matrix of the state noise
#' @param sig (optional, default \code{1}) variance of the spate noise
#'
#' @return a list containing \code{theta_arr} and \code{P_arr}, the smoothed estimation of
#' the parameters \code{theta} and \code{P}.
#' @export
kalman_smoothing <- function(X, y, theta1, P1, Q=0, sig=1) {
n <- nrow(X)
d <- ncol(X)
theta_arr <- array(0,dim=c(n,d))
theta <- theta1
P_arr <- array(0,dim=c(n,d,d))
P <- P1
# filtering
for (t in 1:n) {
Xt <- X[t,]
if (sum(is.na(c(X[t,],y[t]))) == 0)
P <- P - tcrossprod(P %*% Xt) / (sig^2 + crossprod(Xt, P %*% Xt)[1])
theta_arr[t,] <- theta
P_arr[t,,] <- P
if (sum(is.na(c(X[t,],y[t]))) == 0) {
theta <- theta + P %*% Xt * (y[t] - crossprod(theta, Xt)[1]) / sig^2
P <- P + Q
}
}
Pn <- P - Q
# smoothing
for (t in (n-1):1) {
Pt <- P_arr[t,,]
P_inv <- solve(Pt+Q)
thetat <- matrix(theta_arr[t,],d,1)
theta_arr[t,] <- thetat + Pt %*% P_inv %*% (theta_arr[t+1,] - thetat)
Pn <- Pt + Pt %*% P_inv %*% (Pn - Pt - Q) %*% P_inv %*% Pt
P_arr[t,,] <- Pn
}
list(theta_arr = theta_arr, P_arr = P_arr)
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/kalman_smoothing.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
parameters_star <- function(X, y, Qstar, p1 = 0) {
n <- dim(X)[1]
d <- dim(X)[2]
thetastar <- matrix(0,d,1)
Pstar <- diag(d,x=p1)
C <- diag(d,x=1)
thetastar_arr <- array(0,dim=c(n+1,d))
Pstar_arr <- array(0,dim=c(n+1,d,d))
C_arr <- array(0,dim=c(n+1,d,d))
C_arr[1,,] <- C
for (t in 1:n) {
Xt <- X[t,]
err <- y[t] - crossprod(thetastar, Xt)[1]
inv <- 1 / (1 + crossprod(Xt, Pstar %*% Xt)[1])
thetastar_new <- thetastar + Pstar %*% Xt * inv * err
Pstar_new <- Pstar + Qstar - tcrossprod(Pstar %*% Xt) * inv
C_new <- (diag(d) - tcrossprod(Pstar %*% Xt, Xt) * inv) %*% C
thetastar <- thetastar_new
Pstar <- Pstar_new
C <- C_new
thetastar_arr[t+1,] <- thetastar
Pstar_arr[t+1,,] <- Pstar
C_arr[t+1,,] <- C
}
list(thetastar_arr=thetastar_arr, Pstar_arr=Pstar_arr, C_arr=C_arr)
}
get_theta1 <- function(X, y, par, Qstar, use, mode = 'gaussian') {
n <- dim(X)[1]
d <- dim(X)[2]
##########################################################
A <- diag(d,x=0)
b <- matrix(0,d,1)
for (t in 1:n) {
Xt <- X[t,]
err <- y[t] - crossprod(par$thetastar_arr[use[t]+1,], Xt)[1]
inv <- 1
if (mode == 'gaussian') {
inv <- 1 / (1 + crossprod(Xt, (par$Pstar_arr[use[t]+1,,] +
max(0,t-use[t]-1) * Qstar) %*% Xt)[1])
}
A <- A + tcrossprod(crossprod(par$C_arr[use[t]+1,,],Xt)) * inv
b <- b + crossprod(par$C_arr[use[t]+1,,],Xt) * err * inv
}
solve(A,b)
}
get_sig <- function(X, y, par, Qstar, use) {
n <- dim(X)[1]
d <- dim(X)[2]
theta1 <- get_theta1(X, y, par, Qstar, use)
sig2 <- 0
for (t in 1:n) {
Xt <- matrix(X[t,],d,1)
P <- par$Pstar_arr[use[t]+1,,] + max(t-use[t]-1,0) * Qstar
sig2 <- sig2 + (y[t] - crossprod(par$thetastar_arr[use[t]+1,] +
par$C_arr[use[t]+1,,]%*%theta1,Xt)[1])^2 /
(1 + crossprod(Xt, P%*%Xt)[1]) / n
}
sqrt(sig2)
}
#' Log-likelihood
#'
#' \code{loglik} computes the log-likelihood of a state-space model of specified
#' \code{Q/sig^2, P1/sig^2, theta1}.
#'
#' @param X explanatory variables
#' @param y time series
#' @param Qstar the ratio \code{Q/sig^2}
#' @param use the availability variable
#' @param p1 coefficient for \code{P1/sig^2 = p1 I}
#' @param train_theta1 training set for estimation of \code{theta1}
#' @param train_Q time steps on which the log-likelihood is computed
#' @param mode (optional, default \code{gaussian})
#'
#' @return a numeric value for the log-likelihood.
#' @export
loglik <- function(X, y, Qstar, use, p1, train_theta1, train_Q, mode = 'gaussian') {
par <- parameters_star(X, y, Qstar, p1)
theta1 <- get_theta1(X[train_theta1,], y[train_theta1], par, Qstar, use,
mode = mode)
sum1 <- 0
sum2 <- 0
n <- length(train_Q)
for (t in train_Q) {
Xt <- X[t,]
P <- par$Pstar_arr[use[t]+1,,] + max(0,t-use[t]-1) * Qstar
sum1 <- sum1 + log(1 + crossprod(Xt, P%*%Xt)[1]) / n
err2 <- (y[t] - crossprod(par$thetastar_arr[use[t]+1,] +
par$C_arr[use[t]+1,,]%*%theta1,Xt)[1])^2
if (mode == 'gaussian')
sum2 <- sum2 + err2 / (1 + crossprod(Xt, P%*%Xt)[1]) / n
else if (mode == 'rmse')
sum2 <- sum2 + err2 / n
}
if (mode == 'gaussian')
return(- 0.5 * sum1 - 0.5 - 0.5 * log(2 * pi * sum2))
else if (mode == 'rmse')
return(- sqrt(sum2))
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/loglik.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Plot a statespace object
#'
#' \code{plot.statespace} displays different graphs expressing the behavior of the state-space
#' model:\cr
#' 1. Evolution of the Bias: rolling version of the error of the model.\cr
#' 2. Evolution of the RMSE: root-mean-square-error computed on a rolling window.\cr
#' 3. State Evolution: time-varying state coefficients, subtracted of the initial state vector.\cr
#' 4. Normal Q-Q Plot: we check if the observation follows the Gaussian distribution of estimated
#' mean and variance. To that end, we display a Q-Q plot of the residual divided by the estimated
#' standard deviation, against the standard normal distribution.\cr
#'
#' @param x the statespace object.
#' @param pause (default \code{FALSE}) if set to \code{FALSE} then the plots are displayed on a single
#' page, otherwise a new page is created for each plot.
#' @param window_size (default \code{7}) the window size of the rolling mean computed on the
#' error to display the bias, and on the mean squared error to display a rolling RMSE.
#' @param date (default \code{NULL}) defines the values for the x-axis.
#' @param sel (default \code{NULL}) defines a subset of the data on which we zoom.
#' For instance one can display the evolution of the SSM on a test set and not the whole data set.
#' @param ... additional parameters
#'
#' @importFrom graphics axis box legend lines mtext par plot points text
#' @importFrom stats qqnorm sd
#'
#' @return No return value, called to display plots.
#'
#' @export
plot.statespace <- function(x, pause = FALSE, window_size = 7, date = NULL, sel = NULL, ...) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
if (pause) par(ask = TRUE, mfrow = c(1,1))
else par(ask = FALSE, mfrow = c(3,2), mai=c(0.4,0.4,0.4,0.4))
if (is.null(date)) date <- 1:length(x$y)
if (is.null(sel)) sel <- 1:length(x$y)
# 1. Evolution of the bias
res <- (x$y - x$pred_mean)[sel]
th_fixed <- if (is.null(x$vik)) x$kf$theta_arr[sel[1],] else x$vik$theta_arr[sel[1],]
res_fixed <- x$y[sel] - x$X[sel,] %*% th_fixed
date_sel <- date[sel]
lfixed <- sapply(window_size:length(res), function(t) {mean(res_fixed[(t-window_size+1):t], na.rm=T)})
l <- sapply(window_size:length(res), function(t) {mean(res[(t-window_size+1):t], na.rm=T)})
plot(date_sel[window_size:length(res)], lfixed, type='l', ylim=range(c(lfixed, l), na.rm=T),
main='Evolution of the Bias', ylab='', xlab='', lwd=2)
lines(date_sel[window_size:length(res)], l, col='darkgreen', lwd=2)
mtext('SSM', side = 4, at = l[length(l)], col='darkgreen', las=2, cex=0.5*(1+pause), line=0.3)
mtext('Fixed', side = 4, at = lfixed[length(lfixed)], col='black', las=2, cex=0.5*(1+pause), line=0.3)
# 2. Evolution of the RMSE
lfixed <- sapply(window_size:length(res), function(t) {sqrt(mean(res_fixed[(t-window_size+1):t]^2, na.rm=T))})
l <- sapply(window_size:length(res), function(t) {sqrt(mean(res[(t-window_size+1):t]^2, na.rm=T))})
plot(date_sel[window_size:length(res)], lfixed, ylim=c(0,max(c(lfixed, l), na.rm=T)), type='l',
ylab='', xlab='', lwd=2,
main=if (pause) 'Evolution of the Root-Mean-Square-Error' else 'Evolution of the RMSE')
lines(date_sel[window_size:length(res)], l, col='darkgreen', lwd=2)
mtext('SSM', side=4, at=l[length(l)], col='darkgreen', las=2, cex=0.5*(1+pause), line=0.3)
mtext('Fixed', side=4, at=lfixed[length(lfixed)], col='black', las=2, cex=0.5*(1+pause), line=0.3)
# 3. Evolution of the state coefficients
# Kalman Filtering
plot_evol <- function(evol, title, coln = NULL, logpar='') {
c <- RColorBrewer::brewer.pal(max(ncol(evol), 11), name = "Spectral")[sapply(1:ncol(evol), function(i) min(i,11))]
plot(date_sel, evol[,1], type='l', ylim=range(evol), col=c[1],
main=title, ylab='', xlab='', lwd=2, log=logpar)
for (j in 2:ncol(evol))
lines(date_sel, evol[,j], col=c[j], lwd=2)
if (!is.null(coln))
mtext(side=4, text=coln, at=evol[nrow(evol),], las=2, col=c, cex=0.5, line=0.3)
}
if (!is.null(x$kf)) {
evoltheta <- x$kf$theta_arr[sel,]
evoltheta <- evoltheta - matrix(rep(evoltheta[1,], nrow(evoltheta)), nrow(evoltheta),
ncol(evoltheta), byrow=TRUE)
plot_evol(evoltheta, 'State Evolution: Kalman Filtering', coln = colnames(x$X))
}
# Kalman Smoothing
if (!is.null(x$ks)) {
evoltheta <- x$ks$theta_arr[sel,]
evoltheta <- evoltheta - matrix(rep(evoltheta[1,], nrow(evoltheta)), nrow(evoltheta),
ncol(evoltheta), byrow=TRUE)
plot_evol(evoltheta, 'State Evolution: Kalman Smoothing', coln = colnames(x$X))
}
# Viking
if (!is.null(x$vik)) {
evoltheta <- x$vik$theta_arr[sel,]
evoltheta <- evoltheta - matrix(rep(evoltheta[1,], nrow(evoltheta)), nrow(evoltheta),
ncol(evoltheta), byrow=TRUE)
plot_evol(evoltheta, 'State Evolution: Viking', coln = colnames(x$X))
}
# 4. Viking: evolution of the variances
if (!is.null(x$vik)) {
plot(date_sel, exp(x$vik$hata_arr[sel] + x$vik$s_arr[sel] / 2), type='l', lwd=2,
main='Evolution of the observation noise variance', ylab='', xlab='')
plot_evol(x$vik$q_arr[sel,], 'Evolution of the state noise covariance matrix',
coln = colnames(x$X), logpar = 'y')
}
# 5. Plot the diagonal of Q
if (!is.null(x$kf)) {
diagQ <- diag(x$kalman_params$Q)
# diagQ[diagQ == 0] <- NA
c <- RColorBrewer::brewer.pal(max(length(diagQ), 11), name = "Spectral")[sapply(1:length(diagQ), function(i) min(i,11))]
plot(c(1:length(diagQ)), diagQ, type='h', xlab='', ylab='', col=c, axes=FALSE,
main='Diagonal of Q', ylim=c(0,max(diagQ)*1.4))
text(c(1:length(diagQ)), diagQ, labels=format(diagQ, scientific=TRUE, digits=2),
srt=90, adj=c(-0.15,0.5), col=c)
points(c(1:length(diagQ)), diagQ, pch=20, col=c)
axis(2)
box()
mtext(side=1, text=colnames(x$X), at=1:length(diagQ), las=2, col=c, cex=0.5)
}
# 6. Check of the Gaussian assumption
qqnorm(res / x$pred_sd[sel], xlab='', ylab='', main='Normal Q-Q Plot of the Residuals')
lines(-4:4, -4:4, col='darkred', lwd=2)
lines(-4:4, -4:4 * sd(res / x$pred_sd[sel]), col='darkgreen', lwd=2)
legend('topleft', legend=c('Standard Normal', 'Best Constant'), col=c('darkred','darkgreen'),
lty=1, lwd=2)
par(mfrow = c(1,1))
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/plot-statespace.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Predict using a statespace object
#'
#' \code{predict.statespace} makes a prediction for a statespace object, in the offline or online
#' setting.
#'
#' @param object the statespace object
#' @param newX the design matrix in the prediction set
#' @param newy (default \code{NULL}) the variable of interest in the prediction set. If specified
#' it allows to use the state-space model in the online setting. Otherwise the prediction is
#' offline.
#' @param online (default \code{TRUE}) specifies if the prediction is made online, that is if
#' the observation at time t-1 is used to update the model before predicting at time t.
#' @param compute_smooth (default \code{FALSE}) specifies if Kalman Smoothing is also computed.
#' @param type type of prediction. Can be either
#' \describe{
#' \item{mean}{return the mean forecast.}
#' \item{proba}{return a probabilistic forecast (list containing estimation of the mean and
#' standard deviation).}
#' \item{model}{return the updated statespace object (containing also the forecasts).}
#' }
#' @param ... additional parameters
#'
#' @return Depending on the type specified, the result is \cr
#' - a vector of mean forecast if \code{type='mean'}
#' - a list of two vectors, mean forecast and standard deviations if \code{type='proba'}
#' - a statespace object if \code{type='model'}
#'
#' @export
predict.statespace <- function(object, newX, newy = NULL, online = TRUE, compute_smooth = FALSE,
type = c('mean', 'proba', 'model'), ...) {
type <- match.arg(type)
object_new <- object
X <- if (is.null(ncol(newX))) matrix(newX, length(newX), 1) else newX
object_new$X <- X
object_new$y <- newy
if (online) {
if (is.null(newy))
stop('newy must be given when online = TRUE')
if (is.null(object$viking_params)) {
if (compute_smooth) {
object_new$ks <- kalman_smoothing(X, newy, object$kalman_params$theta, object$kalman_params$P,
Q = object$kalman_params$Q, sig = object$kalman_params$sig)
}
object_new$kf <- kalman_filtering(X, newy, object$kalman_params$theta, object$kalman_params$P,
Q = object$kalman_params$Q, sig = object$kalman_params$sig)
object_new$kalman_params$theta <- object_new$kf$theta
object_new$kalman_params$P <- object_new$kf$P
object_new$pred_mean <- sapply(1:nrow(X), function(t) {crossprod(object_new$kf$theta_arr[t,], X[t,])[1]})
object_new$pred_sd <- sapply(1:nrow(X), function(t) {
sqrt(crossprod(X[t,], object_new$kf$P_arr[t,,] %*% X[t,]) + object_new$kalman_params$sig^2)
})
} else {
object_new$vik <- do.call(viking, c(list(X = newX, y = newy), object$viking_params))
object_new$pred_mean <- sapply(1:nrow(X), function(t) {crossprod(object_new$vik$theta_arr[t,], X[t,])[1]})
object_new$pred_sd <- sqrt(sapply(1:nrow(X), function(t) {
crossprod(X[t,], object_new$vik$P_arr[t,,] %*% X[t,])
}) + exp(object_new$vik$hata + object_new$vik$s / 2) )
}
} else {
theta <- if (is.null(object$viking_params)) object$kalman_params$theta else object$viking_params$theta
object_new$pred_mean <- sapply(1:nrow(X), function(t) {
crossprod(X[t,], theta)
})
P <- if (is.null(object$viking_params)) object$kalman_params$P else object$viking_params$P
object_new$pred_sd <- sapply(1:nrow(X), function(t) {
sqrt(crossprod(X[t,], (P + (t-1) * object$kalman_params$Q) %*% X[t,]) + object$kalman_params$sig^2)
})
}
switch(type, mean = object_new$pred_mean,
proba = list(pred_mean = object_new$pred_mean, pred_sd = object_new$pred_sd),
model = object_new)
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/predict-statespace.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Select time-invariant variances of a State-Space Model
#'
#' \code{select_Kalman_variances} is a function to choose hyper-parameters of the
#' linear Gaussian State-Space Model with time-invariant variances. It relies on the
#' functions \code{iterative_grid_search} and \code{expectation_maximization}.
#'
#' @param ssm the statespace object
#' @param X explanatory variables
#' @param y time series
#' @param method (optional, default \code{'igd'}) it can be either
#' \describe{
#' \item{\code{'igd'}}{\code{iterative_grid_search} is called}
#' \item{\code{'em'}}{\code{expectation_maximization} is called}
#' }
#' @param ... additional parameters
#'
#' @return a new statespace object with new values in \code{kalman_params}
#' @export
select_Kalman_variances <- function(ssm, X, y, method = 'igd', ...) {
### 1. Estimation of the hyper-parameters
if (method == 'igd')
l <- iterative_grid_search(X, y, ...)
else if (method == 'em')
l <- expectation_maximization(X, y, ...)
else
stop("Selection method is not recognized. It should be either 'igd' or 'em'.")
### 2. Creation of the new model
ssm_new <- ssm
ssm_new$kalman_params <- l
ssm_new$kalman_params$opt_Kalman_call <- match.call()
# ### 3. Compute the state-space inference for designed parameters
# # Kalman Filtering and Smoothing
# ssm_new <- predict(ssm_new, X, newy = y, type = 'model')
ssm_new
}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/select_Kalman_variances-statespace.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Design a State-Space Model
#'
#' The function \code{statespace} builds a state-space model, with known or unknown variances.
#' By default, this function builds a state-space model in the static setting, with a constant
#' state (zero state noise covariance matrix) and constant observation noise variance.
#'
#' @param X design matrix.
#' @param y variable of interest.
#' @param kalman_params (default \code{NULL}) list containing initial values for \code{theta,P}
#' as well as the variances (\code{Q,sig}). If it is not specified, the state-space model is
#' constructed in the static setting (\code{theta=0, P=I, Q=0, sig=1}).
#' @param viking_params (default \code{NULL}) list of parameters for the Viking algorithm.
#' @param ... additional parameters
#'
#' @return a statespace object.
#'
#' @importFrom stats predict
#' @export
#'
#' @example tests/example_statespace.R
statespace <- function(X, y, kalman_params = NULL, viking_params = NULL, ...) {
### Create state-space object
d <- if (is.null(ncol(X))) 1 else ncol(X)
kp <- list(theta = filter_null(kalman_params$theta, matrix(0,d,1)),
P = filter_null(kalman_params$P, diag(d)),
Q = filter_null(kalman_params$Q, diag(d, x=0)),
sig = filter_null(kalman_params$sig, 1),
opt_Kalman_call = kalman_params$opt_Kalman_call)
vp <- viking_params
if (!is.null(viking_params)) {
vp$theta <- filter_null(vp$theta, matrix(0,d,1))
vp$P <- filter_null(vp$P, diag(d))
vp$hata <- filter_null(vp$hata, 0)
vp$s <- filter_null(vp$s, 0)
vp$mode <- filter_null(viking_params$mode, 'diagonal')
vp$hatb <- filter_null(vp$hatb, if (viking_params$mode == 'scalar') 0 else matrix(0, d, 1))
vp$Sigma <- filter_null(vp$Sigma, if (viking_params$mode == 'scalar') 0 else diag(d, x=0))
}
ssm <- list(kalman_params = kp, viking_params = vp)
class(ssm) <- 'statespace'
### Compute the state-space inference for designed parameters
# Using either Kalman Filtering and Smoothing or Viking
ssm <- predict(ssm, X, newy = y, type = 'model', ...)
ssm
}
filter_null <- function(x, y) {if (is.null(x)) y else x}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/statespace.R
|
# Copyright 2019-2022 EDF, Sorbonne Université and CNRS.
# Author : Joseph de Vilmarest (EDF, Sorbonne Université)
# The package Viking is distributed under the terms of the license GNU LGPL 3.
#' Viking: Variational bayesIan variance tracKING
#'
#' \code{viking} is the state-space estimation realized by Viking,
#' generalizing the Kalman Filter to variance uncertainty.
#'
#' @param X the explanatory variables
#' @param y the time series
#' @param theta0 initial \code{theta}
#' @param P0 initial \code{P}
#' @param hata0 initial \code{hata}
#' @param s0 initial \code{s}
#' @param hatb0 initial \code{hatb}
#' @param Sigma0 initial \code{Sigma}
#' @param n_iter (optional, default \code{2}) number of alternate steps
#' @param mc (optional, default \code{10}) number of Monte-Carlo samples
#' @param rho_a (optional, default \code{0}) learning rate of \code{a}
#' @param rho_b (optional, default \code{0}) learning rate of \code{b}
#' @param learn_sigma (optional, default \code{TRUE}) asserts the estimation of \code{a}
#' @param learn_Q (optional, default \code{TRUE}) asserts the estimation of \code{b}
#' @param K (optional, default \code{NULL}) if not \code{NULL} then it is a multiplicative
#' factor of the state in the state update
#' @param mode (optional, default \code{'diagonal'})
#' @param thresh (optional, default \code{TRUE})
#' @param phi (optional, default \code{logt})
#' @param phi1 (optional, default \code{logt1})
#' @param phi2 (optional, default \code{logt2})
#'
#' @importFrom stats rnorm
#'
#' @return a list composed of the evolving value of all the parameters:
#' \code{theta_arr, P_arr, q_arr, hata_arr, s_arr, hatb_arr, Sigma_arr}
#'
#' @references J. de Vilmarest, O. Wintenberger (2021), Viking: Variational Bayesian Variance
#' Tracking. <arXiv:2104.10777>
#'
#' @export
viking <- function(X, y, theta0, P0, hata0, s0, hatb0, Sigma0, n_iter = 2, mc = 10,
rho_a = 0, rho_b = 0, learn_sigma = TRUE, learn_Q = TRUE, K = NULL,
mode = 'diagonal', thresh = TRUE,
phi = logt, phi1 = logt1, phi2 = logt2) {
n <- dim(X)[1]
d <- dim(X)[2]
if (is.null(K))
K <- diag(d)
q_arr <- matrix(0,n,d)
theta_arr <- matrix(0,n,d)
P_arr <- array(0,dim=c(n,d,d))
hata_arr <- numeric(n)
s_arr <- numeric(n)
hatb_arr <- matrix(0,n,d)
Sigma_arr <- array(0,dim=c(n,d,d))
theta <- theta0
P <- P0
hata <- hata0
s <- s0
hatb <- hatb0
Sigma <- Sigma0
for (t in 1:n) {
hata_new <- hata
s_new <- s + rho_a
hatb_new <- hatb
Sigma_new <- Sigma + rho_b * (if (mode == 'scalar') 1 else diag(d))
if (mode == 'scalar')
q_arr[t,] <- mean(phi( rnorm(10^3, mean=hatb_new, sd=Sigma_new) ))
else if (mode == 'diagonal')
q_arr[t,] <- colMeans(phi( mvtnorm::rmvnorm(10^3, mean=hatb_new, sigma=Sigma_new) ))
theta_arr[t,] <- K %*% theta
P_arr[t,,] <- K %*% P %*% t(K) + diag(q_arr[t,])
hata_arr[t] <- hata_new
s_arr[t] <- s_new
hatb_arr[t,] <- hatb_new
Sigma_arr[t,,] <- Sigma_new
for (iter in 1:n_iter) {
if (mode == 'scalar') {
if (mc > 0) {
bMC <- rnorm(mc, mean=hatb_new, sd=Sigma_new)
A <- diag(d,x=0)
for (b in bMC)
A <- A + solve(K %*% P %*% t(K) + diag(d) * phi(b)) / length(bMC)
} else {
C_inv <- solve(K %*% P %*% t(K) + phi(hatb_new) * diag(d))
A <- C_inv - phi2(hatb_new) * Sigma_new * C_inv %*% C_inv / 2 +
phi1(hatb_new)^2 * Sigma_new * C_inv %*% C_inv %*% C_inv
}
}
else if (mode == 'diagonal') {
if (mc > 0) {
bMC <- mvtnorm::rmvnorm(mc, mean=hatb_new, sigma=Sigma_new)
A <- diag(d,x=0)
for (i in 1:nrow(bMC))
A <- A + solve(K %*% P %*% t(K) + diag(c( phi(bMC[i,]) ))) / nrow(bMC)
} else {
C_inv <- solve(K %*% P %*% t(K) + diag(c( phi(hatb_new) )))
A <- C_inv - C_inv %*% diag(c( phi2(hatb_new) * diag(Sigma_new) )) %*% C_inv / 2 +
C_inv %*% (C_inv * tcrossprod(phi1(hatb_new)) * Sigma_new) %*% C_inv
}
}
A_inv <- solve((A+t(A))/2)
# update of theta and P
v <- exp(hata_new - s_new/2)
P_new <- A_inv - tcrossprod(A_inv %*% X[t,]) / (crossprod(X[t,], A_inv %*% X[t,])[1] + v)
theta_new <- K %*% theta + P_new %*% X[t,] / v * (y[t] - crossprod(K %*% theta, X[t,])[1])
# update of hata and s
if (learn_sigma) {
c <- (y[t] - crossprod(theta_new, X[t,])[1])^2 + crossprod(X[t,], P_new %*% X[t,])[1]
s_new <- ((s + rho_a)^-1 + 0.5 * c * exp(-hata_new))^-1
Ma <- 3 * s
diff <- 0.5 * ((s + rho_a)^-1 + 0.5 * c * exp(- hata + s_new / 2 + Ma))^-1 *
(c * exp(- hata + s_new / 2) - 1)
hata_new <- hata + max(min(diff, Ma), -Ma)
}
# update of hatb and Sigma
if (learn_Q) {
B <- P_new + tcrossprod(theta_new - K %*% theta)
if (mode == 'scalar') {
C_inv <- solve(K %*% P %*% t(K) + phi(hatb) * diag(d))
g <- sum(diag( C_inv %*% (diag(d) - B %*% C_inv) )) * phi1(hatb)
H <- - sum(diag(C_inv %*% B %*% C_inv)) * phi2(hatb) +
2 * sum(diag(C_inv %*% C_inv %*% B %*% C_inv)) * phi1(hatb)^2
Sigma_new <- ((Sigma + rho_b)^-1 + H / 2)^-1
hatb_new <- hatb - Sigma_new * g / 2
} else if (mode == 'diagonal') {
C_inv <- solve(K %*% P %*% t(K) + diag(c( phi(hatb) )))
g <- diag( C_inv %*% (diag(d) - B %*% C_inv) ) * phi1(hatb)
H <- - (C_inv %*% B %*% C_inv %*% diag(c( phi2(hatb) ))) * diag(d) +
2 * (C_inv %*% B %*% C_inv) * C_inv * tcrossprod(phi1(hatb))
Sigma_new <- solve(solve(Sigma + rho_b * diag(d)) + H / 2)
hatb_new <- hatb - Sigma_new %*% g / 2
}
if (thresh)
hatb_new <- (hatb_new + abs(hatb_new)) / 2
}
}
P <- P_new
theta <- theta_new
hata <- hata_new
s <- s_new
hatb <- hatb_new
Sigma <- Sigma_new
}
list(theta_arr=theta_arr, P_arr=P_arr, q_arr=q_arr, hata_arr=hata_arr, s_arr=s_arr,
hatb_arr=hatb_arr, Sigma_arr=Sigma_arr)
}
logt <- function(b) {log(1 + (b+abs(b)) / 2)}
logt1 <- function(b) {1 / (1 + (b+abs(b)) / 2) * (b >= 0)}
logt2 <- function(b) {-1 / (1 + (b+abs(b)) / 2)^2 * (b >= 0)}
|
/scratch/gouwar.j/cran-all/cranData/viking/R/viking.R
|
#' @title simulation
#' @docType class
#' @description Advances one or more villages through time
#' @field length The total number of time steps that the simulation runs for
#' @field villages A list of villages that the simulator will run
#' @field writer An instance of a data_writer class for writing village data to disk
#' @export
#' @section Methods:
#' \describe{
#' \item{\code{run_model()}}{Runs the simulation}
#' }
simulation <- R6::R6Class("simulation",
public = list(
length = NA,
villages = NA,
writer = NA,
#' Creates a new Simulation instance
#'
#' @description Creates a new simulation object to control the experiment
#' @param length The number of steps the simulation takes
#' @param villages A list of villages that will be simulated
#' @param writer The data writer to be used with the villages
initialize = function(length,
villages,
writer = villager::data_writer$new()) {
self$villages <- villages
self$length <- length
self$writer <- writer
},
#' Runs the simulation
#'
#' @return None
run_model = function() {
for (village in self$villages) {
village$set_initial_state()
}
# Loop over each village and run the user defined initial condition function. Index off of 1 because the
# initial condition is set at 0
current_step <- 1
while (current_step <= self$length) {
# Iterate the villages a single time step
for (village in self$villages) {
village$propagate(current_step)
self$writer$write(village$current_state, village$name)
}
current_step <- current_step + 1
}
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/Simulation.R
|
#' @export
#' @title Data Writer
#' @docType class
#' @description A class responsible for the simulation data to disk.
#' @details This class can be subclasses to provide advanced data writing to other data sources. This should also
#' be subclassed if the winik and resource classes are subclasses, to write any addional fields to the data source.
#' @field results_directory The folder where the simulation resulst are written to
#' @field winik_filename The location where the winiks are written to
#' @field resource_filename The location where the resources are written to
#' @section Methods:
#' \describe{
#' \item{\code{write()}}{Writes the winik and resources to disk.}
#' }
data_writer <- R6::R6Class(
"data_writer",
public = list(
results_directory = NULL,
winik_filename = NULL,
resource_filename = NULL,
#' Create a new data writer.
#'
#' @description Creates a new data writer object that has optional paths for data files.
#' @param results_directory The directory where the results file is written to
#' @param winik_filename The name of the file for the winik data
#' @param resource_filename The name of the file for the resource data
#' @return A new winik object
initialize = function(results_directory = "results",
winik_filename = "winiks.csv",
resource_filename = "resources.csv") {
self$results_directory <- results_directory
self$winik_filename <- winik_filename
self$resource_filename <- resource_filename
# Check that the directory exists, delete it if it does
res_folder <- file.path(self$results_directory)
if (file.exists(res_folder)) {
unlink(res_folder, recursive = TRUE)
}
dir.create(res_folder, recursive = TRUE)
},
#' Writes a village's state to disk.
#'
#' @description Takes a state an the name of a village and writes the winiks and resources to disk
#' @param state The village's village_state that's being written
#' @param village_name The name of the village. This is used to create the data directory
#' @return None
write = function(state, village_name) {
# Check that the village_name folder where the csv files are written to exists; create it if it doesn't
res_folder <- file.path(self$results_directory, village_name)
if (!file.exists(res_folder)) {
dir.create(res_folder, recursive = TRUE)
}
# Write the winiks to disk
winik_path <- file.path(res_folder, self$winik_filename)
append <- TRUE
col_names <- FALSE
if (!file.exists(winik_path)) {
file.create(winik_path, recursive = TRUE)
append <- FALSE
col_names <- TRUE
}
readr::write_csv(state$winik_states,
file = winik_path,
na = "NA",
append = append,
col_names = col_names)
# Write the resources
append <- TRUE
col_names <- FALSE
resources_path <- file.path(res_folder, self$resource_filename)
if (!file.exists(resources_path)) {
file.create(resources_path, recursive = TRUE)
append <- FALSE
col_names <- TRUE
}
readr::write_csv(state$resource_states,
file = resources_path,
na = "NA",
append = append,
col_names = col_names)
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/data_writer.R
|
#' R6 Class representing data that's external from resources and winiks
#'
#' It contains a single variable, 'events' for when the data holds a list of events
model_data <- R6::R6Class("model_data",
public = list(
#' @field events Any events that need to be tracked
events = NULL,
#' @description Creates a new model_data object
#' @return A new model data object
initialize = function() {
self$events <- list()
}
))
|
/scratch/gouwar.j/cran-all/cranData/villager/R/model_data.R
|
#' @title resource
#' @docType class
#' @description This is an object that represents a single resource.
#' @field name The name of the resource
#' @field quantity The quantity of the resource that exists
#' @export
#' @section Methods:
#' \describe{
#' \item{\code{initialize()}}{Create a new resource}
#' \item{\code{as_table()}}{Represents the current state of the resource as a tibble}
#' }
resource <- R6::R6Class("resource",
cloneable = TRUE,
public = list(
name = NA,
quantity = NA,
#' Creates a new resource.
#'
#' @description Creates a new resource object
#' @param name The name of the resource
#' @param quantity The quantity present
initialize = function(name = NA, quantity = 0) {
self$name <- name
self$quantity <- quantity
},
#' Returns a data.frame representation of the resource
#'
#' @return A data.frame of resources
as_table = function() {
return(data.frame(name = self$name, quantity = self$quantity))
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/resource.R
|
#' @export
#' @title Resource Manager
#' @docType class
#' @description This object manages all of the resources in a village.
#' @section Methods:
#' \describe{
#' \item{\code{initialize()}}{Creates a new manager}
#' \item{\code{get_resources()}}{Gets all of the resources that the manager has}
#' \item{\code{get_resource()}}{Retrieves a resource from the manager}
#' \item{\code{add_resource()}}{Adds a resource to the manager}
#' \item{\code{remove_resource()}}{Removes a resource from the manager}
#' \item{\code{get_resource_index()}}{Retrieves the index of the resource}
#' \item{\code{get_states()}}{Returns a list of states}
#' \item{\code{load()}}{Loads a csv file of resources and adds them to the manager.}
#' }
resource_manager <- R6::R6Class("resource_manager",
public = list(
#' @field resources A list of resource objects
#' @field resource_class The class used to represent resources
resources = NA,
resource_class = NULL,
#' Creates a new , empty, resource manager for a village.
#' @description Get a new instance of a resource_manager
#' @param resource_class The class being used to describe the resources being managed
initialize = function(resource_class=villager::resource) {
self$resources <- vector()
self$resource_class <- resource_class
},
#' Gets all of the managed resources
#'
#' @return A list of resources
get_resources = function() {
return(self$resources)
},
#' Gets a resource given a resource name
#'
#' @param name The name of the requested resource
#' @return A resource object
get_resource = function(name) {
for (res in self$resources) {
if (res$name == name) {
return(res)
}
}
},
#' Adds a resource to the manager.
#'
#' @param new_resource The resource to add
#' @return None
add_resource = function(new_resource) {
self$resources <- append(self$resources, new_resource)
},
#' Removes a resource from the manager
#'
#' @param name The name of the resource being removed
#' @return None
remove_resource = function(name) {
resource_index <- self$get_resource_index(name)
self$resources <- self$resources[-resource_index]
},
#' Returns the index of a resource in the internal resource list
#'
#' @param name The name of the resource being located
#' @return The index in the list, or R's default return value
get_resource_index = function(name) {
for (i in seq_len(length(self$resources))) {
if (self$resources[[i]]$name == name) {
return(i)
}
}
},
#' Returns a data.frame where each row is a resource.
#'
#' @details Subclasses should not have to override this method because it takes all member variables into account
#' @return A single data.frame
get_states = function() {
# Allocate the appropriate sized table so that the row can be emplaced instead of appended
resource_count <- length(self$resources)
resource_fields <- names(self$resource_class$public_fields)
state_table <-data.frame(matrix(nrow = resource_count, ncol = length(resource_fields)))
if (resource_count > 0) {
colnames(state_table) <- resource_fields
for (i in 1:resource_count) {
state_table[i, ] <- self$resources[[i]]$as_table()
}
}
return(state_table)
},
#' Loads a csv file of resources into the manager
#'
#' @param file_name The path to the csv file
#' @return None
load = function(file_name) {
resources <- read.csv(file_name)
for (i in seq_len(nrow(resources))) {
resource_row <- resources[i, ]
self$add_resource(resource$new(name = resource_row$name, quantity = resource_row$quantity))
}
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/resource_manager.R
|
#' @export
#' @title Village
#' @docType class
#' @description This is an object that represents the state of a village at a particular time.
#' @details This class acts as a type of record that holds the values of the
#' different village variables. This class can be subclassed to include more variables that aren't present.
#' @section Methods:
#' \describe{
#' \item{\code{initialize()}}{Creates a new village}
#' \item{\code{propagate()}}{Advances the village a single time step}
#' \item{\code{set_initial_state()}}{Initializes the initial state of the village}
#' }
village <- R6::R6Class("village",
public = list(
#' @field name An optional name for the village
name = NA,
#' @field initial_condition A function that sets the initial state of the village
initial_condition = NA,
#' @field current_state The village's current state
current_state = NA,
#' @field previous_state The village's previous state
previous_state = NA,
#' @field models A list of functions or a single function that should be run at each timestep
models = NULL,
#' @field model_data Optional data that models may need
model_data = NULL,
#' @field winik_mgr The manager that handles all of the winiks
winik_mgr = NULL,
#' @field resource_mgr The manager that handles all of the resources
resource_mgr = NULL,
#' Initializes a village
#'
#' @description This method is meant to set the variables that are needed for a village to propagate through
#' time.
#' @param name An optional name for the village
#' @param initial_condition A function that gets called on the first time step
#' @param models A list of functions or a single function that should be run at each time step
#' @param winik_class The class that's being used to represent agents
#' @param resource_class The class being used to describe the resources
initialize = function(name,
initial_condition,
models = list(),
winik_class = villager::winik,
resource_class = villager::resource) {
self$initial_condition <- initial_condition
self$winik_mgr <- winik_manager$new(winik_class)
self$resource_mgr <- resource_manager$new(resource_class)
# Check to see if the user supplied a single model, outside of a list
# If so, put it in a vector because other code expects 'models' to be a list
if (!is.list(models) && !is.null(models)) {
self$models <- list(models)
} else {
self$models <- models
}
self$name <- name
# Creates an empty state that the initial condition will populate
self$current_state <- village_state$new()
self$previous_state <- self$current_state$clone(deep = TRUE)
# Set the data
self$model_data <- model_data$new()
},
#' Propagates the village a single time step
#'
#' @details This method is used to advance the village a single time step. It should NOT be used
#' to set initial conditions. See the set_initial_state method.
#' @param current_step The current time step
#' @return None
propagate = function(current_step) {
# Create a new state representing this slice in time. Since many of the
# values will be the same as the previous state, clone the previous state
self$current_state <- self$previous_state$clone(deep = TRUE)
# Update the current_step in the state record to reflect the new step
self$current_state$step <- current_step
# Run each of the models
for (model in self$models) {
# Create a read only copy of the last state so that users can make decisions off of it
self$previous_state <- self$current_state$clone(deep = TRUE)
model(self$current_state, self$previous_state, self$model_data, self$winik_mgr, self$resource_mgr
)
}
self$current_state$winik_states <- self$winik_mgr$get_states()
# Add the time step to the data
if (nrow(self$current_state$winik_states) > 0) {
self$current_state$winik_states$step <- self$current_state$step
}
self$current_state$resource_states <- self$resource_mgr$get_states()
if (nrow(self$current_state$resource_states) > 0) {
self$current_state$resource_states$step <- self$current_state$step
}
},
#' Runs the user defined function that sets the initial state of the village
#'
#' @description Runs the initial condition model
set_initial_state = function() {
self$current_state <- village_state$new()
self$current_state$step <- 0
self$initial_condition(self$current_state,
self$model_data,
self$winik_mgr,
self$resource_mgr)
self$current_state$winik_states <- self$winik_mgr$get_states()
self$current_state$resource_states <- self$resource_mgr$get_states()
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/village.R
|
#' @title village_state
#' @docType class
#' @description This is an object that represents the state of a village at a particular time.
#' @details This class acts as a type of record that holds the values of the different village variables. This class
#' can be subclassed to include more variables that aren't present.
#' @section Methods:
#' @field step The time step that the state is relevant to
#' @field winik_states A list of winik states
#' @field resource_states A list of resource states
#' @section Methods:
#' \describe{
#' }
village_state <- R6::R6Class("village_state",
cloneable = TRUE,
public = list(
step = NA,
winik_states = NA,
resource_states = NA,
#' Creates a new State
#'
#' @description Initializes all of the properties in the state to the ones passed in. This should
#' be called by subclasses during initialization.
#' @details When adding a new property, make sure to add it to the tibble
#' representation.
#' @export
#' @param step The time step that the state is relevant to
#' @param winik_states A vector of tibbles representing the states of the winiks
#' @param resource_states A vector of tibbles representing the states of the resources
initialize = function(step = 0,
winik_states = vector(),
resource_states = vector()) {
self$step <- step
self$winik_states <- winik_states
self$resource_states <- resource_states
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/village_state.R
|
#' @export
#' @title Winik
#' @docType class
#' @description This is an object that represents a villager (winik).
#' @details This class acts as an abstraction for handling villager-level logic. It can take a
#' number of functions that run at each timestep. It also has an associated
#' @field identifier A unique identifier that can be used to identify and find the winik
#' @field first_name The winik's first name
#' @field last_name The winik's last name
#' @field age The winik's age
#' @field mother_id The identifier of the winik's mother
#' @field father_id The identifier of the winik's father
#' @field profession The winik's profession
#' @field partner The identifier of the winik's partner
#' @field gender The winik's gender
#' @field alive A boolean flag that represents whether the villager is alive or dead
#' @field children A list of children identifiers
#' @field health A percentage value of the winik's current health
#' @section Methods:
#' \describe{
#' \item{\code{as_table()}}{Represents the current state of the winik as a tibble}
#' \item{\code{get_age()}}{Returns age in terms of years}
#' \item{\code{get_gender()}}{}
#' \item{\code{get_days_sincelast_birth()}}{Get the number of days since the winik last gave birth}
#' \item{\code{initialize()}}{Create a new winik}
#' \item{\code{propagate()}}{Runs every day}
#' }
winik <- R6::R6Class("winik",
public = list(
age = NULL,
alive = NULL,
children = NULL,
father_id = NULL,
first_name = NULL,
gender = NULL,
health = NULL,
identifier = NULL,
last_name = NULL,
mother_id = NULL,
partner = NULL,
profession = NULL,
#' Create a new winik
#'
#' @description Used to created new winik objects.
#'
#' @export
#' @param age The age of the winik
#' @param alive Boolean whether the winik is alive or not
#' @param children An ordered list of of the children from this winik
#' @param gender The gender of the winik
#' @param identifier The winik's identifier
#' @param first_name The winik's first name
#' @param last_name The winik's last naem
#' @param mother_id The identifier of the winik's monther
#' @param father_id The identifier of the winik' father
#' @param partner The identifier of the winik's partner
#' @param profession The winik's profession
#' @param health A percentage value of the winik's current health
#' @return A new winik object
initialize = function(identifier = NA,
first_name = NA,
last_name = NA,
age = 0,
mother_id = NA,
father_id = NA,
partner = NA,
children = vector(mode = "character"),
gender = NA,
profession = NA,
alive = TRUE,
health = 100) {
if (is.na(identifier)) {
identifier <- uuid::UUIDgenerate()
}
self$alive <- alive
self$identifier <- identifier
self$first_name <- first_name
self$last_name <- last_name
self$age <- age
self$mother_id <- mother_id
self$father_id <- father_id
self$profession <- profession
self$gender <- gender
self$partner <- partner
self$children <- children
self$health <- health
},
#' A function that returns true or false whether the villager dies
#' This is run each day
#'
#' @return A boolean whether the winik is alive (true for yes)
is_alive = function() {
# The villager survived the day
return(self$alive)
},
#' Gets the number of days from the last birth. This is also
#' the age of the most recently born winik
#'
#' @return The number of days since last birth
get_days_since_last_birth = function() {
if (length(self$children) > 0) {
# This works because the children list is sorted
return(self$children[[1]]$age)
}
return(0)
},
#' Connects a child to the winik. This method ensures that the
#' 'children' vector is ordered.
#'
#' @param child The Winik object representing the child
#' @return None
add_child = function(child) {
sort_children <- function() {
children_length <- length(self$children)
if (children_length <= 1) {
return()
}
for (i in 1:children_length) {
j_len <- children_length - 1
for (j in 1:j_len) {
if (self$children[[j]]$age > self$children[[j + 1]]$age) {
temp <- self$children[j + 1]
self$children[j + 1] <- self$children[j]
self$children[j] <- temp
}
}
}
}
if (length(self$children) == 0) {
self$children <- c(self$children, child)
} else {
self$children <- append(self$children, child, after = 0)
sort_children()
}
},
#' Returns a data.frame representation of the winik
#'
#' @description I hope there's a more scalable way to do this in R; Adding every new attribute to this
#' function isn't practical
#' @details The village_state holds a copy of all of the villagers at each timestep; this method is used to turn
#' the winik properties into the object inserted in the village_state.
#' @export
#' @return A data.frame representation of the winik
as_table = function() {
winik_table <- data.frame(
age = self$age,
alive = self$alive,
father_id = self$father_id,
first_name = self$first_name,
gender = self$gender,
health = self$health,
identifier = self$identifier,
last_name = self$last_name,
mother_id = self$mother_id,
partner = self$partner,
profession = self$profession
)
return(winik_table)
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/winik.R
|
#' @export
#' @title Winik Manager
#' @docType class
#' @description A class that abstracts the management of aggregations of Winik classes. Each village should have
#' an instance of a winik_manager to interface the winiks inside.
#' @field winiks A list of winiks objects that the winik manager manages.
#' @field winik_class A class describing winiks. This is usually the default villager supplied 'winik' class
#' @section Methods:
#' \describe{
#' \item{\code{add_winik()}}{Adds a single winik to the manager.}
#' \item{\code{get_average_age()}}{Returns the average age, in years, of all the winiks.}
#' \item{\code{get_living_winiks()}}{Gets a list of all the winiks that are currently alive.}
#' \item{\code{get_states()}}{Returns a data.frame consisting of all of the managed winiks.}
#' \item{\code{get_winik()}}{Retrieves a particular winik from the manager.}
#' \item{\code{get_winik_index()}}{Retrieves the index of a winik.}
#' \item{\code{initialize()}}{Creates a new manager instance.}
#' \item{\code{load()}}{Loads a csv file defining a population of winiks and places them in the manager.}
#' \item{\code{remove_winik()}}{Removes a winik from the manager}
#' }
winik_manager <- R6::R6Class("winik_manager",
public = list(
winiks = NULL,
winik_class = NULL,
#' Creates a new winik manager instance.
#'
#' @param winik_class The class that's being used to represent agents being managed
initialize = function(winik_class=villager::winik) {
self$winiks <- vector()
self$winik_class <- winik_class
},
#' Given the identifier of a winik, sort through all of the managed winiks and return it
#' if it exists.
#'
#' @description Return the R6 instance of a winik with identiifier 'winik_identifier'.
#' @param winik_identifier The identifier of the requested winik.
#' @return An R6 winik object
get_winik = function(winik_identifier) {
for (winik in self$winiks) {
if (winik$identifier == winik_identifier) {
return(winik)
}
}
},
#' Returns a list of all the winiks that are currently alive.
#'
#' @return A list of living winiks
get_living_winiks = function() {
living_winiks <- list()
for (winik in self$winiks) {
if (winik$alive) {
living_winiks <- append(living_winiks, winik)
}
}
return(living_winiks)
},
#' Adds a winik to the manager.
#'
#' @param new_winik The winik to add to the manager
#' @return None
add_winik = function(new_winik) {
# Create an identifier if it's null
if (is.null(new_winik$identifier)) {
new_winik$identifier <- uuid::UUIDgenerate()
}
self$winiks <- append(self$winiks, new_winik)
},
#' Removes a winik from the manager
#'
#' @param winik_identifier The identifier of the winik being removed
#' @return None
remove_winik = function(winik_identifier) {
winik_index <- self$get_winik_index(winik_identifier)
self$winiks <- self$winiks[-winik_index]
},
#' Returns a data.frame of winiks
#'
#' @details Each row of the data.frame represents a winik object
#' @return A single data.frame of all winiks
get_states = function() {
# Allocate the appropriate sized table so that the row can be emplaced instead of appended
winik_count <- length(self$winiks)
winik_fields <- names(self$winik_class$public_fields)
column_names <- winik_fields[!winik_fields %in% c("children")]
state_table <- data.frame(matrix(nrow = winik_count, ncol = length(column_names)))
if (winik_count > 0) {
# Since we know that a winik exists and we need to match the columns here with the
# column names in winik::as_table, get the first winik and use its column names
colnames(state_table) <- column_names
for (i in 1:winik_count) {
state_table[i, ] <- self$winiks[[i]]$as_table()
}
}
return(state_table)
},
#' Returns the index of a winik in the internal winik list
#'
#' @param winik_identifier The identifier of the winik being located
#' @return The index in the list, or R's default return value
get_winik_index = function(winik_identifier) {
for (i in seq_len(length(self$winiks))) {
if (self$winiks[[i]]$identifier == winik_identifier) {
return(i)
}
}
return(NA)
},
#' Connects two winiks together as mates
#'
#' @param winik_a A winik that will be connected to winik_b
#' @param winik_b A winik that will be connected to winik_a
connect_winiks = function(winik_a, winik_b) {
winik_a$partner <- winik_b$identifier
winik_b$partner <- winik_a$identifier
},
#' Returns the total number of winiks that are alive
#' @return The numnber of living winiks
get_living_population = function() {
total_living_population <- 0
for (winik in self$winiks)
if (winik$alive == TRUE) {
total_living_population <- total_living_population + 1
}
return(total_living_population)
},
#' Returns the averag age, in years, of all of the winiks
#'
#' @details This is an *example* of the kind of logic that the manager might handle. In this case,
#' the manager is performing calculations about its aggregation (winiks). Note that the 364 days needs to
#' work better
#'
#' @return The average age in years
get_average_age = function() {
total_age <- 0
for (winik in self$winiks)
total_age <- total_age + winik$age
average_age_days <- total_age / length(self$winiks)
return(average_age_days / 364)
},
#' Takes all of the winiks in the manager and reconstructs the children
#'
#' @details This is typically called when loading winiks from disk for the first time.
#' When children are created during the simulation, the family connections are made
#' through the winik class and added to the manager via add_winik.
#' @return None
add_children = function() {
for (winik in self$winiks) {
if (!is.na(winik$mother_id)) {
if (!is.na(self$get_winik_index(winik$mother_id))) {
mother <- self$get_winik(winik$mother_id)
mother$add_child(winik)
}
}
if (!is.na(winik$father_id)) {
if (!is.na(self$get_winik_index(winik$father_id))) {
father <- self$get_winik(winik$father_id)
father$add_child(winik)
}
}
}
},
#' Loads winiks from disk.
#'
#' @details Populates the winik manager with a set of winiks defined in a csv file.
#' @param file_name The location of the file holding the winiks.
#' @return None
load = function(file_name) {
winiks <- read.csv(file_name, row.names = NULL)
for (i in seq_len(nrow(winiks))) {
winiks_row <- winiks[i, ]
new_winik <- winik$new(
identifier = winiks_row$identifier,
first_name = winiks_row$first_name,
last_name = winiks_row$last_name,
age = winiks_row$age,
mother_id = winiks_row$mother_id,
father_id = winiks_row$father_id,
partner = winiks_row$partner,
gender = winiks_row$gender,
profession = winiks_row$profession,
alive = winiks_row$alive,
health = winiks_row$health
)
self$add_winik(new_winik)
}
self$add_children()
}
)
)
|
/scratch/gouwar.j/cran-all/cranData/villager/R/winik_manager.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(villager)
library(leaflet)
## -----------------------------------------------------------------------------
gps_winik <- R6::R6Class("winik",
inherit = villager::winik,
public = list(
age = NULL,
alive = NULL,
children = NULL,
father_id = NULL,
first_name = NULL,
gender = NULL,
health = NULL,
identifier = NULL,
last_name = NULL,
mother_id = NULL,
partner = NULL,
profession = NULL,
latitude = NULL,
longitude = NULL,
initialize = function(identifier = NA,
first_name = NA,
last_name = NA,
age = 0,
mother_id = NA,
father_id = NA,
partner = NA,
children = vector(mode = "character"),
gender = NA,
profession = NA,
alive = TRUE,
health = 100,
latitude = 0,
longitude = 0) {
super$initialize(identifier,
first_name,
last_name,
age,
mother_id,
father_id,
partner,
children,
gender,
profession,
alive,
health)
self$latitude <- latitude
self$longitude <- longitude
},
as_table = function() {
winik_table <- data.frame(
age = self$age,
alive = self$alive,
father_id = self$father_id,
first_name = self$first_name,
gender = self$gender,
health = self$health,
identifier = self$identifier,
last_name = self$last_name,
mother_id = self$mother_id,
partner = self$partner,
profession = self$profession,
latitude = self$latitude,
longitude = self$longitude
)
return(winik_table)
}
)
)
## -----------------------------------------------------------------------------
initial_condition <- function(current_state, model_data, winik_mgr, resource_mgr) {
# Create the initial villagers
test_agent <- gps_winik$new(first_name="Lewis", last_name="Taylor", age=9125, latitude=33.8785486, longitude=-118.0434921)
winik_mgr$add_winik(test_agent)
}
## -----------------------------------------------------------------------------
test_model <- function(current_state, previous_state, model_data, winik_mgr, resource_mgr) {
# Loop over all the winiks (just one at the moment)
for (winik in winik_mgr$get_living_winiks()) {
# Generate new coordinates
latitude <- winik$latitude + runif(1, 0.01, 0.03)
longitude <- winik$longitude + runif(1, 0.01, 0.03)
winik$latitude <- latitude
winik$longitude <- longitude
}
}
## -----------------------------------------------------------------------------
los_angeles <- village$new("Test_Village", initial_condition, test_model, gps_winik)
simulator <- simulation$new(10, list(los_angeles))
simulator$run_model()
## -----------------------------------------------------------------------------
# Load in data
agent_data <- readr::read_csv("results/Test_Village/winiks.csv")
# Grab just the location data
agent_location <- data.frame(latitude = agent_data$latitude, longitude = agent_data$longitude)
# create a map
leaflet::leaflet() %>%
leaflet::addTiles() %>% # Add default OpenStreetMap map tiles
leaflet::addMarkers (data = agent_location) # Add agent locations
|
/scratch/gouwar.j/cran-all/cranData/villager/inst/doc/extending-agents.R
|
---
title: "extending-winiks"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{extending-winiks}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(villager)
library(leaflet)
```
# Extending Agents
To create agents (winiks) that have more properties than the ones provided by _villager_, subclass the `winik` class into a new R6 class. Once sub-classed, additional properties can be added to the winik which can be used in the subsequent model. The new winik class can be tied to individual villages. This gives flexibility to model populations differently when running under the same simulation.
To add new members to the winik class,
1. Copy the _winik_ class source code
2. Create the new member variable
3. Add it as a parameter to the `initialize` function
4. Make an entry for it in the `as_table` function
## Agent with a GPS coordinate
To give a complete example of the sublclassing process, consider an extended agent. In this case the agent has an additional property, `gps_coordinates`, that's a named list of latitude and longitude coordinates: [lat=1234, long=1234]. Each coordinate gets updated by the model each day by a random number.
To start the base class off, the original class was copied to save time with the member variable definitions.
### Custom winik class
```{r}
gps_winik <- R6::R6Class("winik",
inherit = villager::winik,
public = list(
age = NULL,
alive = NULL,
children = NULL,
father_id = NULL,
first_name = NULL,
gender = NULL,
health = NULL,
identifier = NULL,
last_name = NULL,
mother_id = NULL,
partner = NULL,
profession = NULL,
latitude = NULL,
longitude = NULL,
initialize = function(identifier = NA,
first_name = NA,
last_name = NA,
age = 0,
mother_id = NA,
father_id = NA,
partner = NA,
children = vector(mode = "character"),
gender = NA,
profession = NA,
alive = TRUE,
health = 100,
latitude = 0,
longitude = 0) {
super$initialize(identifier,
first_name,
last_name,
age,
mother_id,
father_id,
partner,
children,
gender,
profession,
alive,
health)
self$latitude <- latitude
self$longitude <- longitude
},
as_table = function() {
winik_table <- data.frame(
age = self$age,
alive = self$alive,
father_id = self$father_id,
first_name = self$first_name,
gender = self$gender,
health = self$health,
identifier = self$identifier,
last_name = self$last_name,
mother_id = self$mother_id,
partner = self$partner,
profession = self$profession,
latitude = self$latitude,
longitude = self$longitude
)
return(winik_table)
}
)
)
```
### Initial Condition
We'll create the initial population of one Agent in the `initial_condition` function, which gets run before the model starts. The initial starting location is in Los Angeles, Ca. Note that the new `gps_winik` class is used to instantiate the agent rather than the library provided `winik` class.
```{r}
initial_condition <- function(current_state, model_data, winik_mgr, resource_mgr) {
# Create the initial villagers
test_agent <- gps_winik$new(first_name="Lewis", last_name="Taylor", age=9125, latitude=33.8785486, longitude=-118.0434921)
winik_mgr$add_winik(test_agent)
}
```
### Model
Each day, the model picks a number between 0.0000001 and 0.0000003 and increments `gps_coordinate` on the winik.
```{r}
test_model <- function(current_state, previous_state, model_data, winik_mgr, resource_mgr) {
# Loop over all the winiks (just one at the moment)
for (winik in winik_mgr$get_living_winiks()) {
# Generate new coordinates
latitude <- winik$latitude + runif(1, 0.01, 0.03)
longitude <- winik$longitude + runif(1, 0.01, 0.03)
winik$latitude <- latitude
winik$longitude <- longitude
}
}
```
### Running
Finally, we'll create and run a simulation with a duration of 10 days.
```{r}
los_angeles <- village$new("Test_Village", initial_condition, test_model, gps_winik)
simulator <- simulation$new(10, list(los_angeles))
simulator$run_model()
```
### Results
```{r}
# Load in data
agent_data <- readr::read_csv("results/Test_Village/winiks.csv")
# Grab just the location data
agent_location <- data.frame(latitude = agent_data$latitude, longitude = agent_data$longitude)
# create a map
leaflet::leaflet() %>%
leaflet::addTiles() %>% # Add default OpenStreetMap map tiles
leaflet::addMarkers (data = agent_location) # Add agent locations
```
|
/scratch/gouwar.j/cran-all/cranData/villager/inst/doc/extending-agents.Rmd
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(plotly)
library(villager)
## -----------------------------------------------------------------------------
resource_expiration <- R6::R6Class("resource",
cloneable = TRUE,
public = list(
name = NA,
quantity = NA,
creation_date = NA,
#' Creates a new resource.
#'
#' @description Creates a new resource object
#' @param name The name of the resource
#' @param quantity The quantity present
#' @param creation_date The date that the resource was created
initialize = function(name = NA, quantity = 0, creation_date=NA) {
self$name <- name
self$quantity <- quantity
self$creation_date <- creation_date # New member variable to track the creation date
},
#' Returns a data.frame representation of the resource
#'
#' @return A data.frame of resources
as_table = function() {
return(data.frame(name = self$name, quantity = self$quantity))
}
)
)
## -----------------------------------------------------------------------------
initial_condition <- function(current_state, model_data, winik_mgr, resource_mgr) {
for (i in 1:10) {
name <- runif(1, 0.0, 100)
new_winik <- winik$new(first_name <- name, last_name <- "Smith")
winik_mgr$add_winik(new_winik)
}
# Create two new resources at the current date (The first day)
corn <- resource_expiration$new("Corn", 10, current_state$step)
rice <- resource_expiration$new("Rice", 20, current_state$step)
resource_mgr$add_resource(corn)
resource_mgr$add_resource(rice)
}
## -----------------------------------------------------------------------------
# Create the model that, each day, checks to see whether or not any resource expire
model <- function(current_state, previous_state, model_data, winik_mgr, resource_mgr) {
# Loop over all of the resources and check if any expire
for (resource in resource_mgr$get_resources()) {
# Figure out how many days have passed
days_passed <- current_state$step - resource$creation_date
if (resource$name == "Corn") {
if (days_passed > 10 && resource$quantity > 0) {
print("Setting Corn quantity to 0")
resource$quantity <- 0
}
} else if (resource$name == "Rice" && resource$quantity > 0) {
if (days_passed > 20) {
print("Setting Rice quantity to 0")
resource$quantity <- 0
}
}
}
}
## -----------------------------------------------------------------------------
# Create the village and simulation
coastal_village <- village$new("Expiring_Resources", initial_condition, model, resource_class=)
simulator <- simulation$new(16, villages = list(coastal_village))
simulator$run_model()
## -----------------------------------------------------------------------------
# Load in data
time_series_data <- readr::read_csv("results/Expiring_Resources/resources.csv")
# Get unique dates
unique_step<- sort(unique(time_series_data$step))
# Get corn & rice quantities and dates
corn_date_quantities <- dplyr::filter(time_series_data, name=="Corn")
rice_date_quantities <- dplyr::filter(time_series_data, name=="Rice")
# create data frame for sorted data
reordered_time_series <- data.frame(step = unique_step, Corn = 0, Rice = 0)
for (i in 1:nrow(reordered_time_series)){
reordered_time_series[i,2] = corn_date_quantities[which(corn_date_quantities$step == reordered_time_series$step[i]),2]
reordered_time_series[i,3] = rice_date_quantities[which(rice_date_quantities$step == reordered_time_series$step[i]),2]
}
# Plot graph
plotly::plot_ly(reordered_time_series, x = ~step) %>%
plotly::add_trace(y = ~Corn, name = 'Corn', type = 'scatter', mode = 'lines') %>%
plotly::add_trace(y = ~Rice, name = 'Rice', type = 'scatter', mode = 'lines') %>%
plotly::layout(title = 'Resource Counts', xaxis = list(title = 'Time Step'),
yaxis = list(title = 'Quantity'), legend = list(title=list(text='Crop Type')))
|
/scratch/gouwar.j/cran-all/cranData/villager/inst/doc/extending-resources.R
|
---
title: "extending-resources"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{extending-resources}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(plotly)
library(villager)
```
# Extending Resources
To create resources that have additional properties, subclass the `resource` class.
To add new members to the resource class,
1. Copy the _resource_ class source definition
2. Create the new member variable
3. Add it as a parameter to the `initialize` function
4. Make an entry for it in the `as_table` function
## Resource with an expiration date
To demonstrate how to subclass and run a model with a custom _resource_ class, consier an example of corn and rice resources that expire. To do this, a new variable is added to the resource class, `creation_date` which gets updated when the resource is created. When the model runs, the date at each time step is used to check against the creation date of each resource.
### Custom resource class
```{r}
resource_expiration <- R6::R6Class("resource",
cloneable = TRUE,
public = list(
name = NA,
quantity = NA,
creation_date = NA,
#' Creates a new resource.
#'
#' @description Creates a new resource object
#' @param name The name of the resource
#' @param quantity The quantity present
#' @param creation_date The date that the resource was created
initialize = function(name = NA, quantity = 0, creation_date=NA) {
self$name <- name
self$quantity <- quantity
self$creation_date <- creation_date # New member variable to track the creation date
},
#' Returns a data.frame representation of the resource
#'
#' @return A data.frame of resources
as_table = function() {
return(data.frame(name = self$name, quantity = self$quantity))
}
)
)
```
### Initial Condition
The initial condition is a village that has two resource types, corn and rice.
``` {r}
initial_condition <- function(current_state, model_data, winik_mgr, resource_mgr) {
for (i in 1:10) {
name <- runif(1, 0.0, 100)
new_winik <- winik$new(first_name <- name, last_name <- "Smith")
winik_mgr$add_winik(new_winik)
}
# Create two new resources at the current date (The first day)
corn <- resource_expiration$new("Corn", 10, current_state$step)
rice <- resource_expiration$new("Rice", 20, current_state$step)
resource_mgr$add_resource(corn)
resource_mgr$add_resource(rice)
}
```
### Model
The model checks the current date against the expiration dates on each resource. When the threshold limits
are reached, the quantity is set to zero.
```{r}
# Create the model that, each day, checks to see whether or not any resource expire
model <- function(current_state, previous_state, model_data, winik_mgr, resource_mgr) {
# Loop over all of the resources and check if any expire
for (resource in resource_mgr$get_resources()) {
# Figure out how many days have passed
days_passed <- current_state$step - resource$creation_date
if (resource$name == "Corn") {
if (days_passed > 10 && resource$quantity > 0) {
print("Setting Corn quantity to 0")
resource$quantity <- 0
}
} else if (resource$name == "Rice" && resource$quantity > 0) {
if (days_passed > 20) {
print("Setting Rice quantity to 0")
resource$quantity <- 0
}
}
}
}
```
### Running
With the required model components complete, we can create a simulation that runs for 15 days. By the end of it, there should be no more corn left, and the rice stocks should still be full.
```{r}
# Create the village and simulation
coastal_village <- village$new("Expiring_Resources", initial_condition, model, resource_class=)
simulator <- simulation$new(16, villages = list(coastal_village))
simulator$run_model()
```
### Results
A timeseries of each resource type is plotted below. The _rice_ resource has clearly not expired while the _corn_ resource has after 10 days.
```{r}
# Load in data
time_series_data <- readr::read_csv("results/Expiring_Resources/resources.csv")
# Get unique dates
unique_step<- sort(unique(time_series_data$step))
# Get corn & rice quantities and dates
corn_date_quantities <- dplyr::filter(time_series_data, name=="Corn")
rice_date_quantities <- dplyr::filter(time_series_data, name=="Rice")
# create data frame for sorted data
reordered_time_series <- data.frame(step = unique_step, Corn = 0, Rice = 0)
for (i in 1:nrow(reordered_time_series)){
reordered_time_series[i,2] = corn_date_quantities[which(corn_date_quantities$step == reordered_time_series$step[i]),2]
reordered_time_series[i,3] = rice_date_quantities[which(rice_date_quantities$step == reordered_time_series$step[i]),2]
}
# Plot graph
plotly::plot_ly(reordered_time_series, x = ~step) %>%
plotly::add_trace(y = ~Corn, name = 'Corn', type = 'scatter', mode = 'lines') %>%
plotly::add_trace(y = ~Rice, name = 'Rice', type = 'scatter', mode = 'lines') %>%
plotly::layout(title = 'Resource Counts', xaxis = list(title = 'Time Step'),
yaxis = list(title = 'Quantity'), legend = list(title=list(text='Crop Type')))
```
|
/scratch/gouwar.j/cran-all/cranData/villager/inst/doc/extending-resources.Rmd
|
---
title: "extending-winiks"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{extending-winiks}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(villager)
library(leaflet)
```
# Extending Agents
To create agents (winiks) that have more properties than the ones provided by _villager_, subclass the `winik` class into a new R6 class. Once sub-classed, additional properties can be added to the winik which can be used in the subsequent model. The new winik class can be tied to individual villages. This gives flexibility to model populations differently when running under the same simulation.
To add new members to the winik class,
1. Copy the _winik_ class source code
2. Create the new member variable
3. Add it as a parameter to the `initialize` function
4. Make an entry for it in the `as_table` function
## Agent with a GPS coordinate
To give a complete example of the sublclassing process, consider an extended agent. In this case the agent has an additional property, `gps_coordinates`, that's a named list of latitude and longitude coordinates: [lat=1234, long=1234]. Each coordinate gets updated by the model each day by a random number.
To start the base class off, the original class was copied to save time with the member variable definitions.
### Custom winik class
```{r}
gps_winik <- R6::R6Class("winik",
inherit = villager::winik,
public = list(
age = NULL,
alive = NULL,
children = NULL,
father_id = NULL,
first_name = NULL,
gender = NULL,
health = NULL,
identifier = NULL,
last_name = NULL,
mother_id = NULL,
partner = NULL,
profession = NULL,
latitude = NULL,
longitude = NULL,
initialize = function(identifier = NA,
first_name = NA,
last_name = NA,
age = 0,
mother_id = NA,
father_id = NA,
partner = NA,
children = vector(mode = "character"),
gender = NA,
profession = NA,
alive = TRUE,
health = 100,
latitude = 0,
longitude = 0) {
super$initialize(identifier,
first_name,
last_name,
age,
mother_id,
father_id,
partner,
children,
gender,
profession,
alive,
health)
self$latitude <- latitude
self$longitude <- longitude
},
as_table = function() {
winik_table <- data.frame(
age = self$age,
alive = self$alive,
father_id = self$father_id,
first_name = self$first_name,
gender = self$gender,
health = self$health,
identifier = self$identifier,
last_name = self$last_name,
mother_id = self$mother_id,
partner = self$partner,
profession = self$profession,
latitude = self$latitude,
longitude = self$longitude
)
return(winik_table)
}
)
)
```
### Initial Condition
We'll create the initial population of one Agent in the `initial_condition` function, which gets run before the model starts. The initial starting location is in Los Angeles, Ca. Note that the new `gps_winik` class is used to instantiate the agent rather than the library provided `winik` class.
```{r}
initial_condition <- function(current_state, model_data, winik_mgr, resource_mgr) {
# Create the initial villagers
test_agent <- gps_winik$new(first_name="Lewis", last_name="Taylor", age=9125, latitude=33.8785486, longitude=-118.0434921)
winik_mgr$add_winik(test_agent)
}
```
### Model
Each day, the model picks a number between 0.0000001 and 0.0000003 and increments `gps_coordinate` on the winik.
```{r}
test_model <- function(current_state, previous_state, model_data, winik_mgr, resource_mgr) {
# Loop over all the winiks (just one at the moment)
for (winik in winik_mgr$get_living_winiks()) {
# Generate new coordinates
latitude <- winik$latitude + runif(1, 0.01, 0.03)
longitude <- winik$longitude + runif(1, 0.01, 0.03)
winik$latitude <- latitude
winik$longitude <- longitude
}
}
```
### Running
Finally, we'll create and run a simulation with a duration of 10 days.
```{r}
los_angeles <- village$new("Test_Village", initial_condition, test_model, gps_winik)
simulator <- simulation$new(10, list(los_angeles))
simulator$run_model()
```
### Results
```{r}
# Load in data
agent_data <- readr::read_csv("results/Test_Village/winiks.csv")
# Grab just the location data
agent_location <- data.frame(latitude = agent_data$latitude, longitude = agent_data$longitude)
# create a map
leaflet::leaflet() %>%
leaflet::addTiles() %>% # Add default OpenStreetMap map tiles
leaflet::addMarkers (data = agent_location) # Add agent locations
```
|
/scratch/gouwar.j/cran-all/cranData/villager/vignettes/extending-agents.Rmd
|
---
title: "extending-resources"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{extending-resources}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(plotly)
library(villager)
```
# Extending Resources
To create resources that have additional properties, subclass the `resource` class.
To add new members to the resource class,
1. Copy the _resource_ class source definition
2. Create the new member variable
3. Add it as a parameter to the `initialize` function
4. Make an entry for it in the `as_table` function
## Resource with an expiration date
To demonstrate how to subclass and run a model with a custom _resource_ class, consier an example of corn and rice resources that expire. To do this, a new variable is added to the resource class, `creation_date` which gets updated when the resource is created. When the model runs, the date at each time step is used to check against the creation date of each resource.
### Custom resource class
```{r}
resource_expiration <- R6::R6Class("resource",
cloneable = TRUE,
public = list(
name = NA,
quantity = NA,
creation_date = NA,
#' Creates a new resource.
#'
#' @description Creates a new resource object
#' @param name The name of the resource
#' @param quantity The quantity present
#' @param creation_date The date that the resource was created
initialize = function(name = NA, quantity = 0, creation_date=NA) {
self$name <- name
self$quantity <- quantity
self$creation_date <- creation_date # New member variable to track the creation date
},
#' Returns a data.frame representation of the resource
#'
#' @return A data.frame of resources
as_table = function() {
return(data.frame(name = self$name, quantity = self$quantity))
}
)
)
```
### Initial Condition
The initial condition is a village that has two resource types, corn and rice.
``` {r}
initial_condition <- function(current_state, model_data, winik_mgr, resource_mgr) {
for (i in 1:10) {
name <- runif(1, 0.0, 100)
new_winik <- winik$new(first_name <- name, last_name <- "Smith")
winik_mgr$add_winik(new_winik)
}
# Create two new resources at the current date (The first day)
corn <- resource_expiration$new("Corn", 10, current_state$step)
rice <- resource_expiration$new("Rice", 20, current_state$step)
resource_mgr$add_resource(corn)
resource_mgr$add_resource(rice)
}
```
### Model
The model checks the current date against the expiration dates on each resource. When the threshold limits
are reached, the quantity is set to zero.
```{r}
# Create the model that, each day, checks to see whether or not any resource expire
model <- function(current_state, previous_state, model_data, winik_mgr, resource_mgr) {
# Loop over all of the resources and check if any expire
for (resource in resource_mgr$get_resources()) {
# Figure out how many days have passed
days_passed <- current_state$step - resource$creation_date
if (resource$name == "Corn") {
if (days_passed > 10 && resource$quantity > 0) {
print("Setting Corn quantity to 0")
resource$quantity <- 0
}
} else if (resource$name == "Rice" && resource$quantity > 0) {
if (days_passed > 20) {
print("Setting Rice quantity to 0")
resource$quantity <- 0
}
}
}
}
```
### Running
With the required model components complete, we can create a simulation that runs for 15 days. By the end of it, there should be no more corn left, and the rice stocks should still be full.
```{r}
# Create the village and simulation
coastal_village <- village$new("Expiring_Resources", initial_condition, model, resource_class=)
simulator <- simulation$new(16, villages = list(coastal_village))
simulator$run_model()
```
### Results
A timeseries of each resource type is plotted below. The _rice_ resource has clearly not expired while the _corn_ resource has after 10 days.
```{r}
# Load in data
time_series_data <- readr::read_csv("results/Expiring_Resources/resources.csv")
# Get unique dates
unique_step<- sort(unique(time_series_data$step))
# Get corn & rice quantities and dates
corn_date_quantities <- dplyr::filter(time_series_data, name=="Corn")
rice_date_quantities <- dplyr::filter(time_series_data, name=="Rice")
# create data frame for sorted data
reordered_time_series <- data.frame(step = unique_step, Corn = 0, Rice = 0)
for (i in 1:nrow(reordered_time_series)){
reordered_time_series[i,2] = corn_date_quantities[which(corn_date_quantities$step == reordered_time_series$step[i]),2]
reordered_time_series[i,3] = rice_date_quantities[which(rice_date_quantities$step == reordered_time_series$step[i]),2]
}
# Plot graph
plotly::plot_ly(reordered_time_series, x = ~step) %>%
plotly::add_trace(y = ~Corn, name = 'Corn', type = 'scatter', mode = 'lines') %>%
plotly::add_trace(y = ~Rice, name = 'Rice', type = 'scatter', mode = 'lines') %>%
plotly::layout(title = 'Resource Counts', xaxis = list(title = 'Time Step'),
yaxis = list(title = 'Quantity'), legend = list(title=list(text='Crop Type')))
```
|
/scratch/gouwar.j/cran-all/cranData/villager/vignettes/extending-resources.Rmd
|
#' Average multiple independent importance estimates
#'
#' Average the output from multiple calls to \code{vimp_regression}, for different independent groups, into a single estimate with a corresponding standard error and confidence interval.
#'
#' @param ... an arbitrary number of \code{vim} objects.
#' @param weights how to average the vims together, and must sum to 1; defaults to 1/(number of vims) for each vim, corresponding to the arithmetic mean
#'
#' @return an object of class \code{vim} containing the (weighted) average of the individual importance estimates, as well as the appropriate standard error and confidence interval.
#' This results in a list containing:
#' \itemize{
#' \item{s}{ - a list of the column(s) to calculate variable importance for}
#' \item{SL.library}{ - a list of the libraries of learners passed to \code{SuperLearner}}
#' \item{full_fit}{ - a list of the fitted values of the chosen method fit to the full data}
#' \item{red_fit}{ - a list of the fitted values of the chosen method fit to the reduced data}
#' \item{est}{- a vector with the corrected estimates}
#' \item{naive}{- a vector with the naive estimates}
#' \item{update}{- a list with the influence curve-based updates}
#' \item{mat}{ - a matrix with the estimated variable importance, the standard error, and the \eqn{(1-\alpha) \times 100}\% confidence interval}
#' \item{full_mod}{ - a list of the objects returned by the estimation procedure for the full data regression (if applicable)}
#' \item{red_mod}{ - a list of the objects returned by the estimation procedure for the reduced data regression (if applicable)}
#' \item{alpha}{ - the level, for confidence interval calculation}
#' \item{y}{ - a list of the outcomes}
#' }
#' @examples
#' # generate the data
#' p <- 2
#' n <- 100
#' x <- data.frame(replicate(p, stats::runif(n, -5, 5)))
#'
#' # apply the function to the x's
#' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2
#'
#' # generate Y ~ Normal (smooth, 1)
#' y <- smooth + stats::rnorm(n, 0, 1)
#'
#' # set up a library for SuperLearner; note simple library for speed
#' library("SuperLearner")
#' learners <- c("SL.glm", "SL.mean")
#'
#' # get estimates on independent splits of the data
#' samp <- sample(1:n, n/2, replace = FALSE)
#'
#' # using Super Learner (with a small number of folds, for illustration only)
#' est_2 <- vimp_regression(Y = y[samp], X = x[samp, ], indx = 2, V = 2,
#' run_regression = TRUE, alpha = 0.05,
#' SL.library = learners, cvControl = list(V = 2))
#'
#' est_1 <- vimp_regression(Y = y[-samp], X = x[-samp, ], indx = 2, V = 2,
#' run_regression = TRUE, alpha = 0.05,
#' SL.library = learners, cvControl = list(V = 2))
#'
#' ests <- average_vim(est_1, est_2, weights = c(1/2, 1/2))
#'
#' @importFrom rlang "!!" sym
#' @export
average_vim <- function(..., weights = rep(1/length(list(...)), length(list(...)))) {
# capture the arguments
L <- list(...)
names(L) <- unlist(match.call(expand.dots=F)$...)
p <- length(L)
# check if weights sum to 1; if not, break
if (sum(weights) != 1) stop("Weights must sum to one.")
# extract the estimates and SEs from each element of the list
# also get the sample sizes
ests <- do.call(c, lapply(L, function(z) z$est))
naives <- do.call(c, lapply(L, function(z) z$naive))
ses <- do.call(c, lapply(L, function(z) z$se))
tests <- do.call(c, lapply(L, function(z) z$test))
p_values <- do.call(c, lapply(L, function(z) z$p_value))
predictivenesses_full <- do.call(c, lapply(L, function(z) z$predictiveness_full))
predictivenesses_reduced <- do.call(c, lapply(L, function(z) z$predictiveness_reduced))
predictiveness_cis_full <- do.call(rbind, lapply(L, function(z) z$predictiveness_ci_full))
predictiveness_cis_reduced <- do.call(rbind, lapply(L, function(z) z$predictiveness_ci_reduced))
test_statistics <- do.call(rbind, lapply(L, function(z) z$test_statistic))
delta <- min(do.call(c, lapply(L, function(z) z$delta)))
scale <- unique(unlist(lapply(L, function(z) z$scale)))
names(ests) <- "est"
names(ses) <- "se"
names(naives) <- "naive"
# create the (weighted) average
est_avg <- sum(weights*ests)
predictiveness_full <- sum(weights*predictivenesses_full)
predictiveness_reduced <- sum(weights*predictivenesses_reduced)
# combine the variances correctly
# will need to use the covariance, if not independent
se_avg <- sqrt(matrix(weights^2, nrow = 1)%*%as.matrix(ses^2))
# create a CI
alpha <- min(unlist(lapply(L, function(z) z$alpha)))
ci_avg <- vimp_ci(est_avg, se_avg, level = 1 - alpha, scale = scale[1])
# hypothesis test:
test_statistic <- sum(weights * test_statistics)
p_value <- 1 - pnorm(test_statistic)
hyp_test <- p_value < alpha
# now get lists of the remaining components
eifs <- lapply(L, function(z) z$eif)
s_lst <- lapply(L, function(z) z$s)
s <- paste0("avg_", paste(unlist(s_lst), collapse = "_"))
SL.library <- lapply(L, function(z) z$SL.library)
full_fit <- lapply(L, function(z) z$full_fit)
red_fit <- lapply(L, function(z) z$red_fit)
full_mod <- lapply(L, function(z) z$full_mod)
red_mod <- lapply(L, function(z) z$red_mod)
# combine into a tibble
mat <- tibble::tibble(s = s, est = est_avg, se = se_avg, cil = ci_avg[, 1], ciu = ci_avg[, 2], test = hyp_test, p_value = p_value) %>%
dplyr::arrange(dplyr::desc(!! rlang::sym("est")))
# create output list
output <- list(s = s, SL.library = SL.library, full_fit = full_fit,
red_fit = red_fit, est = mat$est, naive = naives, eif = eifs,
se = mat$se, ci = cbind(mat$cil, mat$ciu),
predictiveness_full = predictiveness_full,
predictiveness_reduced = predictiveness_reduced,
predictiveness_ci_full = sum(weights*predictiveness_cis_full),
predictiveness_ci_reduced = sum(weights*predictiveness_cis_reduced),
test = hyp_test,
p_value = p_value,
mat = mat,
full_mod = full_mod, red_mod = red_mod,
alpha = alpha,
delta = delta,
scale = scale)
tmp <- class(output)
classes <- unlist(lapply(L, function(z) class(z)[2]))
class(output) <- c("vim", classes, tmp)
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/average_vim.R
|
#' Compute bootstrap-based standard error estimates for variable importance
#'
#' @inheritParams vim
#'
#' @return a bootstrap-based standard error estimate
#'
#' @importFrom boot boot boot.ci
#' @export
bootstrap_se <- function(Y = NULL, f1 = NULL, f2 = NULL,
cluster_id = NULL, clustered = FALSE,
type = "r_squared", b = 1000,
boot_interval_type = "perc", alpha = 0.05) {
vim_boot_stat <- function(data, indices, type) {
y <- data$y[indices]
f1 <- data$f1[indices]
f2 <- data$f2[indices]
predictiveness_full <- est_predictiveness(
fitted_values = f1, y = y, full_y = y, type = type
)$point_est
predictiveness_reduced <- est_predictiveness(
fitted_values = f2, y = y, full_y = y, type = type
)$point_est
est_vim <- predictiveness_full - predictiveness_reduced
c(vim = est_vim, pred_full = predictiveness_full, pred_redu = predictiveness_reduced)
}
if (!clustered){
bootstrapped_ests <- boot::boot(data = data.frame(y = as.numeric(Y), f1 = f1, f2 = f2),
statistic = vim_boot_stat, R = b,
sim = "ordinary", stype = "i",
type = type)
} else{
single_boot_rep <- function(data){
n_clust <- length(unique(data$cluster_id))
sampled_clusts <- sample(unique(data$cluster_id),
size = n_clust,
replace = TRUE)
sampled_rows <- which(data$cluster_id %in% sampled_clusts)
return(vim_boot_stat(data = data,
indices = sampled_rows,
type = type))
}
.dat <- data.frame(y = as.numeric(Y),
f1 = f1,
f2 = f2,
cluster_id = cluster_id)
b_boot_ests <- t(replicate(b, single_boot_rep(data = .dat)))
overall_est <- vim_boot_stat(data = .dat, indices = 1:nrow(.dat), type = type)
# mimic the structure of "boot" object so that the boot.ci
# function can be used for CIs
bootstrapped_ests <- list(t0 = overall_est, t = b_boot_ests, R = b)
}
vars <- apply(bootstrapped_ests$t, 2, function(x) mean( (x - mean( x ) ) ^ 2) )
ci_init <- boot::boot.ci(bootstrapped_ests, type = boot_interval_type,
conf = 1 - alpha)[[4]]
num_ci_cols <- length(ci_init)
ci <- ci_init[, c(num_ci_cols - 1, num_ci_cols)]
list(se = sqrt(vars[1]), se_full = sqrt(vars[2]), se_reduced = sqrt(vars[3]), ci = ci)
}
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/bootstrap_se.R
|
#' Nonparametric Intrinsic Variable Importance Estimates and Inference using Cross-fitting
#'
#' Compute estimates and confidence intervals using cross-fitting for
#' nonparametric intrinsic variable importance based on the
#' population-level contrast between the oracle predictiveness using the
#' feature(s) of interest versus not.
#'
#' @inheritParams vim
#' @param cross_fitted_f1 the predicted values on validation data from a
#' flexible estimation technique regressing Y on X in the training data. Provided as
#' either (a) a vector, where each element is
#' the predicted value when that observation is part of the validation fold;
#' or (b) a list of length V, where each element in the list is a set of predictions on the
#' corresponding validation data fold.
#' If sample-splitting is requested, then these must be estimated specially; see Details. However,
#' the resulting vector should be the same length as \code{Y}; if using a list, then the summed
#' length of each element across the list should be the same length as \code{Y} (i.e.,
#' each observation is included in the predictions).
#' @param cross_fitted_f2 the predicted values on validation data from a
#' flexible estimation technique regressing either (a) the fitted values in
#' \code{cross_fitted_f1}, or (b) Y, on X withholding the columns in \code{indx}.
#' Provided as either (a) a vector, where each element is
#' the predicted value when that observation is part of the validation fold;
#' or (b) a list of length V, where each element in the list is a set of predictions on the
#' corresponding validation data fold.
#' If sample-splitting is requested, then these must be estimated specially; see Details. However,
#' the resulting vector should be the same length as \code{Y}; if using a list, then the summed
#' length of each element across the list should be the same length as \code{Y} (i.e.,
#' each observation is included in the predictions).
#' @param f1 the fitted values from a flexible estimation technique
#' regressing Y on X. If sample-splitting is requested, then these must be
#' estimated specially; see Details. If \code{cross_fitted_se = TRUE},
#' then this argument is not used.
#' @param f2 the fitted values from a flexible estimation technique
#' regressing either (a) \code{f1} or (b) Y on X withholding the columns in
#' \code{indx}. If sample-splitting is requested, then these must be
#' estimated specially; see Details. If \code{cross_fitted_se = TRUE},
#' then this argument is not used.
#' @param V the number of folds for cross-fitting, defaults to 5. If
#' \code{sample_splitting = TRUE}, then a special type of \code{V}-fold cross-fitting
#' is done. See Details for a more detailed explanation.
#' @param cross_fitting_folds the folds for cross-fitting. Only used if
#' \code{run_regression = FALSE}.
#' @param cross_fitted_se should we use cross-fitting to estimate the standard
#' errors (\code{TRUE}, the default) or not (\code{FALSE})?
#'
#' @return An object of class \code{vim}. See Details for more information.
#'
#' @details We define the population variable importance measure (VIM) for the
#' group of features (or single feature) \eqn{s} with respect to the
#' predictiveness measure \eqn{V} by
#' \deqn{\psi_{0,s} := V(f_0, P_0) - V(f_{0,s}, P_0),} where \eqn{f_0} is
#' the population predictiveness maximizing function, \eqn{f_{0,s}} is the
#' population predictiveness maximizing function that is only allowed to access
#' the features with index not in \eqn{s}, and \eqn{P_0} is the true
#' data-generating distribution.
#'
#' Cross-fitted VIM estimates are computed differently if sample-splitting
#' is requested versus if it is not. We recommend using sample-splitting
#' in most cases, since only in this case will inferences be valid if
#' the variable(s) of interest have truly zero population importance.
#' The purpose of cross-fitting is to estimate \eqn{f_0} and \eqn{f_{0,s}}
#' on independent data from estimating \eqn{P_0}; this can result in improved
#' performance, especially when using flexible learning algorithms. The purpose
#' of sample-splitting is to estimate \eqn{f_0} and \eqn{f_{0,s}} on independent
#' data; this allows valid inference under the null hypothesis of zero importance.
#'
#' Without sample-splitting, cross-fitted VIM estimates are obtained by first
#' splitting the data into \eqn{K} folds; then using each fold in turn as a
#' hold-out set, constructing estimators \eqn{f_{n,k}} and \eqn{f_{n,k,s}} of
#' \eqn{f_0} and \eqn{f_{0,s}}, respectively on the training data and estimator
#' \eqn{P_{n,k}} of \eqn{P_0} using the test data; and finally, computing
#' \deqn{\psi_{n,s} := K^{(-1)}\sum_{k=1}^K \{V(f_{n,k},P_{n,k}) - V(f_{n,k,s}, P_{n,k})\}.}
#'
#' With sample-splitting, cross-fitted VIM estimates are obtained by first
#' splitting the data into \eqn{2K} folds. These folds are further divided
#' into 2 groups of folds. Then, for each fold \eqn{k} in the first group,
#' estimator \eqn{f_{n,k}} of \eqn{f_0} is constructed using all data besides
#' the kth fold in the group (i.e., \eqn{(2K - 1)/(2K)} of the data) and
#' estimator \eqn{P_{n,k}} of \eqn{P_0} is constructed using the held-out data
#' (i.e., \eqn{1/2K} of the data); then, computing
#' \deqn{v_{n,k} = V(f_{n,k},P_{n,k}).}
#' Similarly, for each fold \eqn{k} in the second group,
#' estimator \eqn{f_{n,k,s}} of \eqn{f_{0,s}} is constructed using all data
#' besides the kth fold in the group (i.e., \eqn{(2K - 1)/(2K)} of the data)
#' and estimator \eqn{P_{n,k}} of \eqn{P_0} is constructed using the held-out
#' data (i.e., \eqn{1/2K} of the data); then, computing
#' \deqn{v_{n,k,s} = V(f_{n,k,s},P_{n,k}).}
#' Finally,
#' \deqn{\psi_{n,s} := K^{(-1)}\sum_{k=1}^K \{v_{n,k} - v_{n,k,s}\}.}
#'
#' See the paper by Williamson, Gilbert, Simon, and Carone for more
#' details on the mathematics behind the \code{cv_vim} function, and the
#' validity of the confidence intervals.
#'
#' In the interest of transparency, we return most of the calculations
#' within the \code{vim} object. This results in a list including:
#' \describe{
#' \item{s}{the column(s) to calculate variable importance for}
#' \item{SL.library}{the library of learners passed to \code{SuperLearner}}
#' \item{full_fit}{the fitted values of the chosen method fit to the full data (a list, for train and test data)}
#' \item{red_fit}{the fitted values of the chosen method fit to the reduced data (a list, for train and test data)}
#' \item{est}{the estimated variable importance}
#' \item{naive}{the naive estimator of variable importance}
#' \item{eif}{the estimated efficient influence function}
#' \item{eif_full}{the estimated efficient influence function for the full regression}
#' \item{eif_reduced}{the estimated efficient influence function for the reduced regression}
#' \item{se}{the standard error for the estimated variable importance}
#' \item{ci}{the \eqn{(1-\alpha) \times 100}\% confidence interval for the variable importance estimate}
#' \item{test}{a decision to either reject (TRUE) or not reject (FALSE) the null hypothesis, based on a conservative test}
#' \item{p_value}{a p-value based on the same test as \code{test}}
#' \item{full_mod}{the object returned by the estimation procedure for the full data regression (if applicable)}
#' \item{red_mod}{the object returned by the estimation procedure for the reduced data regression (if applicable)}
#' \item{alpha}{the level, for confidence interval calculation}
#' \item{sample_splitting_folds}{the folds used for hypothesis testing}
#' \item{cross_fitting_folds}{the folds used for cross-fitting}
#' \item{y}{the outcome}
#' \item{ipc_weights}{the weights}
#' \item{cluster_id}{the cluster IDs}
#' \item{mat}{a tibble with the estimate, SE, CI, hypothesis testing decision, and p-value}
#' }
#'
#' @examples
#' n <- 100
#' p <- 2
#' # generate the data
#' x <- data.frame(replicate(p, stats::runif(n, -5, 5)))
#'
#' # apply the function to the x's
#' smooth <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2
#'
#' # generate Y ~ Normal (smooth, 1)
#' y <- as.matrix(smooth + stats::rnorm(n, 0, 1))
#'
#' # set up a library for SuperLearner; note simple library for speed
#' library("SuperLearner")
#' learners <- c("SL.glm")
#'
#' # -----------------------------------------
#' # using Super Learner (with a small number of folds, for illustration only)
#' # -----------------------------------------
#' set.seed(4747)
#' est <- cv_vim(Y = y, X = x, indx = 2, V = 2,
#' type = "r_squared", run_regression = TRUE,
#' SL.library = learners, cvControl = list(V = 2), alpha = 0.05)
#'
#' # ------------------------------------------
#' # doing things by hand, and plugging them in
#' # (with a small number of folds, for illustration only)
#' # ------------------------------------------
#' # set up the folds
#' indx <- 2
#' V <- 2
#' Y <- matrix(y)
#' set.seed(4747)
#' # Note that the CV.SuperLearner should be run with an outer layer
#' # of 2*V folds (for V-fold cross-fitted importance)
#' full_cv_fit <- suppressWarnings(SuperLearner::CV.SuperLearner(
#' Y = Y, X = x, SL.library = learners, cvControl = list(V = 2 * V),
#' innerCvControl = list(list(V = V))
#' ))
#' full_cv_preds <- full_cv_fit$SL.predict
#' # use the same cross-fitting folds for reduced
#' reduced_cv_fit <- suppressWarnings(SuperLearner::CV.SuperLearner(
#' Y = Y, X = x[, -indx, drop = FALSE], SL.library = learners,
#' cvControl = SuperLearner::SuperLearner.CV.control(
#' V = 2 * V, validRows = full_cv_fit$folds
#' ),
#' innerCvControl = list(list(V = V))
#' ))
#' reduced_cv_preds <- reduced_cv_fit$SL.predict
#' # for hypothesis testing
#' cross_fitting_folds <- get_cv_sl_folds(full_cv_fit$folds)
#' set.seed(1234)
#' sample_splitting_folds <- make_folds(unique(cross_fitting_folds), V = 2)
#' set.seed(5678)
#' est <- cv_vim(Y = y, cross_fitted_f1 = full_cv_preds,
#' cross_fitted_f2 = reduced_cv_preds, indx = 2, delta = 0, V = V, type = "r_squared",
#' cross_fitting_folds = cross_fitting_folds,
#' sample_splitting_folds = sample_splitting_folds,
#' run_regression = FALSE, alpha = 0.05, na.rm = TRUE)
#'
#' @seealso \code{\link[SuperLearner]{SuperLearner}} for specific usage of the
#' \code{SuperLearner} function and package.
#' @export
cv_vim <- function(Y = NULL, X = NULL, cross_fitted_f1 = NULL,
cross_fitted_f2 = NULL, f1 = NULL, f2 = NULL, indx = 1,
V = ifelse(is.null(cross_fitting_folds), 5, length(unique(cross_fitting_folds))),
sample_splitting = TRUE, final_point_estimate = "split",
sample_splitting_folds = NULL, cross_fitting_folds = NULL,
stratified = FALSE, type = "r_squared",
run_regression = TRUE,
SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"),
alpha = 0.05, delta = 0, scale = "identity",
na.rm = FALSE, C = rep(1, length(Y)), Z = NULL,
ipc_scale = "identity", ipc_weights = rep(1, length(Y)),
ipc_est_type = "aipw", scale_est = TRUE,
nuisance_estimators_full = NULL,
nuisance_estimators_reduced = NULL, exposure_name = NULL,
cross_fitted_se = TRUE, bootstrap = FALSE, b = 1000,
boot_interval_type = "perc", clustered = FALSE,
cluster_id = rep(NA, length(Y)), ...) {
# check to see if f1 and f2 are missing
# if the data is missing, stop and throw an error
check_inputs(Y, X, cross_fitted_f1, cross_fitted_f2, indx)
if (bootstrap & clustered & sum(is.na(cluster_id)) > 0){
stop(paste0("If using clustered bootstrap, cluster IDs must be provided",
" for all observations."))
}
if (sample_splitting) {
ss_V <- V * 2
} else {
ss_V <- V
}
# check to see if Y is a matrix or data.frame; if not, make it one
# (just for ease of reading)
if (is.null(dim(Y))) {
Y <- as.matrix(Y)
}
# set up internal data -- based on complete cases only
cc_lst <- create_z(Y, C, Z, X, ipc_weights)
Y_cc <- cc_lst$Y
X_cc <- X[C == 1, , drop = FALSE]
if (is.null(exposure_name)) {
A_cc <- rep(1, length(Y_cc))
} else {
A_cc <- X_cc[, exposure_name]
}
X_cc <- X_cc[, !(names(X_cc) %in% exposure_name), drop = FALSE]
weights_cc <- cc_lst$weights
Z_in <- cc_lst$Z
# get the correct measure function; if not one of the supported ones, say so
full_type <- get_full_type(type)
# get sample-splitting folds (if null and/or run_regression = TRUE)
if (is.null(sample_splitting_folds) | run_regression) {
if (is.null(cross_fitting_folds) & !run_regression) {
stop(paste0("You must specify the folds used for cross-fitting if ",
"run_regression = FALSE."))
}
if (run_regression) {
# set up the cross-fitting folds
cross_fitting_folds <- make_folds(
Y, V = ss_V, C = C, stratified = stratified
)
}
if (sample_splitting) {
# create sample splitting folds; equal number in each
sample_splitting_folds <- make_folds(
unique(cross_fitting_folds), V = 2
)
} else {
sample_splitting_folds <- rep(1, ss_V)
}
}
cross_fitting_folds_cc <- cross_fitting_folds[C == 1]
# if we need to run the regression, fit Super Learner with the given library
if (run_regression) {
full_feature_vec <- 1:ncol(X_cc)
full_sl_lst <- run_sl(Y = Y_cc, X = X_cc, V = ss_V,
SL.library = SL.library, s = full_feature_vec,
sample_splitting = sample_splitting,
cv_folds = cross_fitting_folds_cc,
ss_folds = sample_splitting_folds, split = 1,
verbose = FALSE, weights = weights_cc,
cross_fitted_se = cross_fitted_se,
vector = TRUE, ...)
red_split <- switch((sample_splitting) + 1, 1, 2)
red_Y <- Y_cc
if (full_type == "r_squared" || full_type == "anova") {
if (sample_splitting) {
non_ss_folds <- rep(2, nrow(Y_cc))
full_sl_lst_2 <- run_sl(Y = Y_cc, X = X_cc, V = 1,
SL.library = SL.library, s = full_feature_vec,
sample_splitting = FALSE,
ss_folds = non_ss_folds, split = 2,
verbose = FALSE, weights = weights_cc,
cross_fitted_se = FALSE,
vector = TRUE, ...)
red_Y <- matrix(full_sl_lst_2$preds, ncol = 1)
} else {
full_sl_lst_2 <- run_sl(Y = Y_cc, X = X_cc, V = 1,
SL.library = SL.library, s = full_feature_vec,
sample_splitting = FALSE,
ss_folds = rep(2, nrow(Y_cc)), split = 2,
cv_folds = cross_fitting_folds_cc,
verbose = FALSE, weights = weights_cc,
cross_fitted_se = FALSE,
vector = TRUE, ...)
red_Y <- matrix(full_sl_lst_2$preds, ncol = 1)
}
if (length(unique(red_Y)) == 1) {
red_Y <- Y_cc
}
}
redu_sl_lst <- run_sl(Y = red_Y, X = X_cc, V = ss_V,
SL.library = SL.library, s = full_feature_vec[-indx],
sample_splitting = sample_splitting,
cv_folds = cross_fitting_folds_cc,
ss_folds = sample_splitting_folds, split = red_split,
verbose = FALSE, weights = weights_cc,
cross_fitted_se = cross_fitted_se,
vector = TRUE, ...)
full <- full_sl_lst$fit
reduced <- redu_sl_lst$fit
full_preds <- full_sl_lst$preds
redu_preds <- redu_sl_lst$preds
non_cf_full_preds <- full_sl_lst$preds_non_cf_se
non_cf_redu_preds <- redu_sl_lst$preds_non_cf_se
} else { # otherwise they are fitted values
# check to make sure that the fitted values, folds are what we expect
check_fitted_values(Y = Y, cross_fitted_f1 = cross_fitted_f1,
cross_fitted_f2 = cross_fitted_f2, f1 = f1, f2 = f2,
sample_splitting_folds = sample_splitting_folds,
cross_fitting_folds = cross_fitting_folds,
cross_fitted_se = cross_fitted_se, V = V,
ss_V = ss_V, cv = TRUE)
# if cross_fitted_f1 and/or cross_fitted_f2 aren't vectors, make them vectors
if (!is.numeric(cross_fitted_f1)) {
cross_fitted_f1 <- extract_sampled_split_predictions(
preds = cross_fitted_f1, sample_splitting = sample_splitting,
sample_splitting_folds = switch((sample_splitting) + 1,
rep(1, V), sample_splitting_folds),
cross_fitting_folds = cross_fitting_folds,
full = TRUE, vector = TRUE
)
}
if (!is.numeric(cross_fitted_f2)) {
cross_fitted_f2 <- extract_sampled_split_predictions(
preds = cross_fitted_f2, sample_splitting = sample_splitting,
sample_splitting_folds = switch((sample_splitting) + 1,
rep(2, V), sample_splitting_folds),
cross_fitting_folds = cross_fitting_folds,
full = FALSE, vector = TRUE
)
}
# set up the fitted value objects (both are vectors!)
full_preds <- cross_fitted_f1
redu_preds <- cross_fitted_f2
# non-cross-fitted fits (only used if cross_fitted_se = FALSE)
non_cf_full_preds <- f1
non_cf_redu_preds <- f2
full <- reduced <- NA
cross_fitting_folds_cc <- cross_fitting_folds[C == 1]
}
arg_lst <- list(...)
# set method and family to compatible with continuous values, for EIF estimation
arg_lst <- process_arg_lst(arg_lst)
eifs_lst <- NA
# calculate the estimators, EIFs
if (full_type == "anova") {
# no sample-splitting, since no hypothesis testing
est_lst <- lapply(as.list(seq_len(V)),
function(v)
do.call(
measure_anova,
args = c(
list(full = full_preds[[v]],
reduced = redu_preds[[v]],
y = Y_cc[cross_fitting_folds_cc == v],
full_y = Y_cc,
C = C[cross_fitting_folds == v],
Z = Z_in[cross_fitting_folds == v, ,
drop = FALSE],
folds_Z = cross_fitting_folds,
ipc_weights = ipc_weights[cross_fitting_folds == v],
ipc_fit_type = "SL", na.rm = na.rm,
ipc_est_type = ipc_est_type, scale = ipc_scale,
SL.library = SL.library),
arg_lst
)
)
)
point_ests <- unlist(lapply(est_lst, function(x) x$point_est))
naives <- unlist(lapply(est_lst, function(x) x$naive))
est <- mean(point_ests)
naive <- mean(naives)
if (cross_fitted_se) {
eifs_full <- NA
eifs_redu <- NA
eifs_lst <- unlist(lapply(est_lst, function(x) x$eif))
se <- sqrt(mean(unlist(lapply(eifs_lst, function(x) t(x) %*% x / length(x)))) / nrow(Y_cc))
} else {
eif <- measure_anova(
full = non_cf_full_preds, reduced = non_cf_redu_preds,
y = Y_cc, full_y = Y_cc,
C = C, Z = Z_in,
ipc_weights = ipc_weights,
ipc_fit_type = "SL", na.rm = na.rm,
SL.library = SL.library, arg_lst
)$eif
se <- sqrt(mean(eif^2) / nrow(Y_cc))
}
predictiveness_full <- NA
predictiveness_redu <- NA
eif_full <- rep(NA, sum(cross_fitting_folds_cc == 1))
eif_redu <- rep(NA, sum(cross_fitting_folds_cc == 2))
se_full <- NA
se_redu <- NA
if (bootstrap) {
boot_results <- bootstrap_se(Y = Y_cc, f1 = non_cf_full_preds,
f2 = non_cf_redu_preds, type = full_type,
b = b, clustered = clustered,
cluster_id = cluster_id)
se <- boot_results$se
}
} else {
if (sample_splitting) {
# make new sets of folds, as if we had done V-fold within the two sets
k_fold_lst <- make_kfold(cross_fitting_folds, sample_splitting_folds, C)
full_test <- (k_fold_lst$sample_splitting_folds == 1)
redu_test <- (k_fold_lst$sample_splitting_folds == 2)
} else {
# no need to do anything
k_fold_lst <- list(
full = cross_fitting_folds, reduced = cross_fitting_folds
)
full_test <- rep(TRUE, length(cross_fitting_folds))
redu_test <- rep(TRUE, length(cross_fitting_folds))
}
cf_folds_full <- k_fold_lst$full
cf_folds_redu <- k_fold_lst$reduced
cf_folds_full_cc <- cf_folds_full[C[full_test] == 1]
cf_folds_redu_cc <- cf_folds_redu[C[redu_test] == 1]
full_test_cc <- full_test[C == 1]
redu_test_cc <- redu_test[C == 1]
predictiveness_full_object <- do.call(predictiveness_measure, c(
list(type = full_type, y = Y_cc[full_test_cc], full_y = Y_cc,
a = A_cc[full_test_cc], fitted_values = full_preds[full_test_cc],
cross_fitting_folds = cf_folds_full_cc, C = C[full_test],
Z = Z_in[full_test, , drop = FALSE],
folds_Z = cf_folds_full, ipc_weights = ipc_weights[full_test],
ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type,
na.rm = na.rm, SL.library = SL.library, nuisance_estimators = lapply(
nuisance_estimators_full, function(l) {
l[full_test_cc]
}
)), arg_lst
))
predictiveness_reduced_object <- do.call(predictiveness_measure, c(
list(type = full_type, y = Y_cc[redu_test_cc], full_y = Y_cc,
a = A_cc[redu_test_cc], fitted_values = redu_preds[redu_test_cc],
cross_fitting_folds = cf_folds_redu_cc, C = C[redu_test],
Z = Z_in[redu_test, , drop = FALSE],
folds_Z = cf_folds_redu, ipc_weights = ipc_weights[redu_test],
ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type,
na.rm = na.rm, SL.library = SL.library, nuisance_estimators = lapply(
nuisance_estimators_reduced, function(l) {
l[redu_test_cc]
}
)), arg_lst
))
predictiveness_full_lst <- estimate(predictiveness_full_object)
predictiveness_redu_lst <- estimate(predictiveness_reduced_object)
eifs_full <- predictiveness_full_lst$all_eifs
eifs_redu <- predictiveness_redu_lst$all_eifs
# use non-cross-fitted SE if requested
if (!cross_fitted_se) {
eif_full_lst <- do.call(
est_predictiveness,
args = c(list(fitted_values = non_cf_full_preds[full_test_cc],
y = Y_cc[full_test_cc], full_y = Y_cc, folds = cf_folds_full_cc,
type = full_type, C = C, Z = Z_in, folds_Z = cf_folds_full,
ipc_weights = ipc_weights,
ipc_fit_type = "SL", scale = ipc_scale,
ipc_est_type = ipc_est_type, na.rm = na.rm,
SL.library = SL.library),
arg_lst), quote = TRUE
)
eif_full <- eif_full_lst$eif
eif_redu_lst <- do.call(
est_predictiveness,
args = c(list(fitted_values = non_cf_redu_preds[full_test_cc],
y = Y_cc[redu_test_cc], full_y = Y_cc, folds = cf_folds_redu_cc,
type = full_type, C = C, Z = Z_in, folds_Z = cf_folds_redu,
ipc_weights = ipc_weights,
ipc_fit_type = "SL", scale = ipc_scale,
ipc_est_type = ipc_est_type, na.rm = na.rm,
SL.library = SL.library),
arg_lst), quote = TRUE
)
eif_redu <- eif_redu_lst$eif
var_full <- mean(eif_full ^ 2)
var_redu <- mean(eif_redu ^ 2)
} else {
eif_full <- predictiveness_full_lst$eif
eif_redu <- predictiveness_redu_lst$eif
var_full <- mean(unlist(lapply(as.list(seq_len(V)), function(k) {
mean(eifs_full[[k]] ^ 2)
})))
var_redu <- mean(unlist(lapply(as.list(seq_len(V)), function(k) {
mean(eifs_redu[[k]] ^ 2)
})))
}
predictiveness_full <- predictiveness_full_lst$point_est
predictiveness_redu <- predictiveness_redu_lst$point_est
est <- predictiveness_full - predictiveness_redu
naive <- NA
se_full <- sqrt(var_full / sum(full_test_cc))
se_redu <- sqrt(var_redu / sum(redu_test_cc))
if (bootstrap & !sample_splitting & !cross_fitted_se) {
boot_results <- bootstrap_se(Y = Y_cc, f1 = non_cf_full_preds,
f2 = non_cf_redu_preds, type = full_type,
b = b, boot_interval_type = boot_interval_type,
alpha = alpha, clustered = clustered,
cluster_id = cluster_id)
se <- boot_results$se
se_full <- boot_results$se_full
se_redu <- boot_results$se_reduced
} else {
if (bootstrap) {
warning(paste0("Bootstrap-based standard error estimates are currently",
" only available if sample_splitting = FALSE. Returning",
" standard error estimates based on the efficient",
" influence function instead."))
}
if (!cross_fitted_se) {
se <- vimp_se(eif_full = eif_full, eif_reduced = eif_redu,
cross_fit = FALSE, sample_split = sample_splitting,
na.rm = na.rm)
} else {
se <- vimp_se(eif_full = eifs_full, eif_reduced = eifs_redu,
cross_fit = TRUE, sample_split = sample_splitting,
na.rm = na.rm)
}
}
}
est_for_inference <- est
predictiveness_full_for_inference <- predictiveness_full
predictiveness_reduced_for_inference <- predictiveness_redu
# if sample-splitting was requested and final_point_estimate isn't "split", estimate
# the required quantities
if (sample_splitting & (final_point_estimate != "split")) {
k_fold_lst_for_est <- list(
full = cross_fitting_folds, reduced = cross_fitting_folds
)
cf_folds_for_est <- k_fold_lst_for_est$full
cf_folds_for_est_cc <- k_fold_lst_for_est$full[C == 1]
if (final_point_estimate == "full") {
est_pred_full <- do.call(predictiveness_measure, c(
list(type = full_type, y = Y_cc, full_y = Y_cc,
a = A_cc, fitted_values = full_preds,
cross_fitting_folds = cf_folds_for_est_cc, C = C,
Z = Z_in, folds_Z = cf_folds_for_est, ipc_weights = ipc_weights,
ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type,
na.rm = na.rm, SL.library = SL.library,
nuisance_estimators = nuisance_estimators_full), arg_lst
))
est_pred_reduced <- do.call(predictiveness_measure, c(
list(type = full_type, y = Y_cc, full_y = Y_cc,
a = A_cc, fitted_values = redu_preds,
cross_fitting_folds = cf_folds_for_est_cc, C = C,
Z = Z_in, folds_Z = cf_folds_for_est, ipc_weights = ipc_weights,
ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type,
na.rm = na.rm, SL.library = SL.library,
nuisance_estimators = nuisance_estimators_reduced), arg_lst
))
est_pred_full_lst <- estimate(est_pred_full)
est_pred_reduced_lst <- estimate(est_pred_reduced)
predictiveness_full <- est_pred_full_lst$point_est
predictiveness_redu <- est_pred_reduced_lst$point_est
est <- predictiveness_full - predictiveness_redu
} else {
# swap the roles of the folds
full_test_for_est <- (k_fold_lst$sample_splitting_folds == 2)
redu_test_for_est <- (k_fold_lst$sample_splitting_folds == 1)
cf_folds_full_for_est <- k_fold_lst$reduced
cf_folds_redu_for_est <- k_fold_lst$full
cf_folds_full_cc_for_est <- cf_folds_full_for_est[C[full_test_for_est] == 1]
cf_folds_redu_cc_for_est <- cf_folds_redu_for_est[C[redu_test_for_est] == 1]
full_test_cc_for_est <- full_test_for_est[C == 1]
redu_test_cc_for_est <- redu_test_for_est[C == 1]
est_pred_full <- do.call(predictiveness_measure, c(
list(type = full_type, y = Y_cc[full_test_cc_for_est], full_y = Y_cc,
a = A_cc[full_test_cc_for_est], fitted_values = full_preds[full_test_cc_for_est],
cross_fitting_folds = cf_folds_full_cc_for_est, C = C[full_test_for_est],
Z = Z_in[full_test_for_est, , drop = FALSE],
folds_Z = cf_folds_full_for_est, ipc_weights = ipc_weights[full_test_for_est],
ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type,
na.rm = na.rm, SL.library = SL.library, nuisance_estimators = lapply(
nuisance_estimators_full, function(l) {
l[full_test_cc_for_est]
}
)), arg_lst
))
est_pred_reduced <- do.call(predictiveness_measure, c(
list(type = full_type, y = Y_cc[redu_test_cc_for_est], full_y = Y_cc,
a = A_cc[redu_test_cc_for_est], fitted_values = redu_preds[redu_test_cc_for_est],
cross_fitting_folds = cf_folds_redu_cc_for_est, C = C[redu_test_for_est],
Z = Z_in[redu_test_for_est, , drop = FALSE],
folds_Z = cf_folds_redu_for_est, ipc_weights = ipc_weights[redu_test_for_est],
ipc_fit_type = "SL", scale = ipc_scale, ipc_est_type = ipc_est_type,
na.rm = na.rm, SL.library = SL.library, nuisance_estimators = lapply(
nuisance_estimators_reduced, function(l) {
l[redu_test_cc_for_est]
}
)), arg_lst
))
est_pred_full_lst <- estimate(est_pred_full)
est_pred_reduced_lst <- estimate(est_pred_reduced)
# compute the point estimates of predictiveness and variable importance
predictiveness_full <- mean(c(predictiveness_full, est_pred_full_lst$point_est))
predictiveness_redu <- mean(c(predictiveness_redu, est_pred_reduced_lst$point_est))
est <- predictiveness_full - predictiveness_redu
}
}
# if est < 0, set to zero and print warning
if (est < 0 && !is.na(est) & scale_est) {
est <- 0
warning("Original estimate < 0; returning zero.")
} else if (is.na(est)) {
warning("Original estimate NA; consider using a different library of learners.")
}
# calculate the confidence interval
ci <- vimp_ci(est_for_inference, se, scale = scale, level = 1 - alpha)
if (bootstrap) {
ci <- boot_results$ci
}
predictiveness_ci_full <- vimp_ci(
predictiveness_full_for_inference, se = se_full, scale = scale, level = 1 - alpha
)
predictiveness_ci_redu <- vimp_ci(
predictiveness_reduced_for_inference, se = se_redu, scale = scale, level = 1 - alpha
)
# perform a hypothesis test against the null of zero importance
if (full_type == "anova" || full_type == "regression") {
hyp_test <- list(test = NA, p_value = NA, test_statistics = NA)
} else {
hyp_test <- vimp_hypothesis_test(
predictiveness_full = predictiveness_full_for_inference,
predictiveness_reduced = predictiveness_reduced_for_inference,
se = se, delta = delta, alpha = alpha
)
}
# create the output and return it (as a tibble)
chr_indx <- paste(as.character(indx), collapse = ",")
mat <- tibble::tibble(
s = chr_indx, est = est, se = se[1], cil = ci[1], ciu = ci[2],
test = hyp_test$test, p_value = hyp_test$p_value
)
if (full_type == "anova") {
if (cross_fitted_se) {
final_eif <- eifs_lst
} else {
final_eif <- eif
}
} else {
if (length(eif_full) != length(eif_redu)) {
max_len <- max(c(length(eif_full), length(eif_redu)))
eif_full <- c(eif_full, rep(NA, max_len - length(eif_full)))
eif_redu <- c(eif_redu, rep(NA, max_len - length(eif_redu)))
}
final_eif <- eif_full - eif_redu
}
output <- list(s = chr_indx,
SL.library = SL.library,
full_fit = full_preds, red_fit = redu_preds,
est = est,
naive = naive,
eif = final_eif,
eif_full = eif_full, eif_redu = eif_redu,
all_eifs_full = eifs_full, all_eifs_redu = eifs_redu,
se = se, ci = ci,
est_for_inference = est_for_inference,
predictiveness_full = predictiveness_full,
predictiveness_reduced = predictiveness_redu,
predictiveness_full_for_inference = predictiveness_full_for_inference,
predictiveness_reduced_for_inference = predictiveness_reduced_for_inference,
predictiveness_ci_full = predictiveness_ci_full,
predictiveness_ci_reduced = predictiveness_ci_redu,
se_full = se_full, se_reduced = se_redu,
test = hyp_test$test,
p_value = hyp_test$p_value,
test_statistic = hyp_test$test_statistic,
full_mod = full,
red_mod = reduced,
alpha = alpha,
delta = delta,
sample_splitting_folds = sample_splitting_folds,
cross_fitting_folds = cross_fitting_folds,
y = Y,
ipc_weights = ipc_weights,
ipc_scale = ipc_scale,
scale = scale,
cluster_id = cluster_id,
mat = mat)
# make it also a vim and vim_type object
tmp.cls <- class(output)
class(output) <- c("vim", full_type, tmp.cls)
return(output)
}
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/cv_vim.R
|
#' Neutralization sensitivity of HIV viruses to antibody VRC01
#'
#' A dataset containing neutralization sensitivity -- measured using inhibitory
#' concentration, the quantity of antibody necessary to neutralize a fraction of
#' viruses in a given sample -- and viral features including:
#' amino acid sequence features (measured using HXB2 coordinates), geographic region of origin,
#' subtype, and viral geometry. Accessed from the Los Alamos National Laboratory's (LANL's)
#' Compile, Analyze, and tally Neutralizing Antibody Panels (CATNAP) database.
#'
#' @format A data frame with 611 rows and 837variables:
#' \describe{
#' \item{seqname}{Viral sequence identifiers}
#' \item{subtype.is.01_AE}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.02_AG}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.07_BC}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.A1}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.A1C}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.A1D}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.B}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.C}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.D}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.O}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{subtype.is.Other}{Dummy variables encoding the viral subtype as 0/1.
#' Possible subtypes are 01_AE, 02_AG, 07_BC, A1, A1C, A1D, B, C, D, O, Other.}
#' \item{geographic.region.of.origin.is.Asia}{Dummy variables encoding the
#' geographic region of origin as 0/1. Regions are Asia, Europe/Americas,
#' North Africa, and Southern Africa.}
#' \item{geographic.region.of.origin.is.Europe.Americas}{Dummy variables encoding the
#' geographic region of origin as 0/1. Regions are Asia, Europe/Americas,
#' North Africa, and Southern Africa.}
#' \item{geographic.region.of.origin.is.N.Africa}{Dummy variables encoding the
#' geographic region of origin as 0/1. Regions are Asia, Europe/Americas,
#' North Africa, and Southern Africa.}
#' \item{geographic.region.of.origin.is.S.Africa}{Dummy variables encoding the
#' geographic region of origin as 0/1. Regions are Asia, Europe/Americas,
#' North Africa, and Southern Africa.}
#' \item{ic50.censored}{A binary indicator of whether or not the IC-50 (the
#' concentration at which 50% of viruses are neutralized) was right-censored.
#' Right-censoring is a proxy for a resistant virus.}
#' \item{ic80.censored}{A binary indicator of whether or not the IC-80 (the
#' concentration at which 80% of viruses are neutralized) was right-censored.
#' Right-censoring is a proxy for a resistant virus.}
#' \item{ic50.geometric.mean.imputed}{Continuous IC-50. If neutralization
#' sensitivity for the virus was assessed in multiple studies, the geometric mean
#' was taken.}
#' \item{ic80.geometric.mean.imputed}{Continuous IC-90. If neutralization
#' sensitivity for the virus was assessed in multiple studies, the geometric mean
#' was taken.}
#' \item{hxb2.46.E.1mer}{Amino acid sequence features denoting the presence (1) or absence (0)
#' of a residue at the given HXB2-referenced site. For example, \code{hxb2.46.E.1mer}
#' records the presence of an E at HXB2-referenced site 46.}
#' \item{hxb2.46.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.46.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.46.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.46.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.61.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.61.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.61.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.61.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.97.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.97.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.97.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.97.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.124.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.124.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.125.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.125.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.127.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.127.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.C.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.C.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.179.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.181.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.181.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.181.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.181.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.190.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.197.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.197.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.197.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.198.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.198.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.198.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.198.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.241.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.241.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.241.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.241.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.276.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.276.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.276.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.276.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.278.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.278.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.278.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.278.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.278.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.279.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.279.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.279.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.279.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.279.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.280.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.280.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.280.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.280.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.281.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.282.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.282.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.282.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.282.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.282.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.282.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.283.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.283.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.283.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.283.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.290.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.321.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.328.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.365.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.369.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.369.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.369.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.369.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.369.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.369.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.371.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.371.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.371.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.371.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.374.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.374.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.374.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.386.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.386.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.386.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.386.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.386.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.389.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.W.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.C.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.W.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.W.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.C.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.415.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.425.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.425.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.426.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.426.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.426.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.426.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.426.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.428.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.428.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.428.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.429.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.430.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.430.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.430.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.430.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.430.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.431.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.431.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.432.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.432.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.432.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.432.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.455.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.455.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.455.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.455.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.455.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.455.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.W.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.456.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.457.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.458.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.458.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.458.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.458.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.459.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.459.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.459.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.459.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.459.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.459.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.gap.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.P.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.466.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.467.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.467.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.467.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.469.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.A.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.471.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.474.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.474.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.474.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.475.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.475.M.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.476.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.476.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.477.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.477.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.544.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.544.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.569.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.569.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.569.X.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.589.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.589.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.E.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.655.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.668.D.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.668.G.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.668.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.668.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.668.T.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.675.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.675.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.677.H.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.677.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.677.N.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.677.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.677.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.677.S.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.680.W.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.681.Y.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.683.K.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.683.Q.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.683.R.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.688.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.688.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.702.F.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.702.I.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.702.L.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.702.V.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.29.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.49.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.59.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.88.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.130.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.132.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.133.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.134.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.135.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.136.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.137.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.138.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.139.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.140.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.141.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.142.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.143.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.144.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.145.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.146.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.147.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.148.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.149.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.150.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.156.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.160.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.171.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.185.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.186.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.187.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.188.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.197.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.229.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.230.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.232.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.234.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.241.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.268.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.276.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.278.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.289.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.293.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.295.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.301.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.302.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.324.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.332.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.334.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.337.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.339.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.343.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.344.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.350.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.354.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.355.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.356.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.358.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.360.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.362.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.363.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.386.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.392.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.393.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.394.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.395.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.396.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.397.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.398.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.399.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.400.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.401.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.402.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.403.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.404.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.405.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.406.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.407.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.408.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.409.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.410.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.411.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.412.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.413.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.442.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.444.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.446.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.448.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.460.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.461.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.462.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.463.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.465.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.611.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.616.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.618.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.619.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.624.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.625.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.637.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.674.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.743.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.750.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.787.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.816.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{hxb2.824.sequon_actual.1mer}{Amino acid sequence feature denoting the presence (1) or absence (0) of a residue at the given HXB2-referenced site.}
#' \item{sequons.total.env}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.gp120}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.v5}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.loop.d}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.loop.e}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.vrc01}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.cd4}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.sj.fence}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{sequons.total.sj.trimer}{The total number of sequons in various areas of the
#' HIV viral envelope protein.}
#' \item{cysteines.total.env}{The number of cysteines in various areas of the HIV
#' viral envelope protein.}
#' \item{cysteines.total.gp120}{The number of cysteines in various areas of the HIV
#' viral envelope protein.}
#' \item{cysteines.total.v5}{The number of cysteines in various areas of the HIV
#' viral envelope protein.}
#' \item{cysteines.total.vrc01}{The number of cysteines in various areas of the HIV
#' viral envelope protein.}
#' \item{length.env}{The length of various areas of the HIV viral envelope protein.}
#' \item{length.gp120}{The length of various areas of the HIV viral envelope protein.}
#' \item{length.v5}{The length of various areas of the HIV viral envelope protein.}
#' \item{length.v5.outliers}{The length of various areas of the HIV viral envelope protein.}
#' \item{length.loop.e}{The length of various areas of the HIV viral envelope protein.}
#' \item{length.loop.e.outliers}{The length of various areas of the HIV viral envelope protein.}
#' \item{taylor.small.total.v5}{The steric bulk of residues at critical locations.}
#' \item{taylor.small.total.loop.d}{The steric bulk of residues at critical locations.}
#' \item{taylor.small.total.cd4}{The steric bulk of residues at critical locations.}
#' }
#' @source \url{https://github.com/benkeser/vrc01/blob/master/data/fulldata.csv}
#' @usage data("vrc01")
"vrc01"
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/data.R
|
#' Estimate a nonparametric predictiveness functional
#'
#' Compute nonparametric estimates of the chosen measure of predictiveness.
#'
#' @param fitted_values fitted values from a regression function using the
#' observed data.
#' @param y the observed outcome.
#' @param a the observed treatment assignment (may be within a specified fold,
#' for cross-fitted estimates). Only used if \code{type = "average_value"}.
#' @param full_y the observed outcome (from the entire dataset, for
#' cross-fitted estimates).
#' @param type which parameter are you estimating (defaults to \code{r_squared},
#' for R-squared-based variable importance)?
#' @param C the indicator of coarsening (1 denotes observed, 0 denotes
#' unobserved).
#' @param Z either \code{NULL} (if no coarsening) or a matrix-like object
#' containing the fully observed data.
#' @param ipc_weights weights for inverse probability of coarsening (e.g.,
#' inverse weights from a two-phase sample) weighted estimation. Assumed to
#' be already inverted (i.e., ipc_weights = 1 / [estimated probability weights]).
#' @param ipc_fit_type if "external", then use \code{ipc_eif_preds}; if "SL",
#' fit a SuperLearner to determine the correction to the efficient influence
#' function.
#' @param ipc_eif_preds if \code{ipc_fit_type = "external"}, the fitted values
#' from a regression of the full-data EIF on the fully observed
#' covariates/outcome; otherwise, not used.
#' @param ipc_est_type IPC correction, either \code{"ipw"} (for classical
#' inverse probability weighting) or \code{"aipw"} (for augmented inverse
#' probability weighting; the default).
#' @param scale if doing an IPC correction, then the scale that the correction
#' should be computed on (e.g., "identity"; or "logit" to logit-transform,
#' apply the correction, and back-transform).
#' @param na.rm logical; should NA's be removed in computation?
#' (defaults to \code{FALSE})
#' @param nuisance_estimators (only used if \code{type = "average_value"})
#' a list of nuisance function estimators on the
#' observed data (may be within a specified fold, for cross-fitted estimates).
#' Specifically: an estimator of the optimal treatment rule; an estimator of the
#' propensity score under the estimated optimal treatment rule; and an estimator
#' of the outcome regression when treatment is assigned according to the estimated optimal rule.
#' @param ... other arguments to SuperLearner, if \code{ipc_fit_type = "SL"}.
#'
#' @return A list, with: the estimated predictiveness; the estimated efficient
#' influence function; and the predictions of the EIF based on inverse
#' probability of censoring.
#'
#' @details See the paper by Williamson, Gilbert, Simon, and Carone for more
#' details on the mathematics behind this function and the definition of the
#' parameter of interest.
#' @export
est_predictiveness <- function(fitted_values, y, a = NULL, full_y = NULL,
type = "r_squared",
C = rep(1, length(y)), Z = NULL,
ipc_weights = rep(1, length(C)),
ipc_fit_type = "external",
ipc_eif_preds = rep(1, length(C)),
ipc_est_type = "aipw", scale = "identity",
na.rm = FALSE, nuisance_estimators = NULL, ...) {
# get the correct measure function; if not one of the supported ones, say so
types <- c("accuracy", "auc", "deviance", "r_squared", "anova", "mse",
"cross_entropy", "average_value")
full_type <- types[pmatch(type, types)]
if (is.na(full_type)) stop(
paste0("We currently do not support the entered variable importance ",
"parameter.")
)
measure_funcs <- c(measure_accuracy, measure_auc, measure_deviance,
measure_r_squared, NA, measure_mse,
measure_cross_entropy, measure_average_value)
measure_func <- measure_funcs[pmatch(type, types)]
# compute plug-in point estimate, EIF, inverse-weighted EIF predictions
if (!is.na(measure_func)) {
est_lst <- measure_func[[1]](
fitted_values = fitted_values, y = y, full_y = full_y, C = C, Z = Z,
ipc_weights = ipc_weights, ipc_fit_type = ipc_fit_type,
ipc_eif_preds = ipc_eif_preds, ipc_est_type = ipc_est_type,
scale = scale, na.rm = na.rm, nuisance_estimators = nuisance_estimators,
a = a, ...
)
} else { # if type is anova, no plug-in from predictiveness
est_lst <- list(point_est = NA, ic = NA,
ipc_eif_preds = rep(NA, length(y)))
}
# return it
return(list(point_est = est_lst$point_est, eif = est_lst$eif,
ipc_eif_preds = est_lst$ipc_eif_preds))
}
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/est_predictiveness.R
|
#' Estimate a nonparametric predictiveness functional using cross-fitting
#'
#' Compute nonparametric estimates of the chosen measure of predictiveness.
#'
#' @param fitted_values fitted values from a regression function using the
#' observed data; a list of length V, where each object is a set of
#' predictions on the validation data, or a vector of the same length as \code{y}.
#' @param y the observed outcome.
#' @param full_y the observed outcome (from the entire dataset, for
#' cross-fitted estimates).
#' @param folds the cross-validation folds for the observed data.
#' @param type which parameter are you estimating (defaults to \code{r_squared},
#' for R-squared-based variable importance)?
#' @param C the indicator of coarsening (1 denotes observed, 0 denotes
#' unobserved).
#' @param Z either \code{NULL} (if no coarsening) or a matrix-like object
#' containing the fully observed data.
#' @param folds_Z either the cross-validation folds for the observed data
#' (no coarsening) or a vector of folds for the fully observed data Z.
#' @param ipc_weights weights for inverse probability of coarsening (e.g.,
#' inverse weights from a two-phase sample) weighted estimation. Assumed to be
#' already inverted (i.e., ipc_weights = 1 / [estimated probability weights]).
#' @param ipc_fit_type if "external", then use \code{ipc_eif_preds}; if "SL",
#' fit a SuperLearner to determine the correction to the efficient
#' influence function.
#' @param ipc_eif_preds if \code{ipc_fit_type = "external"}, the fitted values
#' from a regression of the full-data EIF on the fully observed
#' covariates/outcome; otherwise, not used.
#' @param ipc_est_type IPC correction, either \code{"ipw"} (for classical
#' inverse probability weighting) or \code{"aipw"} (for augmented inverse
#' probability weighting; the default).
#' @param scale if doing an IPC correction, then the scale that the correction
#' should be computed on (e.g., "identity"; or "logit" to logit-transform,
#' apply the correction, and back-transform).
#' @param na.rm logical; should NA's be removed in computation?
#' (defaults to \code{FALSE})
#' @param ... other arguments to SuperLearner, if \code{ipc_fit_type = "SL"}.
#'
#' @return The estimated measure of predictiveness.
#'
#' @details See the paper by Williamson, Gilbert, Simon, and Carone for more
#' details on the mathematics behind this function and the definition of the
#' parameter of interest. If sample-splitting is also requested
#' (recommended, since in this case inferences
#' will be valid even if the variable has zero true importance), then the
#' prediction functions are trained as if \eqn{2K}-fold cross-validation were run,
#' but are evaluated on only \eqn{K} sets (independent between the full and
#' reduced nuisance regression).
#' @export
est_predictiveness_cv <- function(fitted_values, y, full_y = NULL,
folds,
type = "r_squared",
C = rep(1, length(y)), Z = NULL,
folds_Z = folds,
ipc_weights = rep(1, length(C)),
ipc_fit_type = "external",
ipc_eif_preds = rep(1, length(C)),
ipc_est_type = "aipw", scale = "identity",
na.rm = FALSE, ...) {
# get the correct measure function; if not one of the supported ones, say so
types <- c("accuracy", "auc", "deviance", "r_squared", "anova", "mse",
"cross_entropy")
full_type <- types[pmatch(type, types)]
if (is.na(full_type)) stop(
paste0("We currently do not support the entered variable importance ",
"parameter.")
)
measure_funcs <- c(measure_accuracy, measure_auc, measure_cross_entropy,
measure_mse, NA, measure_mse, measure_cross_entropy)
measure_func <- measure_funcs[pmatch(type, types)]
if (is.list(fitted_values)) {
fitted_values_vector <- vector("numeric", length = length(y))
for (v in seq_len(length(unique(folds)))) {
fitted_values_vector[folds == v] <- fitted_values[[v]]
}
fitted_values <- fitted_values_vector
}
# compute point estimate, EIF
if (!is.na(measure_func)) {
V <- length(unique(folds))
max_nrow <- max(sapply(1:V, function(v) length(C[folds_Z == v])))
ics <- vector("list", length = V)
ic <- vector("numeric", length = length(C))
measures <- vector("list", V)
for (v in seq_len(V)) {
measures[[v]] <- measure_func[[1]](
fitted_values = fitted_values[folds == v], y = y[folds == v],
full_y = full_y,
C = C[folds_Z == v], Z = Z[folds_Z == v, , drop = FALSE],
ipc_weights = ipc_weights[folds_Z == v],
ipc_fit_type = ipc_fit_type,
ipc_eif_preds = ipc_eif_preds[folds_Z == v],
ipc_est_type = ipc_est_type, scale = scale, na.rm = na.rm, ...
)
ics[[v]] <- measures[[v]]$eif
ic[folds_Z == v] <- measures[[v]]$eif
}
point_ests <- sapply(1:V, function(v) measures[[v]]$point_est)
point_est <- mean(point_ests)
ipc_eif_preds <- sapply(
1:V, function(v) measures[[v]]$ipc_eif_preds, simplify = FALSE
)
} else { # if type is anova, no plug-in from predictiveness
point_est <- point_ests <- NA
ic <- rep(NA, length(y))
}
# check whether or not we need to do ipc weighting -- if y is fully observed,
# i.e., is part of Z, then we don't
do_ipcw <- as.numeric(!all(ipc_weights == 1))
if (is.null(full_y)) {
mn_y <- mean(y, na.rm = na.rm)
} else {
mn_y <- mean(full_y, na.rm = na.rm)
}
# if full_type is "r_squared" or "deviance", post-hoc computing from "mse"
# or "cross_entropy"
if (full_type == "r_squared") {
var <- measure_mse(
fitted_values = rep(mn_y, length(y)), y,
C = switch(do_ipcw + 1, rep(1, length(C)), C), Z = Z,
ipc_weights = ipc_weights,
ipc_fit_type = switch(do_ipcw + 1, ipc_fit_type, "SL"), ipc_eif_preds,
ipc_est_type = ipc_est_type, scale = "identity", na.rm = na.rm, ...
)
ic <- (-1) * as.vector(
matrix(c(1 / var$point_est,
-point_est / (var$point_est ^ 2)),
nrow = 1) %*% t(cbind(ic, var$eif))
)
tmp_ics <- vector("list", length = V)
for (v in 1:V) {
tmp_ics[[v]] <- (-1) * as.vector(
matrix(c(1 / var$point_est,
-point_ests[v] / (var$point_est ^ 2)),
nrow = 1) %*% t(cbind(ics[[v]],
var$eif[folds_Z == v]))
)
}
point_ests <- 1 - point_ests/var$point_est
point_est <- mean(point_ests)
ics <- tmp_ics
}
if (full_type == "deviance") {
denom <- measure_cross_entropy(
fitted_values = rep(mn_y, length(y)), y,
C = switch(do_ipcw + 1, rep(1, length(C)), C), Z = Z,
ipc_weights = ipc_weights,
ipc_fit_type = switch(do_ipcw + 1, ipc_fit_type, "SL"),
ipc_eif_preds, ipc_est_type = ipc_est_type,
scale = "identity", na.rm = na.rm, ...
)
ic <- (-1) * as.vector(
matrix(c(1 / denom$point_est,
-point_est / (denom$point_est ^ 2)),
nrow = 1) %*% t(cbind(ic, denom$eif))
)
tmp_ics <- vector("list", length = V)
for (v in 1:V) {
tmp_ics[[v]] <- (-1) * as.vector(
matrix(c(1 / denom$point_est,
-point_ests[v] / (denom$point_est ^ 2)),
nrow = 1) %*% t(cbind(ics[[v]],
denom$eif[folds_Z == v]))
)
}
point_ests <- 1 - point_ests / denom$point_est
point_est <- mean(point_ests)
ics <- tmp_ics
}
# return it
return(list(point_est = point_est, all_ests = point_ests, eif = ic,
all_eifs = ics, ipc_eif_preds = ipc_eif_preds))
}
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/est_predictiveness_cv.R
|
#' Estimate a Predictiveness Measure
#'
#' Generic function for estimating a predictiveness measure (e.g., R-squared or classification accuracy).
#'
#' @param x An R object. Currently, there are methods for \code{predictiveness_measure} objects only.
#' @param ... further arguments passed to or from other methods.
#'
#' @export
estimate <- function(x, ...) {
UseMethod("estimate")
}
|
/scratch/gouwar.j/cran-all/cranData/vimp/R/estimate.R
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.