accepted.amplicons <-
function(x){
#### internal function #######
get.accepted.amplicons <-
function(x){
accep <- which(x$rejected == FALSE)
return(as.integer(row.names(x[accep,])))
}
######################
if(inherits(x, "list")){
acamp <- lapply(X = x, FUN = get.accepted.amplicons)
names(acamp) <- names(x)
}else{
acamp <- get.accepted.amplicons(x)
}
return(acamp)
}
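## Usage sketch, assuming `amp` is a result list from ampliconduo() that
## has been run through filter.ampliconduo.set(); object names are illustrative:
# amp <- filter.ampliconduo.set(amp, min.freq = 2)
# accepted <- accepted.amplicons(amp) # named list of integer row indices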
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/accepted.amplicons.R |
ampliconduo <-
function(A , B = NULL, sample.names = NULL, correction = "fdr", ...){
###internal functions####
make.duo <-
function(A, B, sample.name, correction, ...){
cat(".")
in.both <- which(A!=0 | B!=0) # amplicons with both 0 are removed
duo <- data.frame(A, B)
duo <- duo[in.both,] ## keep row names from original data, get with row.names(duo)
A.all <- sum(duo[,1])
B.all <- sum(duo[,2])
tst <-
t(
apply(X=duo[, 1:2],
FUN =
function(x) test.AiBi(x, A.all, B.all, ...),
MARGIN = 1)
)
q <- p.adjust(tst[,1], correction)
duo <- cbind(
duo,
tst,
q
)
duo[[8]] <- rep(FALSE, times = nrow(duo)) # rejection flag, set later by filter.ampliconduo
duo[[9]] <- rep(sample.name, times = nrow(duo))
names(duo) <- c("freqA", "freqB", "p", "OR", "CI.low", "CI.up", "q", "rejected", "sample")
return(duo)
}
test.AiBi <-
function(x, n.A, n.B, conf.level = 0.95, or = 1, alternative = "two.sided")
{
m <-
matrix(
c(x[1],
x[2],
n.A-x[1],
n.B-x[2]),
nrow=2
)
t <- fisher.test(m, conf.level = conf.level, or = or, alternative = alternative)
#gives back p-value, odds-ratio, conf.interval:
c(t$p.value,
t$estimate,
t$conf.int)
}
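## For amplicon i, test.AiBi builds the 2x2 contingency table
##              reads of i   all other reads
##   branch A   x[1]         n.A - x[1]
##   branch B   x[2]         n.B - x[2]
## and uses Fisher's exact test to ask whether amplicon i is over- or
## under-represented in one PCR branch relative to the other.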
#############################
data <- list()
if((is.data.frame(A) && is.data.frame(B)) || (inherits(A, "list") && inherits(B, "list"))){
if(length(A) != length(B)){ stop("Dimensions of A and B are different.") }
if(inherits(A, "list")){
A <- as.data.frame(A)
B <- as.data.frame(B)
}
if(length(sample.names)!= length(A)){
if(!is.null(sample.names)){
warning("Count of names does not correspond to sample count. Numbering will be used instead.")
}
sample.names <- c(1:length(A))
}else{
if(inherits(sample.names, "list")){
sample.names <- unlist(sample.names)
}
}
for (i in 1:length(A)){
duo <- make.duo(A[i], B[i], sample.names[i], correction, ...)
data[[i]] <- duo
names(data)[i]<- sample.names[i]
}
}
else{## data are all in A
if((class(A)=="list" | class(A)=="data.frame")){
if(as.integer(length(A))%%2 != 0){stop("Odd number of columns in A.")}
A <- as.data.frame(A)
sample.number = length(A) / 2
if(length(sample.names)!= sample.number){
if(!is.null(sample.names)){
warning("Count of names does not correspond to sample count. Numbering will be used instead.")
}
sample.names <- c(1:sample.number)
}
for (i in 1:sample.number){
duo <- make.duo(A[(i*2-1)], A[(i*2)], sample.names[i], correction, ...)
data[[i]] <- duo
names(data)[i]<- sample.names[i]
}
}
else{
stop("wrong data format.")
}
}
return(data)
}
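## Usage sketch; file and object names are illustrative. A and B hold read
## counts of the same amplicons from the two PCR branches, one column per
## sample and one row per amplicon:
# A <- read.table("branchA_counts.txt", header = TRUE)
# B <- read.table("branchB_counts.txt", header = TRUE)
# amp <- ampliconduo(A, B, sample.names = colnames(A))
# str(amp[[1]]) # freqA, freqB, p, OR, CI.low, CI.up, q, rejected, sample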
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/ampliconduo.R |
discordance.delta <-
function(x, names = NULL, theta = 0.05, corrected = TRUE,
printToTex = FALSE, directory = NULL, file.name = NULL){
## internal functions ##
delta.prime <-
function(
q,
theta
)
{
sum(q<theta) / length(q)
}
delta <-
function(
rA,
rB,
q,
theta
)
{
sum((rA+rB)*(q<theta)) / (sum(rA) + sum(rB))
}
##############
if(is.null(names)){
samples <- names(x)
}else{
samples <- names
}
n.s <- length(x)
out.df <- data.frame(samples = samples,
Delta = 1:n.s,
Delta.prime = 1:n.s)
if (corrected == T){
ind <- 7
}else{
ind <- 3
}
for (i in 1:n.s) {
in.df <- as.data.frame(x[[i]])
out.df$Delta[out.df$samples == samples[i]] <-
delta(
in.df$freqA,
in.df$freqB,
in.df[[ind]],
theta
)
out.df$Delta.prime[out.df$samples == samples[[i]]] <-
delta.prime(
in.df[[ind]],
theta
)
}
if(printToTex ==T){
if(is.null(file.name)){
file.name = paste("discordanceDelta", ".txt", sep = "" )
}
if(!is.null(directory)){
file.name = paste(directory, "/", file.name, sep = "")
}
table = xtable(out.df, label="tab:Delta", digits = c(3), caption = "$\\Delta$ and $\\Delta'$.")
print(table, file = file.name)
}
return(out.df)
}
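## Usage sketch, assuming `amp` is the list returned by ampliconduo():
## Delta is the read-weighted and Delta.prime the unweighted fraction of
## discordant amplicons per sample.
# dd <- discordance.delta(amp, theta = 0.05, corrected = TRUE)
# dd # data.frame with columns samples, Delta, Delta.prime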
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/discordance.delta.R |
filter.ampliconduo <-
function(x, min.freq = 1, OR = NULL, q = NULL, p = NULL, remove = FALSE){
data <- x
# x is a data.frame with the ampliconduo data
filter <- which(data$freqA < min.freq | data$freqB < min.freq)
data$rejected[filter]<- TRUE
if(!is.null(OR)){
filter <- which(data$OR < OR)
data$rejected[filter]<- TRUE
}
if(!is.null(q)){
filter <- which(data$q < q)
data$rejected[filter]<- TRUE
}
if(!is.null(p)){
filter <- which(data$p < p)
data$rejected[filter]<- TRUE
}
if(remove){
data <- data[data$rejected == FALSE, ]
}
return(data)
}
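## Usage sketch, assuming `amp` is the list returned by ampliconduo():
## flags amplicons seen fewer than min.freq times in either branch and,
## here, those with q below 0.05; remove = TRUE would drop them instead.
# filtered <- filter.ampliconduo(amp[[1]], min.freq = 2, q = 0.05)
# sum(filtered$rejected) # number of flagged amplicons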
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/filter.ampliconduo.R |
filter.ampliconduo.set <-
function(x, min.freq = 1, OR = NULL, q = NULL, p = NULL, remove = FALSE){
data.f <- lapply(X = x, FUN = filter.ampliconduo, min.freq, OR, q, p , remove = remove)
return(data.f)
}
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/filter.ampliconduo.set.R |
plotAmpliconduo <-
function(x , color.treshold = 0.05,
xlab = "Abundance (PCR A)",
ylab = "Abundance (PCR B)",
main = NULL, log = "xy",
corrected = TRUE, asp = 1,
legend.position = NULL,
save = FALSE, path = NULL,
file.name = NULL, format = "jpeg",
h.start = 0, ...){
if(is.null(main)){
main = x[1,9] # column 9 holds the sample name
}
freqA <- freqB <- q <- p <- NULL ## just for CMD check not to complain
if(corrected == T){
legend.title = paste("q > ", color.treshold, sep = "")
ampliplot <- qplot(
freqA,
freqB,
xlab = xlab,
ylab = ylab,
data=x,
log=log,
asp = asp,
colour = q > color.treshold,
main = main, ...
) + scale_colour_discrete(name = legend.title, h.start = h.start)
}else{
legend.title = paste("p > ", color.treshold, sep = "")
ampliplot <- qplot(
freqA,
freqB,
xlab = xlab,
ylab = ylab,
data=x,
log=log,
asp = asp,
colour = p > color.treshold,
main = main, ...
) + scale_colour_discrete(name = legend.title, h.start = h.start)
}
if(save == T){
if(is.null(file.name)){
file.name = paste(main, "_", Sys.Date(), ".", format , sep ="")
}else{
file.name = paste(file.name, ".", format, sep = "")
}
ggsave(filename = file.name, path = path, ... )
}
print(ampliplot)
}
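## Usage sketch, assuming `amp` is a filtered ampliconduo() result:
## scatter of read frequencies in branch A vs branch B, colored by
## whether q exceeds the threshold.
# plotAmpliconduo(amp[[1]], color.treshold = 0.05)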
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/plotAmpliconduo.R |
plotAmpliconduo.set <-
function(x , color.treshold = 0.05,
xlab = "Abundance (PCR A)",
ylab = "Abundance (PCR B)",
log = "xy", corrected = TRUE,
asp = 1, nrow = 1, legend.position = NULL ,
save = FALSE, path = NULL, file.name = NULL,
format = "jpeg", h.start = 0, ...){
if(length(x)> 1){
dat = x[[1]]
for(i in 2:length(x)){
dat = rbind(dat,x[[i]])
}
}else{
dat =x[[1]]
}
freqA <- freqB <- sample <- q <- p <- NULL ## just for CMD check not to complain
## determines legend position
if(is.null(legend.position)){
s.z <- length(x)
rest <- s.z %% nrow
mod <- s.z %/% nrow
if (rest == 0 || rest > s.z || (mod == 1 && rest > 0)){
if(mod > nrow){
legend.position <- "top"
}else{
legend.position <- "right"
}
}else{
legend.position <- c(1, 0)
}
}
if(corrected == TRUE){
legend.title = paste("q > ", color.treshold, sep = "")
ampliplot <- qplot(
freqA,
freqB,
xlab = xlab,
ylab = ylab,
data = dat,
log = log,
colour= q > color.treshold,
asp = asp,
...
) +
facet_wrap(~ sample, nrow = nrow) +
theme(legend.position = legend.position, legend.justification = legend.position) +
scale_colour_discrete(name = legend.title, h.start = h.start)
}else{
legend.title = paste("p > ", color.treshold, sep = "")
ampliplot <- qplot(
freqA,
freqB,
xlab = xlab,
ylab = ylab,
data = dat,
log = log,
colour= p > color.treshold,
asp = asp,
...
) +
facet_wrap(facets = ~ sample, nrow = nrow) +
theme(legend.position = legend.position) + scale_colour_discrete(name = legend.title)
}
if(save == T){
if(is.null(file.name)){
file.name = paste("ampliconduo_", Sys.Date(), ".", format , sep ="")
}else{
file.name = paste(file.name, ".", format, sep = "")
}
ggsave(filename = file.name, path= path, ... )
}
print(ampliplot)
}
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/plotAmpliconduo.set.R |
plotORdensity <-
function(x, log = "x", ncol = 2, adjust.zeroinf = TRUE, zero.pos = 0.005,
inf.pos = 200, binwidth = 0.15, color = "black", xlab = "odds ratio",
save = FALSE, path = NULL, file.name = NULL,
format = "jpeg",...){
##### internal functions ############
plot.ORdensity.ampliconduo <-
function(x, log, ncol, adjust.zeroinf, zero.pos,
inf.pos, binwidth, color, xlab,
save, path, file.name, format, main = NULL, ...){
if(is.null(main)){
main = x[1,9]
}
OR <- ..density.. <- NULL ## just for CMD check not to complain
if(adjust.zeroinf == T){
d2 <- x
d2[d2[4] == 0,][4] <- zero.pos ### d2[4] is same as d2$OR
d2[d2[4] == Inf,][4] <- inf.pos
plotOR <-qplot(
OR,
log = log,
data = d2,
..density..,
geom = "histogram",
main = main,
fill = I(color),
binwidth = binwidth,
...
) +
xlab(xlab)
}else{
plotOR <- qplot(
OR,
log = log,
data = x,
..density..,
geom = "histogram",
main = main,
fill = I(color),
binwidth = binwidth,
...
) +
xlab(xlab)
}
if(save == T){
if(is.null(file.name)){
file.name = paste("ORdensity", ".", format , sep ="")
}else{
file.name = paste(file.name, ".", format, sep = "")
}
ggsave(filename = file.name, path = path, ... )
}
print(plotOR)
}
plot.ORdensity.ampliconduo.set <-
function(x, log, ncol, adjust.zeroinf, zero.pos,
inf.pos, binwidth, color, xlab,
save, path, file.name, format, ...){
OR <- NULL ## just to make CMD check not complain
..density.. <- NULL
if(length(x)> 1){
dat = x[[1]]
for(i in 2:length(x)){
dat = rbind(dat, x[[i]])
}
}else{
dat = x[[1]]
}
if(adjust.zeroinf == T){
d2 <- dat
d2[d2[4] == 0,][4] <- zero.pos ## d2[4] is same as d2$OR
d2[d2[4] == Inf,][4] <- inf.pos
plotOR <- qplot(
OR,
log = log,
data = d2,
..density..,
geom = "histogram",
binwidth = binwidth,
fill = I(color),
...
) +
xlab(xlab) +
facet_wrap(~sample, ncol = ncol)
}else{
plotOR <- qplot(
OR,
log = log,
data = dat,
..density..,
geom = "histogram",
binwidth = binwidth,
fill = I(color),
...
) +
xlab(xlab) +
facet_wrap(~sample, ncol = ncol)
}
if(save == T){
if(is.null(file.name)){
file.name = paste("ORdensity", ".", format, sep ="")
}else{
file.name = paste(file.name, ".", format, sep = "")
}
ggsave(filename = file.name, path = path, ... )
}
print(plotOR)
}
##################################
ncol=as.integer(ncol)
xclass <- class(x)
if(xclass[1] == "list"){
plot.ORdensity.ampliconduo.set(x, log, ncol, adjust.zeroinf, zero.pos,
inf.pos, binwidth, color, xlab,
save, path, file.name, format, ...)
}else{
if(xclass[1] == "data.frame"){
plot.ORdensity.ampliconduo(x, log, ncol, adjust.zeroinf, zero.pos,
inf.pos, binwidth, color, xlab,
save, path, file.name, format, ...)
}else{
stop("wrong data format of x!")
}
}
}
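## Usage sketch, assuming `amp` is an ampliconduo() result list:
## histogram of odds ratios; zero and infinite ORs are shifted to
## zero.pos and inf.pos so they stay visible on the log axis.
# plotORdensity(amp, adjust.zeroinf = TRUE, zero.pos = 0.005, inf.pos = 200)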
| /scratch/gouwar.j/cran-all/cranData/AmpliconDuo/R/plotORdensity.R |
#' Prediction of amyloids
#'
#' Amyloids are proteins associated with a number of clinical disorders (e.g.,
#' Alzheimer's, Creutzfeldt-Jakob's and Huntington's diseases). Despite their
#' diversity, all amyloid proteins can undergo aggregation initiated by 6- to
#' 15-residue segments called hot spots. Hence, amyloids form unique,
#' zipper-like beta-structures, which are often harmful. To find the patterns
#' defining the hot spots, we developed our novel predictor of amyloidogenicity
#' AmyloGram, based on random forests.
#'
#' AmyloGram is available as an R function (\code{\link{predict.ag_model}}) or
#' as a shiny GUI (\code{\link{AmyloGram_gui}}).
#'
#' The package is enriched with the benchmark data set \code{\link{pep424}}.
#'
#' @name AmyloGram-package
#' @aliases AmyloGram-package AmyloGram
#' @docType package
#' @author
#' Maintainer: Michal Burdukiewicz <michalburdukiewicz@@gmail.com>
#' @references Burdukiewicz MJ, Sobczyk P, Roediger S, Duda-Madej A,
#' Mackiewicz P, Kotulska M. (2017) \emph{Amyloidogenic motifs revealed
#' by n-gram analysis}. Scientific Reports 7
#' \url{https://doi.org/10.1038/s41598-017-13210-9}
#' @keywords package
#' @importFrom biogram count_multigrams decode_ngrams degenerate seq2ngrams
#' test_features
#' @importFrom ranger ranger
#' @importFrom seqinr a read.fasta
#' @importFrom shiny runApp
#' @importFrom stats predict
#' @importFrom utils capture.output
NULL
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/AmyloGram-package.R |
#' AmyloGram Graphical User Interface
#'
#' Launches a graphical user interface that predicts the presence of amyloids.
#'
#' @section Warning : Any ad-blocking software may cause malfunctions.
#' @export AmyloGram_gui
AmyloGram_gui <- function()
runApp(system.file("AmyloGram", package = "AmyloGram"))
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/AmyloGram_gui.R |
#' @name AmyloGram_model
#' @title Random forest model of amyloid proteins
#' @description Random forest grown using the \code{ranger} package with additional
#' information.
#' @docType data
#' @seealso \code{\link[ranger]{ranger}}
#' @format A list of length three: random forest, a vector of important n-grams
#' and the best-performing encoding.
#' @keywords datasets
NULL
#' @name spec_sens
#' @title Specificity/sensitivity balance
#' @description Sensitivity, specificity and Matthew's Correlation Coefficient
#' of AmyloGram for different cutoffs computed on \code{pep424} dataset.
#' @docType data
#' @usage spec_sens
#' @source Walsh, I., Seno, F., Tosatto, S.C.E., and Trovato, A. (2014).
#' \emph{PASTA 2.0: an improved server for protein aggregation prediction}.
#' Nucleic Acids Research gku399.
#' @format a data frame with four columns and 99 rows.
#' @keywords datasets
NULL
#' @name pep424
#' @title pep424 data set
#' @description Benchmark dataset for PASTA 2.0. 5 sequences shorter than 6 amino acids
#' (1\% of the original dataset) were removed.
#' @docType data
#' @usage pep424
#' @source Walsh, I., Seno, F., Tosatto, S.C.E., and Trovato, A. (2014).
#' \emph{PASTA 2.0: an improved server for protein aggregation prediction}.
#' Nucleic Acids Research gku399.
#' @format a list of 424 peptides (class \code{\link[seqinr]{SeqFastaAA}}).
#' @keywords datasets
NULL
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/datasets.R |
#' Protein test
#'
#' Checks if an object is a protein (contains only letters from the one-letter amino acid code).
#'
#' @param object \code{character} vector where each element represents one amino acid.
#' @return \code{TRUE} or \code{FALSE}.
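#' @examples
#' # input is a vector of single-character residue codes
#' is_protein(c("M", "K", "V", "L")) # TRUE
#' is_protein(c("M", "1"))           # FALSE, "1" is not an amino acid code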
#' @export
is_protein <- function(object) {
#only amino acids
all(toupper(object) %in% c(a()[-1], "X", "J", "Z", "B", "U"))
}
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/is_protein.R |
#' Predict amyloids
#'
#' Recognizes amyloids using AmyloGram algorithm.
#' @param object \code{ag_model} object.
#' @param newdata \code{list} of sequences (for example as given by
#' \code{\link[seqinr]{read.fasta}}).
#' @param ... further arguments passed to or from other methods.
#' @export
#' @examples
#' data(AmyloGram_model)
#' data(pep424)
#' predict(AmyloGram_model, pep424[17])
predict.ag_model <- function(object, newdata, ...) {
if(any(!sapply(newdata, is_protein)))
stop("Atypical aminoacid detected in input data.")
seqs_m <- tolower(t(sapply(newdata, function(i)
c(i, rep(NA, max(lengths(newdata)) - length(i))))))
gl <- do.call(rbind, lapply(1L:nrow(seqs_m), function(i) {
res <- do.call(rbind, strsplit(decode_ngrams(seq2ngrams(seqs_m[i, ][!is.na(seqs_m[i, ])], 6, a()[-1])), ""))
cbind(res, id = paste0("P", rep(i, nrow(res))))
}))
bitrigrams <- as.matrix(count_multigrams(ns = c(1, rep(2, 4), rep(3, 3)),
ds = list(0, 0, 1, 2, 3, c(0, 0), c(0, 1), c(1, 0)),
seq = degenerate(gl[, -7], object[["enc"]]),
u = as.character(1L:length(object[["enc"]]))))
test_ngrams <- bitrigrams > 0
storage.mode(test_ngrams) <- "integer"
test_lengths <- lengths(newdata) - 5
raw_preds <- predict(object[["rf"]],
data.frame(test_ngrams[, object[["imp_features"]],
drop = FALSE]))[["predictions"]]
preds <- data.frame(prob = if(is.matrix(raw_preds)) {
raw_preds[, 2]
} else {
raw_preds[2]
},
prot = unlist(lapply(1L:length(test_lengths), function(i) rep(i, test_lengths[i])))
)
data.frame(Name = names(newdata),
Probability = vapply(unique(preds[["prot"]]), function(single_prot)
max(preds[preds[["prot"]] == single_prot, "prob"]),
0)
)
}
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/predict.R |
#' Print AmyloGram object
#'
#' Prints \code{ag_model} objects.
#' @param x \code{ag_model} object.
#' @param ... further arguments passed to or from other methods.
#' @export
#' @examples
#' data(AmyloGram_model)
#' print(AmyloGram_model)
print.ag_model <- function(x, ...) {
rf_dat <- capture.output(print(x[["rf"]]))[7L:13]
ngram_dat <- paste0("Number of informative n-grams: ", length(x[["imp_features"]]))
enc_dat <- data.frame(ID = 1L:length(x[["enc"]]),
Aminoacids = sapply(x[["enc"]], function(i) paste0(toupper(i), collapse = ","))
)
cat("AmyloGram prediction model of amyloids",
rf_dat,
ngram_dat,
"\nReduced amino acid alphabet:",
sep = "\n"
)
print(enc_dat)
}
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/print.R |
#' Read sequences from .txt file
#'
#' Read sequence data saved in text file.
#'
#' @param connection a \code{\link{connection}} to the text (.txt) file.
#' @keywords manip
#' @return a list of sequences. Each element has class \code{\link[seqinr]{SeqFastaAA}}. If
#' the connection contains no characters, the function issues a warning and returns \code{NULL}.
#' @details The input file should contain one or more amino acid sequences separated by
#' empty line(s).
#' @export
read_txt <- function(connection) {
content <- readLines(connection)
#test for empty content
if(content[1] != "" || length(content) > 1) {
if (sum(grepl(">", content, fixed = TRUE)) == 0) {
if (content[1] != "")
content <- c("", content)
#number of empty lines
nel <- 0
#content without too many empty lines
content2 <- c()
for (i in 1L:length(content)) {
if(content[i] == "") {
nel <- nel + 1
} else {
nel <- 0
}
if (nel <= 1)
content2 <- c(content2, content[i])
}
content <- content2
content_end <- length(content)
while(content[content_end] == "")
content_end <- content_end - 1
prot_names <- sapply(1L:sum(content == ""), function(i)
paste0(">sequence", i))
content[content == ""] <- prot_names
}
read.fasta(textConnection(content), seqtype = "AA", as.string = FALSE)
} else {
warning("No text detected.")
NULL
}
} | /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/read_txt.R |
make_decision <- function(x, cutoff)
data.frame(x, Amyloid = factor(ifelse(x[["Probability"]] > cutoff, "yes", "no")))
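## Usage sketch: `pred` is the data.frame returned by predict.ag_model();
## the cutoff trades sensitivity against specificity (see spec_sens).
# pred <- predict(AmyloGram_model, pep424[17])
# AmyloGram:::make_decision(pred, cutoff = 0.5)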
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/R/utils.R |
library(shiny)
library(AmyloGram)
library(DT) # provides datatable(), used by my_DT() below
data(AmyloGram_model)
data(spec_sens)
options(shiny.maxRequestSize=10*1024^2)
options(DT.options = list(dom = "Brtip",
buttons = c("copy", "csv", "excel", "print")
))
my_DT <- function(x)
datatable(x, escape = FALSE, extensions = 'Buttons',
filter = "none", rownames = FALSE)
shinyServer(function(input, output) {
prediction <- reactive({
if (!is.null(input[["seq_file"]]))
input_sequences <- read_txt(input[["seq_file"]][["datapath"]])
input[["use_area"]]
isolate({
if (!is.null(input[["text_area"]]))
if(input[["text_area"]] != "")
input_sequences <- read_txt(textConnection(input[["text_area"]]))
})
if(exists("input_sequences")) {
if(length(input_sequences) > 50) {
#dummy error, just to stop further processing
stop("Too many sequences.")
} else {
if(any(lengths(input_sequences) < 6)) {
#dummy error, just to stop further processing
stop("The minimum length of the sequence is 6 amino acids.")
} else {
predict(AmyloGram_model, input_sequences)
}
}
} else {
NULL
}
})
decision <- reactive({
if(!is.null(prediction())) {
res <- AmyloGram:::make_decision(prediction(), input[["cutoff"]])
colnames(res) <- c("Input name", "Amyloid probability", "Is amyloid?")
res
}
})
output$dynamic_ui <- renderUI({
if (!is.null(input[["seq_file"]]))
input_sequences <- read_txt(input[["seq_file"]][["datapath"]])
input[["use_area"]]
isolate({
if (!is.null(input[["text_area"]]))
if(input[["text_area"]] != "")
input_sequences <- read_txt(textConnection(input[["text_area"]]))
})
if(exists("input_sequences")) {
tags$p(HTML("<h3><A HREF=\"javascript:history.go(0)\">Start a new query</A></h3>"))
}
})
output$pred_table <- renderTable({
#formatRound(my_DT(decision()), 2, 4)
decision()
})
output$sensitivity <- renderUI({
dat <- spec_sens[spec_sens[["Cutoff"]] == input[["cutoff"]], ]
HTML(paste0("Sensitivity: ", round(dat[["Sensitivity"]], 4), "<br>",
"Specificity: ", round(dat[["Specificity"]], 4), "<br>",
"MCC: ", round(dat[["MCC"]], 4)
))
})
output$downloadData <- downloadHandler(
filename = function() { "AmyloGram_results.csv" },
content = function(file) {
write.csv(decision(), file)
}
)
output$dynamic_tabset <- renderUI({
if(is.null(prediction())) {
tabPanel(title = "Sequence input",
tags$textarea(id = "text_area", style = "width:90%",
placeholder="Paste sequences (FASTA format required) here...", rows = 22, cols = 60, ""),
p(""),
actionButton("use_area", "Submit data from field above"),
p(""),
fileInput('seq_file', 'Submit .fasta or .txt file:'))
} else {
tabsetPanel(
tabPanel("Results",
tableOutput("pred_table"),
downloadButton('downloadData', 'Download results (.csv)'),
h4("Cut-off adjustment"),
HTML("Adjust a cut-off (a probability threshold) to obtain required specificity and sensitivity. <br>
The cut-off value affects decisions made by AmyloGram ('Is amyloid?' field in the table)."),
br(),
br(),
fluidRow(
column(3, numericInput("cutoff", value = 0.5,
label = "Cutoff", min = 0.01, max = 0.95, step = 0.01)),
column(3, htmlOutput("sensitivity"))
)
),
tabPanel("Help (explained output format)",
includeMarkdown("output_format.md")
)
)
}
})
})
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/inst/AmyloGram/server.R |
library(shiny)
shinyUI(fluidPage(tags$head(includeScript("ga.js")),
title = "AmyloGram",
includeCSS("www/shiny_paper.css"),
headerPanel(""),
sidebarLayout(
sidebarPanel(style = "background-color: #e0e0e0;",
includeMarkdown("readme.md"),
pre(includeText("prots.txt")),
uiOutput("dynamic_ui")
),
mainPanel(
uiOutput("dynamic_tabset")
)
)))
| /scratch/gouwar.j/cran-all/cranData/AmyloGram/inst/AmyloGram/ui.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @name setRestartFileSettings
#' @title setRestartFileSettings
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Set restart file output name and frequency prior to running MCMC
#' @param filename name of restart file
#' @param interval number of samples (ie. iterations * thinning) between writing new restart file
#' @param multiple if true, will output a new restart file at each interval (file name will include sample it was written at)
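#' @examples
#' \dontrun{
#' # sketch, assuming `mcmc` was created via initializeMCMCObject();
#' # the file name is arbitrary
#' mcmc$setRestartFileSettings("fit_restart.rst", 100, FALSE)
#' }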
NULL
#' @name setStepsToAdapt
#' @title setStepsToAdapt
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Set number of iterations (total iterations = samples * thinning) to allow proposal widths to adapt
#' @param steps a positive value
NULL
#' @name getStepsToAdapt
#' @title getStepsToAdapt
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Return number of iterations (total iterations = samples * thinning) to allow proposal widths to adapt
#' @return number of sample steps to adapt
NULL
#' @name getLogPosteriorTrace
#' @title getLogPosteriorTrace
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Returns the logPosterior trace
#' @return vector representing logPosterior trace
NULL
#' @name getLogLikelihoodTrace
#' @title getLogLikelihoodTrace
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Returns the logLikelihood trace
#' @return vector representing logLikelihood trace
NULL
#' @name getLogPosteriorMean
#' @title getLogPosteriorMean
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Calculate the mean log posterior probability over the last n samples
#' @param samples positive value less than the total length of the MCMC trace
#' @return mean logPosterior
NULL
#' @name getSamples
#' @title getSamples
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Return number of samples set for MCMCAlgorithm object
#' @return number of samples used during MCMC
NULL
#' @name getThinning
#' @title getThinning
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Return thinning value, which is the number of iterations (total iterations = samples * thinning) not being kept
#' @return thinning value used during MCMC
NULL
#' @name getAdaptiveWidth
#' @title getAdaptiveWidth
#' @description Return sample adaptiveWidth value, which is the number of samples (not iterations) between adapting parameter proposal widths
#' @return number of sample steps between adapting proposal widths
NULL
#' @name setSamples
#' @title setSamples
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Set number of samples set for MCMCAlgorithm object
#' @param _samples positive value
NULL
#' @name setThinning
#' @title setThinning
#' @description Set thinning value, which is the number of iterations (total iterations = samples * thinning) not being kept
#' @param _thinning positive value
NULL
#' @name setAdaptiveWidth
#' @title setAdaptiveWidth
#' @description Method of MCMC class (access via mcmc$<function name>, where mcmc is an object initialized by initializeMCMCObject). Set sample adaptiveWidth value, which is the number of samples (not iterations) between adapting parameter proposal widths
#' @param _adaptiveWidth positive value
NULL
#' @name simulateGenome
#' @title simulateGenome
#' @description Method of Model class (access via model$<function name>, where model is an object initialized by initializeModelObject). Will simulate a version of the given genome using the current set of parameters stored in the Parameter object. This can be written to a FASTA file using genome$writeFasta(<filename>,simulated = TRUE).
#' @param genome a Genome object initialized by initializeGenomeObject
NULL
#' @name readPhiValue
#' @title readPhiValue
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Read synthesis rate values from file. File should be two column file <gene_id,phi> and is expected to have a header row
#' @param filename name of file to be read
NULL
#' @name setGroupList
#' @title setGroupList
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Set amino acids (ROC, FONSE) or codons (PA, PANSE) for which parameters will be estimated. Note that non-default groupLists are still in beta testing and should be used with caution.
#' @param List of strings representing groups for parameters to be estimated. Should be one-letter amino acid codes (ROC, FONSE) or a list of sense codons (PA, PANSE).
NULL
#' @name getGroupList
#' @title getGroupList
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get amino acids (ROC, FONSE) or codons (PA, PANSE) for which parameters will be estimated
#' @return returns list of amino acids or codons
NULL
#' @name getTraceObject
#' @title getTraceObject
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get Trace object stored by a Parameter object. Useful for plotting certain parameter traces.
#' @return Trace object
NULL
#' @name initializeSynthesisRateByGenome
#' @title initializeSynthesisRateByGenome
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Initialize synthesis rates using SCUO values calculated from the genome
#' @param genome a Genome object
NULL
#' @name initializeSynthesisRateByRandom
#' @title initializeSynthesisRateByRandom
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Initialize synthesis rates by drawing a from a lognormal distribution with mean = -(sd_phi)^2/2 and sd = sd_phi
#' @param sd_phi a positive value which will be the standard deviation of the lognormal distribution
NULL
#' @name initializeSynthesisRateByList
#' @title initializeSynthesisRateByList
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Initialize synthesis rates with values passed in as a list
#' @param expression a list of values to use as initial synthesis rate values. Should be same size as number of genes in genome.
NULL
#' @name getSynthesisRate
#' @title getSynthesisRate
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get current synthesis rates for all genes and all mixtures
#' @return 2 by 2 vector of numeric values
NULL
#' @name getSynthesisRatePosteriorMeanForGene
#' @title getSynthesisRatePosteriorMeanForGene
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get posterior mean synthesis rate value for a gene
#' @param samples number of samples over which to calculate mean
#' @param geneIndex corresponding index of gene in genome for which posterior mean synthesis rate will be calculated. Should be a number between 1 and length(genome)
#' @param log_scale Calculate posterior mean on log scale
#' @return posterior mean synthesis rate for gene
NULL
#' @name getSynthesisRatePosteriorVarianceForGene
#' @title getSynthesisRatePosteriorVarianceForGene
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get synthesis rate variance for a gene
#' @param samples number of samples over which to calculate variance
#' @param geneIndex corresponding index of gene in genome for which synthesis rate variance will be calculated. Should be a number between 1 and length(genome)
#' @param unbiased Should calculate variance using unbiased (N-1) or biased (N) correction
#' @param log_scale Calculate variance on log scale
#' @return posterior variance of synthesis rate for gene
NULL
#' @name getEstimatedMixtureAssignmentForGene
#' @title getEstimatedMixtureAssignmentForGene
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get estimated mixture assignment for gene
#' @param samples number of samples over which to calculate mixture assignment
#' @param geneIndex corresponding index of gene in genome. Should be a number between 1 and length(genome).
#' @return returns value between 1 and n, where n is number of mixtures
NULL
#' @name getEstimatedMixtureAssignmentProbabilitiesForGene
#' @title getEstimatedMixtureAssignmentProbabilitiesForGene
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Get estimated mixture assignment probabilities for gene
#' @param samples number of samples over which to calculate mixture assignment probabilities
#' @param geneIndex corresponding index of gene in genome. Should be a number between 1 and length(genome).
#' @return returns vector of probabilities representing mixture probabilities for gene
NULL
#' @name getStdDevSynthesisRatePosteriorMean
#' @title getStdDevSynthesisRatePosteriorMean
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate posterior mean of standard deviation parameter of lognormal describing distribution of synthesis rates
#' @param samples number of samples over which to calculate posterior mean
#' @param mixture mixture index to use. Should be number between 0 and n-1, where n is number of mixtures
#' @return returns posterior mean for standard deviation of lognormal distribution of synthesis rates
NULL
#' @name getStdDevSynthesisRateVariance
#' @title getStdDevSynthesisRateVariance
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate variance of standard deviation parameter of lognormal describing distribution of synthesis rates
#' @param samples number of samples over which to calculate variance
#' @param mixture mixture index to use. Should be number between 0 and n-1, where n is number of mixtures
#' @param unbiased If TRUE, should calculate variance using unbiased (N-1). Otherwise, used biased (N) correction
#' @return returns variance for standard deviation of lognormal distribution of synthesis rates
NULL
#' @name getCodonSpecificPosteriorMeanForCodon
#' @title getCodonSpecificPosteriorMeanForCodon
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate codon-specific parameter (CSP) posterior mean
#' @param mixtureElement mixture to calculate CSP posterior mean. Should be between 1 and n, where n is number of mixtures.
#' @param samples number of samples to use for calculating posterior mean
#' @param codon codon to calculate CSP
#' @param paramType CSP to calculate posterior mean for. 0: Mutation (ROC,FONSE) or Alpha (PA, PANSE). 1: Selection (ROC,FONSE), Lambda (PANSE), Lambda^prime (PA). 2: NSERate (PANSE)
#' @param withoutReference If model uses reference codon, then ignore this codon (fixed at 0). Should be TRUE for ROC and FONSE. Should be FALSE for PA and PANSE.
#' @param log_scale If true, calculate posterior mean on log scale. Should only be used for PA and PANSE.
#' @return posterior mean value for CSP
NULL
#' @name getCodonSpecificPosteriorVarianceForCodon
#' @title getCodonSpecificPosteriorVarianceForCodon
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate codon-specific parameter (CSP) variance
#' @param mixtureElement mixture to calculate CSP variance. Should be between 1 and n, where n is number of mixtures.
#' @param samples number of samples to use for calculating variance
#' @param codon codon to calculate CSP
#' @param paramType CSP to calculate variance for. 0: Mutation (ROC,FONSE) or Alpha (PA, PANSE). 1: Selection (ROC,FONSE), Lambda (PANSE), Lambda^prime (PA). 2: NSERate (PANSE)
#' @param unbiased If TRUE, should calculate variance using unbiased (N-1). Otherwise, used biased (N) correction
#' @param withoutReference If model uses reference codon, then ignore this codon (fixed at 0). Should be TRUE for ROC and FONSE. Should be FALSE for PA and PANSE.
#' @param log_scale If true, calculate posterior mean on log scale. Should only be used for PA and PANSE.
#' @return variance over trace for CSP
NULL
#' @name getCodonSpecificQuantilesForCodon
#' @title getCodonSpecificQuantilesForCodon
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate quantiles of CSP traces
#' @param mixtureElement mixture to calculate CSP variance. Should be between 1 and n, where n is number of mixtures.
#' @param samples number of samples to use for calculating variance
#' @param codon codon to calculate CSP
#' @param paramType CSP to calculate variance for. 0: Mutation (ROC,FONSE) or Alpha (PA, PANSE). 1: Selection (ROC,FONSE), Lambda (PANSE), Lambda^prime (PA). 2: NSERate (PANSE)
#' @param probs vector of two doubles between 0 and 1 indicating range over which to calculate quantiles. <0.025, 0.975> would give 95\% quantiles.
#' @param withoutReference If model uses reference codon, then ignore this codon (fixed at 0). Should be TRUE for ROC and FONSE. Should be FALSE for PA and PANSE.
#' @param log_scale If true, calculate posterior mean on log scale. Should only be used for PA and PANSE.
#' @return vector representing lower and upper bound of quantile
NULL
#' @name getNoiseOffsetPosteriorMean
#' @title getNoiseOffsetPosteriorMean
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate posterior mean of noise offset parameter used when fitting model with empirical estimates of synthesis rates (i.e. withPhi fits)
#' @param index mixture index to use. Should be number between 0 and n-1, where n is number of mixtures
#' @param samples number of samples over which to calculate posterior mean
#' @return returns posterior mean for noise offset
NULL
#' @name getNoiseOffsetVariance
#' @title getNoiseOffsetVariance
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Calculate variance of noise offset parameter used when fitting model with empirical estimates of synthesis rates (i.e. withPhi fits)
#' @param index mixture index to use. Should be number between 0 and n-1, where n is number of mixtures
#' @param samples number of samples over which to calculate variance
#' @param unbiased If TRUE, should calculate variance using unbiased (N-1). Otherwise, used biased (N) correction
#' @return returns variance for noise offset
NULL
#' @name fixSphi
#' @title fixSphi
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Fix the value of s_phi (standard deviation of lognormal for synthesis rates) at its current value
NULL
#' @name initMutationCategories
#' @title initMutationCategories
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Initialize values for mutation CSP. Files should be comma-separated with a header. The three columns should be in the order Amino_acid,Codon,Value
#' @param files list of files containing starting values. Number of files should equal the number of categories.
#' @param numCategories number of mutation categories (should be less than or equal to number of mixtures)
#' @param fix Can use this parameter to fix mutation at current values (won't change over course of MCMC run)
NULL
#' @name initSelectionCategories
#' @title initSelectionCategories
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Initialize values for selection CSP. Files should be comma-separated with a header. The three columns should be in the order Amino_acid,Codon,Value
#' @param files list of files containing starting values. Number of files should equal the number of categories.
#' @param numCategories number of mutation categories (should be less than or equal to number of mixtures)
#' @param fix Can use this parameter to fix selection at current values (won't change over course of MCMC run)
NULL
#' @name fixDM
#' @title fixDM
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Fix the value of mutation at its current value
NULL
#' @name fixDEta
#' @title fixDEta
#' @description Method of Parameter class (access via parameter$<function name>, where parameter is an object initialized by initializeParameterObject). Fix the value of selection at its current value
NULL
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/RcppExports.R |
# colors for mixture elements, make as long as possible
.mixtureColors <- c("black", "red", "blue", "green4", "magenta", "darkorange", "deeppink3", "gray", "cyan", "brown", "yellow")
# for up to six codons
.codonColors <- list(GCA="blue", GCC="darkorange", GCG="purple", GCT="green4", #Ala
TGC="darkorange", TGT="green4", #Cys
GAC="darkorange", GAT="green4", #Asp
GAA="blue", GAG="purple", #Glu
TTC="darkorange", TTT="green4", #Phe
GGA="blue", GGC="darkorange", GGG="purple", GGT="green4", #Gly
CAC="darkorange", CAT="green4", #His
ATA="blue", ATC="darkorange", ATT="green4", #Ile
AAA="blue", AAG="purple", #Lys
CTA="blue", CTC="darkorange", CTG="purple", CTT="green4", TTA="darkturquoise", TTG="deeppink3", #Leu
ATG="purple",
AAC="darkorange", AAT="green4", #Asn
CCA="blue", CCC="darkorange", CCG="purple", CCT="green4", #Pro
CAA="blue", CAG="purple", #Gln
CGA="blue", CGC="darkorange", CGG="purple", CGT="green4", AGA="darkturquoise", AGG="deeppink3", #Arg
TCA="blue", TCC="darkorange", TCG="purple", TCT="green4", #Ser4
ACA="blue", ACC="darkorange", ACG="purple", ACT="green4", #Thr
GTA="blue", GTC="darkorange", GTG="purple", GTT="green4", #Val
TAC="darkorange", TAT="green4", #Tyr
AGC="darkorange", AGT="green4", #Ser2
TGG="blue",
TAA="blue", TAG="purple", TGA="darkturquoise") #Stop
.ribModelConstants <- list(
#FONSE/ROC "constants"
deltaM = "Mutation", deltaEta = "Selection", deltaOmega = "Selection",
#PA "constants
alpha = "Alpha",lambdaPrime = "LambdaPrime")
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/colorSchemes.R |
#' Genome Initialization
#'
#' \code{initializeGenomeObject} initializes the Rcpp Genome object
#'
#' @param file A file of coding sequences in fasta or RFPData format
#'
#' @param genome A genome object can be passed in to concatenate the input file to it (optional).
#'
#' @param observed.expression.file String containing the location of a file containing
#' empirical expression rates (optional). Default value is NULL.
#'
#' @param fasta Boolean value indicating whether \code{file} argument is a
#' fasta file (TRUE) or an RFPData file (FALSE). Default value is TRUE.
#'
#' @param positional Boolean indicating if the positional information in the RFPData file is necessary. Default value is FALSE
#'
#' @param match.expression.by.id If TRUE, observed expression values will be assigned by matching sequence identifier.
#' If FALSE, observed expression values will be assigned by order. Default value is TRUE.
#'
#' @param append If TRUE, function will read in additional genome data to append to an existing genome.
#' If FALSE, genome data is cleared before reading in data (no preexisting data). Default value is FALSE.
#'
#' @return This function returns the initialized Genome object.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' genes_file <- system.file("extdata", "more_genes.fasta", package = "AnaCoDa")
#' expression_file <- system.file("extdata", "expression.csv", package = "AnaCoDa")
#'
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' ## reading genome and observed expression data
#' genome <- initializeGenomeObject(file = genome_file, observed.expression.file = expression_file)
#'
#' ## add additional genes to an existing genome
#' genome <- initializeGenomeObject(file = genome_file)
#' genome <- initializeGenomeObject(file = genes_file, genome = genome, append = TRUE)
#'
initializeGenomeObject <- function(file, genome=NULL, observed.expression.file=NULL, fasta=TRUE, positional = FALSE,
match.expression.by.id=TRUE, append=FALSE) {
if (is.null(genome)){
genome <- new(Genome)
}
if (fasta == TRUE) {
genome$readFasta(file, append)
} else {
genome$readRFPData(file, append, positional)
}
if(!is.null(observed.expression.file)) {
genome$readObservedPhiValues(observed.expression.file, match.expression.by.id)
}
return(genome)
}
#' Get Codon Counts For all Amino Acids
#'
#'
#' @param genome A genome object from which the counts of each
#' codon can be obtained.
#'
#' @return Returns a data.frame storing the codon counts for each amino acid.
#'
#' @description provides the codon counts for each codon across all genes
#'
#' @details The returned data.frame contains a row for each gene and a column
#' for each codon.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#' counts <- getCodonCounts(genome)
#'
getCodonCounts <- function(genome){
codons <- codons()
ORF <- getNames(genome)
codonCounts <- lapply(codons, function(codon) {
codonCounts <- genome$getCodonCountsPerGene(codon)
})
codonCounts <- do.call("cbind", codonCounts)
colnames(codonCounts) <- codons
rownames(codonCounts) <- ORF
return(as.data.frame(codonCounts,stringsAsFactors = F))
}
#' Get Codon Counts For a specific Amino Acid
#'
#' @param aa One letter code of the amino acid for which the codon counts should be returned
#'
#' @param genome A genome object from which the counts of each
#' codon can be obtained.
#'
#' @return Returns a data.frame storing the codon counts for the specified amino acid.
#'
#' @description provides the codon counts for a given amino acid across all genes
#'
#' @details The returned matrix contains a row for each gene and a column
#' for each synonymous codon of \code{aa}.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#' counts <- getCodonCountsForAA("A", genome)
#'
getCodonCountsForAA <- function(aa, genome){
# get codon count for aa
codons <- AAToCodon(aa, F)
codonCounts <- lapply(codons, function(codon){
codonCounts <- genome$getCodonCountsPerGene(codon)
})
codonCounts <- do.call("cbind", codonCounts)
return(codonCounts)
}
#' calculates the synonymous codon usage order (SCUO)
#'
#' \code{calculateSCUO} calculates the SCUO value for each gene in genome. Note that if a codon is absent, it will be treated as NA and skipped in the final calculation
#'
#' @param genome A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @return returns the SCUO value for each gene in genome
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#' scuo <- calculateSCUO(genome)
#'
calculateSCUO <- function(genome)
{
aas <- aminoAcids()
genes <- genome$getGenes(F)
scuo.values <- data.frame(ORF=getNames(genome), SCUO=rep(NA, length(genome)))
for(i in 1:length(genes))
{
g <- genes[[i]]
total.aa.count <- g$length()/3
scuo.per.aa <- unlist(lapply(X = aas, FUN = function(aa)
{
codon <- AAToCodon(aa = aa, focal = F)
num.codons <- length(codon)
aa.count <- g$getAACount(aa)
if(aa.count == 0) return(0)
codon.count <- unlist(lapply(codon, FUN = function(c){return(g$getCodonCount(c))}))
codon.proportions <- codon.count / aa.count
## Shannon entropy needs the leading minus sign; without it the
## normalized difference below would fall outside [0, 1]
aa.entropy <- -sum(codon.proportions * log(codon.proportions))
max.entropy <- -log(1/num.codons)
norm.entropy.diff <- (max.entropy - aa.entropy) / max.entropy
comp.ratio <- aa.count / total.aa.count
scuo.aa <- comp.ratio * norm.entropy.diff
return(scuo.aa)
}))
scuo.values[i,"SCUO"] <- sum(scuo.per.aa,na.rm = T)
}
return(scuo.values)
}
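## Worked illustration: for an amino acid with 2 synonymous codons observed
## 9 and 1 times, p = c(0.9, 0.1), so
##   H = -(0.9*log(0.9) + 0.1*log(0.1)) ~ 0.325
##   Hmax = log(2)                      ~ 0.693
##   (Hmax - H)/Hmax                    ~ 0.531
## This is weighted by the amino acid's share of the gene and summed over
## amino acids to give the gene's SCUO.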
#' Length of Genome
#'
#' \code{length} gives the length of a genome
#'
#' @param x A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @return returns the number of genes in a genome
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#' length(genome) # 10
#'
length.Rcpp_Genome <- function(x) {
return(x$getGenomeSize(F))
}
#' Summary of Genome
#'
#' \code{summary} summarizes the description of a genome, such as number of genes and average gene length.
#'
#' @param object A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @param ... Optional, additional arguments to be passed to the main summary function
#' that affect the summary produced.
#'
#' @return This function returns by default an object of class c("summaryDefault", table").
summary.Rcpp_Genome <- function(object, ...) {
# TODO output stuff like:
# - no. of genes
# - avg. gene length
# - avg. A,C,G,T content
# - avg. AA composition
# - ...
summary(object, ...)
}
#' Gene Names of Genome
#'
#'
#' @param genome A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @param simulated A logical value denoting if the gene names to be listed are simulated or not.
#' The default value is FALSE.
#'
#' @description returns the identifiers of the genes within the genome specified.
#'
#' @return gene.names Returns the names of the genes as a vector of strings.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' ## return all gene ids for the genome
#' geneIDs <- getNames(genome, FALSE)
#'
getNames <- function(genome, simulated = FALSE)
{
genes <- genome$getGenes(simulated)
gene.names <- unlist(lapply(1:length(genes), function(i){return(genes[[i]]$id)}))
return(gene.names)
}
#' Add gene observed synthesis rates
#'
#' \code{addObservedSynthesisRateSet} returns the observed
#' synthesis rates of the genes within the genome specified.
#'
#' @param genome A genome object initialized with
#' \code{\link{initializeGenomeObject}} to add observed expression data.
#'
#' @param observed.expression.file A string containing
#' the location of a file containing empirical expression rates (optional).
#'
#' @param match.expression.by.id If TRUE (default) observed expression
#' values will be assigned by matching sequence identifier.
#' If FALSE observed expression values will be assigned by order
#'
#' @return Returns the genome after adding the new gene expression values
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' expression_file <- system.file("extdata", "expression.csv", package = "AnaCoDa")
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#'
#'
#' ## add expression values after the genome was initiallized,
#' ## or adding an additional set of expression values
#' genome <- addObservedSynthesisRateSet(genome = genome,
#' observed.expression.file = expression_file)
#'
addObservedSynthesisRateSet <- function(genome, observed.expression.file, match.expression.by.id=TRUE)
{
genome$readObservedPhiValues(observed.expression.file, match.expression.by.id)
return(genome)
}
#' Get gene observed synthesis rates
#'
#' \code{getObservedSynthesisRateSet} returns the observed
#' synthesis rates of the genes within the genome specified.
#'
#' @param genome A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @param simulated A logical value denoting if the synthesis
#' rates to be listed are simulated or not. The default value is FALSE.
#'
#' @return Returns a data.frame with the observed expression values in genome
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' expression_file <- system.file("extdata", "expression.csv", package = "AnaCoDa")
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#'
#'
#' ## return expression values as a data.frame with gene ids in the first column.
#' expressionValues <- getObservedSynthesisRateSet(genome = genome)
#'
getObservedSynthesisRateSet <- function(genome, simulated = FALSE)
{
genes <- genome$getGenes(simulated)
expression <- lapply(1:length(genes), function(i){return(genes[[i]]$getObservedSynthesisRateValues())})
ids <- getNames(genome, simulated)
mat <- do.call(rbind,expression)
return(cbind.data.frame(ids, mat,stringsAsFactors=F))
}
#' Calculate the CAI codon weigths for a reference genome
#'
#'
#' \code{getCAIweights} returns the weights for the Codon Adaptation Index
#' based on a reference genome.
#'
#' @param referenceGenome A genome object initialized with \code{\link{initializeGenomeObject}}.
#' @param default.weight Set default weight for any codon not observed in the reference genome
#' @return Returns a named vector with the CAI weights for each codon
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' referenceGenome <- initializeGenomeObject(file = genome_file)
#'
#' wi <- getCAIweights(referenceGenome)
#'
getCAIweights <- function(referenceGenome,default.weight=0.5)
{
aa.vec <- aminoAcids()
aa.vec <- aa.vec[-length(aa.vec)]
wi.list <- vector(mode = "list", length = length(aa.vec))
names(wi.list) <- aa.vec
codon.names <- NULL
for(aa in aa.vec)
{
codon.names <- c(codon.names, AAToCodon(aa))
## create reference table for each codon and gene
codonCountForAA.ref <- getCodonCountsForAA(aa, genome = referenceGenome)
fi <- colSums(codonCountForAA.ref)
fi.max <- max(fi)
wi.list[[aa]] <- fi / fi.max
}
wi.vec <- unlist(wi.list)
names(wi.vec) <- codon.names
wi.vec[wi.vec == 0.0] <- default.weight
return(wi.vec)
}
### NOT EXPOSED
calcCAI <- function(gene, wi)
{
# sequence string to triplets
seq <- gene$seq
seq <- unlist(strsplit(seq, ""))
seq <- paste(seq[c(T,F,F)], seq[c(F,T,F)], seq[c(F,F,T)], sep="")
codon.length <- length(seq)
CAI <- 0
for(s in seq)
{
## Sharp and Li recommend not counting Methionine and Tryptophan for CAI. Also skip stop codons
if(is.na(wi[s]) || s == "ATG" || s == "TGG" || s == "TAG" || s=="TAA" || s=="TGA")
{
codon.length <- codon.length - 1
next
}
## Calculate on log-scale to avoid potential numerical issues
CAI <- CAI + log(wi[s])
}
CAI <- exp((1/codon.length)*CAI)
return(CAI)
}
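## Note: computing on the log scale and exponentiating means
## CAI = exp(mean(log(wi))) over the counted codons, i.e. the geometric
## mean of the codon weights, which avoids underflow for long genes.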
#' Calculate the Codon Adaptation Index
#'
#'
#' \code{getCAI} returns the Codon Adaptation Index for a
#' genome based on a provided reference.
#'
#' @param referenceGenome A genome object initialized with \code{\link{initializeGenomeObject}}.
#' Serves as reference set to calculate the necessary codon weights.
#'
#' @param testGenome A genome object initialized with \code{\link{initializeGenomeObject}}.
#' The genome for which the CAI is supposed to be calculated
#'
#' @param default.weight Default weight to use if codon is missing from referenceGenome
#' @return Returns a named vector with the CAI for each gene
#'
#' @examples
#'
#' genome_file1 <- system.file("extdata", "more_genes.fasta", package = "AnaCoDa")
#' genome_file2 <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' ## reading genome
#' referenceGenome <- initializeGenomeObject(file = genome_file1)
#' testGenome <- initializeGenomeObject(file = genome_file2)
#'
#' cai <- getCAI(referenceGenome, testGenome)
#'
getCAI <- function(referenceGenome, testGenome,default.weight=0.5)
{
genes <- testGenome$getGenes(FALSE)
wi <- getCAIweights(referenceGenome,default.weight)
CAI <- unlist(lapply(genes, calcCAI, wi))
names(CAI) <- getNames(testGenome, FALSE)
return(CAI)
}
#' Calculate the Effective Number of Codons
#'
#'
#' \code{getNc} returns the Effective Number of Codons for a genome.
#'
#' @param genome A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @return Returns a named vector with the Effective Number of Codons for each gene
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "more_genes.fasta", package = "AnaCoDa")
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' nc <- getNc(genome)
#'
getNc <- function(genome)
{
aa.vec <- aminoAcids()
aa.vec <- aa.vec[-length(aa.vec)]
f.mat <- matrix(0, ncol = 6, nrow = length(genome))
division.counter <- matrix(0, ncol = 6, nrow = length(genome))
for(aa in aa.vec)
{
if(aa == "M" || aa == "W") next # contribution of M and W is 2 in total
codonCountForAA <- getCodonCountsForAA(aa, genome = genome)
n <- rowSums(codonCountForAA)
pi <- codonCountForAA / n
f.vec <- ( ((n*rowSums(pi*pi))-1) / (n-1) )
f.vec[!is.finite(f.vec)] <- 0
ncodons <- length(AAToCodon(aa))
f.mat[, ncodons] <- f.mat[, ncodons] + f.vec
division.counter[n > 1, ncodons] <- division.counter[n > 1, ncodons] + 1
}
# adjusted number of AA with codons 2, 4, and 6 since we split Serine
meanF <- data.frame(SF2=f.mat[,2]/division.counter[, 2], SF3=f.mat[,3], SF4=f.mat[,4]/division.counter[, 4], SF6=f.mat[,6]/division.counter[, 6])
rare.Ile <- meanF$SF3 < 1
meanF$SF3[rare.Ile] <- (meanF$SF2[rare.Ile] + meanF$SF4[rare.Ile])/2 # correcting for rare or missing Ile as suggested by Wright (1990, p25)
#Wright (1990) Eqn. 3 adjusted for split serine
Nc <- 2 + 10/meanF$SF2 + 1/meanF$SF3 + 6/meanF$SF4 + 2/meanF$SF6
Nc[Nc > 61] <- 61 # revising Nc as suggested by Wright (1990, p25)
names(Nc) <- getNames(genome, FALSE)
return(Nc)
}
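## Illustrative sketch, not part of the package API: Wright's (1990)
## homozygosity estimate F for a single amino acid in a single gene, matching
## the per-row computation accumulated in getNc above. `codon.counts` is a
## numeric vector of counts for the synonymous codons of one amino acid.
homozygositySketch <- function(codon.counts)
{
  n <- sum(codon.counts)  # occurrences of the amino acid
  p <- codon.counts / n   # observed codon frequencies
  (n * sum(p * p) - 1) / (n - 1)
}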
#' Calculate the Effective Number of Codons for each Amino Acid
#'
#'
#' \code{getNcAA} returns the Effective Number of Codons for each Amino Acid.
#'
#' @param genome A genome object initialized with \code{\link{initializeGenomeObject}}.
#'
#' @return Returns an object of type \code{data.frame} with the Effective Number of Codons
#' for each amino acid in each gene.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "more_genes.fasta", package = "AnaCoDa")
#' ## reading genome
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' nc <- getNcAA(genome)
#'
getNcAA <- function(genome)
{
aa.vec <- aminoAcids()
aa.vec <- aa.vec[-length(aa.vec)]
f.mat <- data.frame(matrix(NA, ncol = length(aa.vec), nrow = length(genome)))
colnames(f.mat) <- aa.vec
for(aa in aa.vec)
{
if(aa == "M" || aa == "W") next # contribution of M and W is 2 in total
codonCountForAA <- getCodonCountsForAA(aa, genome = genome)
n <- rowSums(codonCountForAA)
pi <- codonCountForAA / n
f.vec <- ( ((n*rowSums(pi*pi))-1) / (n-1) )
f.vec <- 1/f.vec
f.vec[!is.finite(f.vec)] <- NA
f.mat[[aa]] <- f.vec
}
rownames(f.mat) <- getNames(genome, FALSE)
return(f.mat)
}
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/genomeObject.R |
#' Initialize MCMC
#'
#' @param samples Number of samples to be produced when running the
#' MCMC algorithm. No default value.
#'
#' @param thinning The thinning interval between consecutive observations. If set to
#' 1, every step will be saved as a sample. Default value is 1.
#'
#' @param adaptive.width Number that determines how often the acceptance/rejection
#' window should be altered. Default value is 100 samples.
#' The proportion of MCMC steps during which the proposal distribution adapts can be set using \code{mcmc$setStepsToAdapt}. The default value of -1 adapts over the full set of iterations.
#'
#' @param est.expression Boolean that tells whether or not synthesis rate values
#' should be estimated in the MCMC algorithm run. Default value is TRUE.
#'
#' @param est.csp Boolean that tells whether or not codon specific values
#' should be estimated in the MCMC algorithm run. Default value is TRUE.
#'
#' @param est.hyper Boolean that tells whether or not hyper parameters
#' should be estimated in the MCMC algorithm run. Default value is TRUE.
#' The setting for the expression noise parameter sepsilon can be overridden by setting \code{fix.observation.noise} in \code{initializeModelObject()}
#'
#' @param est.mix Boolean that tells whether or not the genes' mixture element
#' should be estimated in the MCMC algorithm run. Default value is TRUE.
#'
#' @return mcmc Returns an initialized MCMC object.
#'
#' @description \code{initializeMCMCObject} initializes a MCMC object to
#' perform a model fitting for a parameter and model object.
#'
#' @details \code{initializeMCMCObject} sets up the MCMC object
#' (Markov chain Monte Carlo) and returns the object so a model fitting can be done.
#' It is important to note that est.expression and est.hyper will affect one another
#' negatively if their values differ.
#'
#' @examples
#'
#' ## initializing an object of type mcmc
#'
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#'
#' ## estimate all parameter types
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning, adaptive.width=adaptiveWidth,
#' est.expression=TRUE, est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#'
#' ## do not estimate expression values, initial conditions will remain constant
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning, adaptive.width=adaptiveWidth,
#' est.expression=FALSE, est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#'
#' ## do not estimate hyper parameters, initial conditions will remain constant
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning, adaptive.width=adaptiveWidth,
#' est.expression=TRUE, est.csp=TRUE, est.hyper=FALSE, est.mix = TRUE)
#'
initializeMCMCObject <- function(samples, thinning=1, adaptive.width=100,
est.expression=TRUE, est.csp=TRUE,
est.hyper=TRUE, est.mix=TRUE){
# error check given values.
if (!is.numeric(samples) || samples < 1 || !all(samples == as.integer(samples))) {
stop("samples must be a positive integer\n")
}
if (!is.numeric(thinning) || thinning < 1 || !all(thinning == as.integer(thinning))) {
stop("thinning must be a positive integer\n")
}
if (!is.numeric(adaptive.width) || adaptive.width < 1 ||
!all(adaptive.width == as.integer(adaptive.width))) {
stop("adaptive.width must be a positive integer\n")
}
if (!identical(est.expression, TRUE) && !identical(est.expression, FALSE)) {
stop("est.expression must be a boolean value\n")
}
if (!identical(est.csp, TRUE) && !identical(est.csp, FALSE)) {
stop("est.csp must be a boolean value\n")
}
if (!identical(est.hyper, TRUE) && !identical(est.hyper, FALSE)) {
stop("est.hyper must be a boolean value\n")
}
if (!identical(est.mix, TRUE) && !identical(est.mix, FALSE)) {
stop("est.mix must be a boolean value\n")
}
mcmc <- new(MCMCAlgorithm, samples, thinning, adaptive.width, est.expression,
est.csp, est.hyper)
mcmc$setEstimateMixtureAssignment(est.mix)
return(mcmc)
}
#' Run MCMC
#'
#' @param mcmc MCMC object that will run the model fitting algorithm.
#'
#' @param genome Genome that the model fitting will run on. Should be
#' the same genome associated with the parameter and model objects.
#'
#' @param model Model to run the fitting on. Should be associated with
#' the given genome.
#'
#' @param ncores Number of cores to perform the model fitting with. Default
#' value is 1.
#'
#' @param divergence.iteration Number of steps that the initial conditions
#' can diverge from the original conditions given. Default value is 0.
#'
#' @return This function has no return value.
#'
#' @description \code{runMCMC} will run a monte carlo markov chain algorithm
#' for the given mcmc, genome, and model objects to perform a model fitting.
#'
#' @details \code{runMCMC} will run for samples times thinning iterations, as specified
#' when the mcmc object is initialized. Updates are provided every 100
#' steps, and the state of the chain is saved every thinning steps.
#'
#' @examples
#'
#' #fitting a model to a genome using the runMCMC function
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#' }
#'
runMCMC <- function(mcmc, genome, model, ncores = 1, divergence.iteration = 0){
if(class(mcmc) != "Rcpp_MCMCAlgorithm") stop("mcmc is not of class Rcpp_MCMCAlgorithm")
if (ncores < 1 || !all(ncores == as.integer(ncores))) {
stop("ncores must be a positive integer\n")
}
if(class(model) == "Rcpp_PANSEModel")
{
mcmc$run_PANSE(genome, model, ncores, divergence.iteration)
} else
{
mcmc$run(genome, model, ncores, divergence.iteration)
}
}
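## Illustrative sketch, not part of the package API: as described in the
## documentation above, runMCMC performs samples * thinning iterations;
## divergence iterations are assumed here to be additional steps at the
## start of the chain.
totalIterationsSketch <- function(samples, thinning, divergence.iteration = 0)
{
  samples * thinning + divergence.iteration
}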
#' Set Restart Settings
#'
#' @param mcmc MCMC object that will run the model fitting algorithm.
#'
#' @param filename Filename for the restart files to be written.
#'
#' @param samples Number of samples that should occur before a file is written.
#'
#' @param write.multiple Boolean that determines if multiple restart files
#' are written. Default value is TRUE.
#'
#' @return This function has no return value.
#'
#' @description \code{setRestartSettings} sets the needed information (what the file
#' is called, how often the file should be written) to write
#' information to restart the MCMC algorithm from a given point.
#'
#' @details \code{setRestartSettings} writes a restart file after every set number of
#' samples. Also, if write.multiple is TRUE, instead of overwriting the previous restart
#' file, the sample number is prepended onto the file name and multiple restart files
#' are generated for a run.
#'
#' @examples
#'
#' ## set restart settings for checkpointing
#'
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#'
#' ## estimate all parameter types
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#'
#' # prompts the mcmc to write a restart file every 100 samples during the run.
#' setRestartSettings(mcmc = mcmc, filename = "test_restart", samples = 100)
#'
#' # prompts the mcmc to write a restart file every 100 samples during the run,
#' # but will overwrite it each time.
#' setRestartSettings(mcmc = mcmc, filename = "test_restart", samples = 100,
#' write.multiple = FALSE)
#'
setRestartSettings <- function(mcmc, filename, samples, write.multiple=TRUE){
if(class(mcmc) != "Rcpp_MCMCAlgorithm") stop("mcmc is not of class Rcpp_MCMCAlgorithm")
mcmc$setRestartFileSettings(filename, samples, write.multiple)
}
#' Convergence Test
#'
#' @param object an object of either class Trace or MCMC
#'
#' @param samples number of samples at the end of the trace used to determine convergence (must be < length of trace).
#' If the MCMC trace is of length x, the starting point for the
#' convergence test will be x - samples.
#'
#' @param frac1 fraction to use from beginning of samples
#'
#' @param frac2 fraction to use from end of samples
#'
#' @param thin the thinning interval between consecutive observations, used when creating a coda::mcmc object (according to the coda documentation, users should indicate via the thin parameter if an MCMC chain has already been thinned). This does not further thin the data.
#'
#' @param plot (logical) plot result instead of returning an object
#'
#' @param what (for Trace Object only) which parameter to calculate convergence.test -- current options are Selection, Mutation, MixtureProbability, Sphi, Mphi, ExpectedPhi, and AcceptanceCSP
#'
#' @param mixture (for Trace Object only) mixture for which to calculate convergence.test
#'
#' @details Be aware that convergence.test for Trace objects works primarily for Trace objects from the ROC parameter class. Future updates will adapt this function to work for parameters from other models and expression traces
#'
#' @return Geweke score object evaluating whether the means of two fractions (frac1 and frac2) differ. Convergence occurs when they don't differ significantly, i.e. pnorm(abs(convergence.test(mcmcObj)$z), lower.tail=FALSE)*2 > 0.05
#'
#' @examples
#'
#' ## check for convergence after a run:
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#' # check if posterior trace has converged
#' convergence.test(object = mcmc, samples = 500, plot = TRUE)
#'
#' trace <- getTrace(parameter)
#' # check if Mutation trace has converged
#' convergence.test(object = trace, samples = 500, plot = TRUE, what = "Mutation")
#' # check if Sphi trace has converged
#' convergence.test(object = trace, samples = 500, plot = TRUE, what = "Sphi")
#' # check if ExpectedPhi trace has converged
#' convergence.test(object = trace, samples = 500, plot = TRUE, what = "ExpectedPhi")
#' }
convergence.test <- function(object, samples = 10, frac1 = 0.1, frac2 = 0.5,
thin = 1, plot = FALSE, what = "Mutation", mixture = 1){
UseMethod("convergence.test", object)
}
convergence.test.Rcpp_MCMCAlgorithm <- function(object, samples = 10, frac1 = 0.1,
frac2 = 0.5, thin = 1, plot = FALSE, what = "Mutation", mixture = 1){
# TODO: extend to work with multiple chains once we have that capability.
  loglik.trace <- object$getLogPosteriorTrace()
trace.length <- length(loglik.trace)
start <- max(1, trace.length - samples)
# the start and end parameter do NOT work, using subsetting to achieve goal
mcmcobj <- coda::mcmc(data=loglik.trace[start:trace.length], thin = thin)
diag <- coda::geweke.diag(mcmcobj, frac1=frac1, frac2=frac2)
if(plot){
coda::geweke.plot(mcmcobj, frac1=frac1, frac2=frac2)
}else{
return(diag)
}
}
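## Illustrative sketch, not part of the package API: converting the Geweke
## z-score returned by convergence.test into a two-sided p-value, as described
## in the documentation above; values > 0.05 indicate that the means of the
## two trace fractions do not differ significantly.
gewekePValueSketch <- function(z)
{
  2 * pnorm(abs(z), lower.tail = FALSE)
}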
#' Write MCMC Object
#'
#' @param mcmc MCMC object that has run the model fitting algorithm.
#'
#' @param file A filename where the data will be stored.
#'
#' @return This function has no return value.
#'
#' @description \code{writeMCMCObject} stores the MCMC information from the
#' model fitting run in a file.
#'
#' @examples
#'
#' ## saving the MCMC object after model fitting
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#' writeMCMCObject(mcmc = mcmc, file = file.path(tempdir(), "file.Rda"))
#'
#' }
writeMCMCObject <- function(mcmc, file){
logPostTrace <- mcmc$getLogPosteriorTrace()
logLikeTrace <- mcmc$getLogLikelihoodTrace()
samples <- mcmc$getSamples()
thinning <- mcmc$getThinning()
adaptiveWidth <- mcmc$getAdaptiveWidth()
save(list = c("logPostTrace","logLikeTrace", "samples", "thinning", "adaptiveWidth"), file=file)
}
#' Load MCMC Object
#'
#' @param files The filenames from which the MCMC data will be loaded.
#'
#' @return Returns an MCMC object reconstructed from the given files.
#'
#' @description \code{loadMCMCObject} creates a new MCMC object and fills it with
#' the information in the file given.
#'
#' @details This MCMC object is not intended to be used to do another model fitting, only
#' to graph the stored results.
#'
#' @examples
#'
#' ## loading mcmc objects from the filesystem
#' \dontrun{
#' # load one mcmc object
#' mcmc <- loadMCMCObject(files = "mcmc.Rda")
#'
#' # load and combine multiple mcmc objects. Useful when using checkpointing
#' mcmc <- loadMCMCObject(files = c("mcmc1.Rda", "mcmc2.Rda"))
#' }
loadMCMCObject <- function(files){
mcmc <- new(MCMCAlgorithm)
samples <- 0
logPostTrace <- numeric(0)
logLikeTrace <- numeric(0)
for (i in 1:length(files)){
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
samples <- samples + tempEnv$samples
    end <- tempEnv$samples + 1
    curLogPostTrace <- tempEnv$logPostTrace
    curLoglikelihoodTrace <- tempEnv$logLikeTrace
    logPostTrace <- c(logPostTrace, curLogPostTrace[2:end])
    logLikeTrace <- c(logLikeTrace, curLoglikelihoodTrace[2:end])
}
mcmc$setSamples(samples)
mcmc$setThinning(tempEnv$thinning) #not needed?
mcmc$setAdaptiveWidth(tempEnv$adaptiveWidth) #not needed?
mcmc$setLogPosteriorTrace(logPostTrace)
mcmc$setLogLikelihoodTrace(logLikeTrace)
return(mcmc)
}
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/mcmcObject.R |
#' Model Initialization
#'
#' @param parameter An object created with \code{initializeParameterObject}.
#'
#' @param model A string containing the model to run (ROC, FONSE, or PA), has to match parameter object.
#'
#' @param with.phi (ROC only) A boolean that determines whether or not to include empirical
#' phi values (expression rates) for the calculations. Default value is FALSE
#'
#' @param fix.observation.noise (ROC only) Allows fixing the noise term sepsilon in the observed expression dataset to its initial condition. This value should override the est.hyper=TRUE setting in \code{initializeMCMCObject()}
#' The initial condition for the observed expression noise is set in the parameter object. Default value is FALSE.
#'
#' @param rfp.count.column (PA and PANSE only) A number representing the RFP count column to use. Default value is 1.
#'
#' @return This function returns the model object created.
#'
#' @description initializes the model object.
#'
#' @details initializeModelObject initializes a model. The type of model is determined based on the string passed to the \code{model} argument.
#' The Parameter object has to match the model that is initialized. E.g. to initialize a ROC model,
#' it is required that a ROC parameter object is passed to the function.
#'
#' @examples
#'
#' #initializing a model object
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' expression_file <- system.file("extdata", "expression.csv", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file,
#' observed.expression.file = expression_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' # initializing a model object assuming we have observed expression (phi)
#' # values stored in the genome object.
#' initializeModelObject(parameter = parameter, model = "ROC", with.phi = TRUE)
#'
#' # initializing a model object ignoring observed expression (phi)
#' # values stored in the genome object.
#' initializeModelObject(parameter = parameter, model = "ROC", with.phi = FALSE)
#'
initializeModelObject <- function(parameter, model = "ROC", with.phi = FALSE, fix.observation.noise = FALSE, rfp.count.column = 1) {
  if (model == "ROC") {
    c.model <- new(ROCModel, with.phi, fix.observation.noise)
  } else if (model == "FONSE") {
    c.model <- new(FONSEModel, with.phi, fix.observation.noise)
  } else if (model == "PA") {
    c.model <- new(PAModel, rfp.count.column, with.phi, fix.observation.noise)
  } else if (model == "PANSE") {
    c.model <- new(PANSEModel, rfp.count.column, with.phi, fix.observation.noise)
  } else {
    stop("Unknown model.")
  }
c.model$setParameter(parameter)
return(c.model)
}
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/modelObject.R |
NAMESPACE <- environment()
# TODO: Test with exposure and what doesn't need to be exposed to R
Rcpp::loadModule("Test_mod", TRUE)
Rcpp::loadModule("Trace_mod", TRUE)
Rcpp::loadModule("CovarianceMatrix_mod", TRUE)
Rcpp::loadModule("MCMCAlgorithm_mod", TRUE)
Rcpp::loadModule("Model_mod", TRUE)
Rcpp::loadModule("Parameter_mod", TRUE)
Rcpp::loadModule("Genome_mod", TRUE)
Rcpp::loadModule("Gene_mod", TRUE)
Rcpp::loadModule("SequenceSummary_mod", TRUE)
#.onLoad <- function(libname, pkgname){
#library.dynam("ribModel", pkgname, libname)
# invisible()
#} # End of .onLoad().
#.onUnload <- function(libpath){
# library.dynam.unload("ribModel", libpath)
# invisible()
#} # End of .onUnload().
#.onAttach <- function(libname, pkgname){
# invisible()
#} # End of .onAttach().
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/onloadPackage.R |
#' Initialize Parameter
#'
#' @param genome An object of type Genome necessary for the initialization of the Parameter object.
#' The default value is NULL.
#'
#' @param sphi Initial values for sphi. Expected is a vector of length numMixtures.
#' The default value is NULL.
#'
#' @param num.mixtures The number of mixtures elements for the underlying mixture distribution (numMixtures > 0).
#' The default value is 1.
#'
#' @param gene.assignment A vector holding the initial mixture assignment for each gene.
#' The vector length has to equal the number of genes in the genome.
#' Valid values for the vector range from 1 to numMixtures.
#' It is possible but not advised to leave a mixture element empty.
#' The default Value is NULL.
#'
#' @param initial.expression.values (Optional) A vector with intial phi values.
#' The length of the vector has to equal the number of genes in the Genome object and the order of the genes should match the order of the genes in the Genome.
#' The default value is NULL.
#'
#' @param model Specifies the model used. Valid options are "ROC", "PA", "PANSE", or "FONSE".
#' The default model is "ROC".
#' ROC is described in Gilchrist et al. 2015.
#' PA, PANSE and FONSE are currently unpublished.
#'
#' @param split.serine Whether serine should be considered as
#' one or two amino acids when running the model.
#' TRUE and FALSE are the only valid values.
#' The default value for split.serine is TRUE.
#'
#' @param mixture.definition A string describing how each mixture should
#' be treated with respect to mutation and selection.
#' Valid values consist of "allUnique", "mutationShared", and "selectionShared".
#' The default value for mixture.definition is "allUnique".
#' See details for more information.
#'
#' @param mixture.definition.matrix A matrix representation of how
#' the mutation and selection categories correspond to the mixtures.
#' The default value for mixture.definition.matrix is NULL.
#' If provided, the model will use the matrix to initialize the mutation and selection
#' categories instead of the definition listed directly above.
#' See details for more information.
#'
#' @param init.with.restart.file File name containing information to reinitialize a
#' previous Parameter object.
#' If given, all other arguments will be ignored.
#' The default value for init.with.restart.file is NULL.
#'
#' @param mutation.prior.mean Controls the mean of the normal prior on mutation parameters.
#' If passed in as a single number (default is 0), this will be the mean value for all categories and all codons. Users may also
#' supply a vector with n * 40 values, where n is the number of mutation categories. Future versions will check that the number of values matches
#' the number of mutation categories defined by the user.
#'
#' @param mutation.prior.sd Controls the standard deviation of the normal prior on the mutation parameters.
#' If passed in as a single number (default is 0.35), this will be the standard deviation for all categories and all codons. Users may also
#' supply a vector with n * 40 values, where n is the number of mutation categories. Future versions will check that the number of values matches
#' the number of mutation categories defined by the user.
#'
#' @param propose.by.prior Mutation bias parameters will be proposed based on the means and standard deviations set in mutation.prior.mean and mutation.prior.sd
#'
#' @param init.csp.variance specifies the initial proposal width for codon-specific parameters (default is 0.0025).
#' The proposal width adapts during the runtime to reach a target acceptance rate of ~0.25
#'
#' @param init.sepsilon specifies the initial value for sepsilon. Default is 0.1.
#'
#' @param init.w.obs.phi If TRUE, initialize phi values with observed phi values
#' (data from RNAseq, mass spectrometry, ribosome footprinting). Default is FALSE.
#' If multiple observed phi values exist for a gene, the geometric mean of these values is used as initial phi.
#' When using this function, one should remove any genes with
#' missing phi values, as these genes will not have an initial phi value.
#'
#' @param init.initiation.cost FOR FONSE ONLY. Initializes the initiation cost a_1 at this value.
#'
#' @param init.partition.function FOR PANSE ONLY. Initializes the partition function Z.
#'
#' @return parameter Returns an initialized Parameter object.
#'
#' @description \code{initializeParameterObject} initializes a new parameter object or reconstructs one from a restart file
#'
#' @details \code{initializeParameterObject} checks the values of the arguments
#' given to insure the values are valid.
#'
#' The mixture definition and mixture definition matrix describes how the mutation
#' and selection categories are set up with respect to the number of mixtures. For
#' example, if mixture.definition = "allUnique" and numMixtures = 3, a matrix
#' representation would be \code{matrix(c(1,2,3,1,2,3), ncol=2)}
#' where each row represents a mixture, the first column represents the mutation
#' category, and the second column represents the selection category.
#' Another example would be mixture.definition = "selectionShared" and numMixtures = 4 (
#' \code{matrix(c(1,2,3,4,1,1,1,1), ncol=2)}).
#' In this case, the selection category is the same for every mixture. If a matrix
#' is given, and it is valid, then the mutation/selection relationship will be
#' defined by the given matrix and the keyword will be ignored. A matrix should only
#' be given in cases where the keywords would not create the desired matrix.
#'
#' @examples
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#' restart_file <- system.file("extdata", "restart_file.rst", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#'
#' ## initialize a new parameter object
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' ## re-initialize a parameter object from a restart file. Useful for checkpointing
#' parameter <- initializeParameterObject(init.with.restart.file = restart_file)
#'
#' ## initialize a parameter object with a custom mixture definition matrix
#' def.matrix <- matrix(c(1,1,1,2), ncol=2)
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2,
#' gene.assignment = geneAssignment,
#' mixture.definition.matrix = def.matrix)
#'
initializeParameterObject <- function(genome = NULL, sphi = NULL, num.mixtures = 1,
gene.assignment = NULL, initial.expression.values = NULL,
model = "ROC", split.serine = TRUE,
mixture.definition = "allUnique",
mixture.definition.matrix = NULL,
init.with.restart.file = NULL, mutation.prior.mean = 0.0, mutation.prior.sd = 0.35, propose.by.prior=FALSE,
init.csp.variance = 0.0025, init.sepsilon = 0.1,
init.w.obs.phi=FALSE, init.initiation.cost = 4,init.partition.function=1){
# check input integrity
if(is.null(init.with.restart.file)){
if(length(sphi) != num.mixtures){
stop("Not all mixtures have an Sphi value assigned!\n")
}
if(length(genome) != length(gene.assignment)){
stop("Not all Genes have a mixture assignment!\n")
}
if(max(gene.assignment) > num.mixtures){
stop("Gene is assigned to non existing mixture!\n")
}
if(num.mixtures < 1){
stop("num. mixture has to be a positive non-zero value!\n")
}
if (!is.null(sphi)) {
if (length(sphi) != num.mixtures) {
stop("sphi must be a vector of length numMixtures\n")
}
}
if (!is.null(initial.expression.values)) {
if (length(initial.expression.values) != length.Rcpp_Genome(genome)) {
stop("initial.expression.values must have length equal to the number of genes in the Genome object\n")
}
}
if (!identical(split.serine, TRUE) && !identical(split.serine, FALSE)) {
stop("split.serine must be a boolean value\n")
}
if (mixture.definition != "allUnique" && mixture.definition != "mutationShared" &&
mixture.definition != "selectionShared") {
stop("mixture.definition must be \"allUnique\", \"mutationShared\", or \"selectionShared\". Default is \"allUnique\"\n")
}
if (mutation.prior.sd < 0) {
stop("mutation.prior.sd should be positive\n")
}
if (init.csp.variance < 0) {
stop("init.csp.variance should be positive\n")
}
if (any(init.sepsilon < 0)) {
stop("init.sepsilon should be positive\n")
}
} else {
if (!file.exists(init.with.restart.file)) {
stop("init.with.restart.file provided does not exist\n")
}
}
if(model == "ROC"){
if(is.null(init.with.restart.file)){
parameter <- initializeROCParameterObject(genome, sphi, num.mixtures,
gene.assignment, initial.expression.values, split.serine,
mixture.definition, mixture.definition.matrix,
mutation.prior.mean,mutation.prior.sd,propose.by.prior,init.csp.variance, init.sepsilon,init.w.obs.phi)
}else{
parameter <- new(ROCParameter, init.with.restart.file)
}
}else if(model == "FONSE"){
if(is.null(init.with.restart.file)){
parameter <- initializeFONSEParameterObject(genome, sphi, num.mixtures,
gene.assignment, initial.expression.values, split.serine,
mixture.definition, mixture.definition.matrix, init.csp.variance,init.sepsilon,init.w.obs.phi,init.initiation.cost)
}else{
parameter <- new(FONSEParameter, init.with.restart.file)
}
}else if(model == "PA"){
if(is.null(init.with.restart.file)){
parameter <- initializePAParameterObject(genome, sphi, num.mixtures,
gene.assignment, initial.expression.values, split.serine,
mixture.definition, mixture.definition.matrix, init.csp.variance,init.sepsilon,init.w.obs.phi)
}else{
parameter <- new(PAParameter, init.with.restart.file)
}
}else if(model == "PANSE"){
if(is.null(init.with.restart.file)){
parameter <- initializePANSEParameterObject(genome, sphi, num.mixtures,
gene.assignment, initial.expression.values, split.serine,
mixture.definition, mixture.definition.matrix, init.csp.variance,init.sepsilon,init.w.obs.phi,init.partition.function)
}else{
parameter <- new(PANSEParameter, init.with.restart.file)
}
}else{
stop("Unknown model.")
}
return(parameter)
}
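## Illustrative sketch, not part of the package API: the matrix encodings that
## correspond to the mixture.definition keywords described in the roxygen
## details above. Column 1 holds the mutation category and column 2 the
## selection category of each mixture.
mixtureDefinitionMatrixSketch <- function(num.mixtures, keyword = "allUnique")
{
  switch(keyword,
         allUnique       = matrix(c(1:num.mixtures, 1:num.mixtures), ncol = 2),
         mutationShared  = matrix(c(rep(1, num.mixtures), 1:num.mixtures), ncol = 2),
         selectionShared = matrix(c(1:num.mixtures, rep(1, num.mixtures)), ncol = 2),
         stop("unknown mixture.definition keyword"))
}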
#Called from initializeParameterObject.
initializeROCParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
expressionValues = NULL, split.serine = TRUE,
mixture.definition = "allUnique",
mixture.definition.matrix = NULL, mutation_prior_mean = 0.0, mutation_prior_sd = 0.35, propose.by.prior=FALSE,init.csp.variance = 0.0025, init.sepsilon = 0.1,init.w.obs.phi=FALSE){
if(is.null(mixture.definition.matrix)){
# keyword constructor
parameter <- new(ROCParameter, as.vector(sphi), numMixtures, geneAssignment,
split.serine, mixture.definition)
}else{
#matrix constructor
mixture.definition <- c(mixture.definition.matrix[, 1],
mixture.definition.matrix[, 2])
parameter <- new(ROCParameter, as.vector(sphi), geneAssignment,
mixture.definition, split.serine)
}
# initialize expression values
if(is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByGenome(genome,mean(sphi))
}
else if(init.w.obs.phi == T && is.null(expressionValues))
{
observed.phi <- getObservedSynthesisRateSet(genome)
if (ncol(observed.phi)-1 > 1)
{
observed.phi <- apply(observed.phi[,2:ncol(observed.phi)],geomMean,MARGIN = 1)
}
else
{
observed.phi <- observed.phi[,2]
}
parameter$initializeSynthesisRateByList(observed.phi)
}
else if (!is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByList(expressionValues)
}
else
{
stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
}
n.obs.phi.sets <- ncol(getObservedSynthesisRateSet(genome)) - 1
parameter$setNumObservedSynthesisRateSets(n.obs.phi.sets)
if (length(mutation_prior_mean) == 1)
{
mutation_prior_mean <- rep(mutation_prior_mean,length=parameter$numMutationCategories * 40)
} else{
mutation_prior_mean <- as.vector(t(mutation_prior_mean))
}
if (length(mutation_prior_sd) == 1)
{
mutation_prior_sd <- rep(mutation_prior_sd,length=parameter$numMutationCategories * 40)
} else{
mutation_prior_sd <- as.vector(t(mutation_prior_sd))
}
parameter$setMutationPriorMean(mutation_prior_mean)
parameter$setMutationPriorStandardDeviation(mutation_prior_sd)
parameter$setProposeByPrior(propose.by.prior)
if (n.obs.phi.sets != 0){
parameter$setInitialValuesForSepsilon(as.vector(init.sepsilon))
}
parameter <- initializeCovarianceMatrices(parameter, genome, numMixtures, geneAssignment, init.csp.variance)
return(parameter)
}
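## Illustrative sketch, not part of the package API: the geometric mean used
## above (via the package's geomMean helper, defined elsewhere) to combine
## multiple observed phi values per gene; a minimal version would be:
geomMeanSketch <- function(x)
{
  exp(mean(log(x)))
}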
#Called from initializeParameterObject.
initializePAParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
expressionValues = NULL, split.serine = TRUE,
mixture.definition = "allUnique",
mixture.definition.matrix = NULL, init.csp.variance = 0.0025 ,init.sepsilon = 0.1,init.w.obs.phi=FALSE){
if(is.null(mixture.definition.matrix))
{ # keyword constructor
parameter <- new(PAParameter, as.vector(sphi), numMixtures, geneAssignment,
split.serine, mixture.definition)
}else{
#matrix constructor
mixture.definition <- c(mixture.definition.matrix[, 1],
mixture.definition.matrix[, 2])
parameter <- new(PAParameter, as.vector(sphi), geneAssignment,
mixture.definition, split.serine)
}
# initialize expression values
if(is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByRandom(mean(sphi))
}
else if(init.w.obs.phi == T && is.null(expressionValues))
{
observed.phi <- getObservedSynthesisRateSet(genome)
if (ncol(observed.phi)-1 > 1)
{
observed.phi <- apply(observed.phi[,2:ncol(observed.phi)],geomMean,MARGIN = 1)
}
else
{
observed.phi <- observed.phi[,2]
}
parameter$initializeSynthesisRateByList(observed.phi)
}
else if (!is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByList(expressionValues)
}
else
{
stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
}
## TODO (Cedric): use init.csp.variance to set initial proposal width for CSP parameters
n.obs.phi.sets <- ncol(getObservedSynthesisRateSet(genome)) - 1
parameter$setNumObservedSynthesisRateSets(n.obs.phi.sets)
if (n.obs.phi.sets != 0){
parameter$setInitialValuesForSepsilon(as.vector(init.sepsilon))
}
return (parameter)
}
#Called from initializeParameterObject.
initializePANSEParameterObject <- function(genome, sphi, numMixtures, geneAssignment,
expressionValues = NULL, split.serine = TRUE,
mixture.definition = "allUnique",
mixture.definition.matrix = NULL, init.csp.variance = 0.0025 ,init.sepsilon = 0.1,init.w.obs.phi=FALSE,init.partition.function=1){
if(is.null(mixture.definition.matrix))
{ # keyword constructor
parameter <- new(PANSEParameter, as.vector(sphi), numMixtures, geneAssignment,
split.serine, mixture.definition)
}else{
#matrix constructor
mixture.definition <- c(mixture.definition.matrix[, 1],
mixture.definition.matrix[, 2])
parameter <- new(PANSEParameter, as.vector(sphi), geneAssignment,
mixture.definition, split.serine)
}
  # initialize expression values
if(is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByRandom(mean(sphi))
}
else if(init.w.obs.phi == T && is.null(expressionValues))
{
observed.phi <- getObservedSynthesisRateSet(genome)
if (ncol(observed.phi)-1 > 1)
{
observed.phi <- apply(observed.phi[,2:ncol(observed.phi)],geomMean,MARGIN = 1)
}
else
{
observed.phi <- observed.phi[,2]
}
parameter$initializeSynthesisRateByList(observed.phi)
}
else if (!is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByList(expressionValues)
}
else
{
stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
}
parameter$setTotalRFPCount(genome);
for (j in 1:numMixtures)
{
parameter$setPartitionFunction(init.partition.function,j-1)
}
n.obs.phi.sets <- ncol(getObservedSynthesisRateSet(genome)) - 1
parameter$setNumObservedSynthesisRateSets(n.obs.phi.sets)
if (n.obs.phi.sets != 0){
parameter$setInitialValuesForSepsilon(as.vector(init.sepsilon))
}
return (parameter)
}
#Called from initializeParameterObject.
initializeFONSEParameterObject <- function(genome, sphi, numMixtures,
geneAssignment, expressionValues = NULL, split.serine = TRUE,
mixture.definition = "allUnique",
mixture.definition.matrix = NULL, init.csp.variance = 0.0025 ,init.sepsilon = 0.1,init.w.obs.phi=FALSE,init.initiation.cost = 4){
# create Parameter object
if(is.null(mixture.definition.matrix))
{ # keyword constructor
parameter <- new(FONSEParameter, as.vector(sphi), numMixtures, geneAssignment,
split.serine, mixture.definition, init.initiation.cost)
}else{
#matrix constructor
mixture.definition <- c(mixture.definition.matrix[, 1],
mixture.definition.matrix[, 2])
parameter <- new(FONSEParameter, as.vector(sphi), geneAssignment,
mixture.definition, split.serine,init.initiation.cost)
}
  # initialize expression values
if(is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByGenome(genome,mean(sphi))
}
else if(init.w.obs.phi == T && is.null(expressionValues))
{
observed.phi <- getObservedSynthesisRateSet(genome)
if (ncol(observed.phi)-1 > 1)
{
observed.phi <- apply(observed.phi[,2:ncol(observed.phi)],geomMean,MARGIN = 1)
}
else
{
observed.phi <- observed.phi[,2]
}
parameter$initializeSynthesisRateByList(observed.phi)
}
else if (!is.null(expressionValues) && init.w.obs.phi == F)
{
parameter$initializeSynthesisRateByList(expressionValues)
}
else
{
stop("expressionValues is not NULL and init.w.obs.phi == TRUE. Please choose only one of these options.")
}
n.obs.phi.sets <- ncol(getObservedSynthesisRateSet(genome)) - 1
parameter$setNumObservedSynthesisRateSets(n.obs.phi.sets)
if (n.obs.phi.sets != 0){
parameter$setInitialValuesForSepsilon(as.vector(init.sepsilon))
}
parameter <- initializeCovarianceMatrices(parameter, genome, numMixtures, geneAssignment, init.csp.variance)
return(parameter)
}
#' Calculates the marginal log-likelihood for a set of parameters
#'
#' @param parameter An object created with \code{initializeParameterObject}.
#'
#' @param mcmc An object created with \code{initializeMCMCObject}
#'
#' @param mixture determines for which mixture the marginal log-likelihood should be calculated
#'
#' @param n.samples How many samples should be used for the calculation
#'
#' @param divisor A value > 1 in order to scale down the tails of the importance distribution
#'
#' @param warnings Print warnings, such as when the variance of a parameter is 0, which might occur when a parameter is fixed
#' @return Returns the estimated marginal log-likelihood.
#'
#' @description Calculates the marginal log-likelihood for a given mixture of a fitted model.
#'
#' @details \code{calculateMarginalLogLikelihood} calculates the marginal log-likelihood needed for the
#' calculation of the Bayes factor, using a generalized harmonic mean estimator of the
#' marginal likelihood. See Gronau et al. (2017) for details.
#'
#' @examples
#' \dontrun{
#' # Calculate the log-marginal likelihood
#' parameter <- loadParameterObject("parameter.Rda")
#' mcmc <- loadMCMCObject("mcmc.Rda")
#' calculateMarginalLogLikelihood(parameter, mcmc, mixture = 1,
#'                                n.samples = 500, divisor = 1.5)
#'
#' # Calculate the Bayes factor for two models
#' parameter1 <- loadParameterObject("parameter1.Rda")
#' parameter2 <- loadParameterObject("parameter2.Rda")
#' mcmc1 <- loadMCMCObject("mcmc1.Rda")
#' mcmc2 <- loadMCMCObject("mcmc2.Rda")
#' mll1 <- calculateMarginalLogLikelihood(parameter1, mcmc1, mixture = 1,
#'                                        n.samples = 500, divisor = 1.5)
#' mll2 <- calculateMarginalLogLikelihood(parameter2, mcmc2, mixture = 1,
#'                                        n.samples = 500, divisor = 1.5)
#' cat("Bayes factor: ", mll1 - mll2, "\n")
#' }
#'
calculateMarginalLogLikelihood <- function(parameter, mcmc, mixture, n.samples, divisor,warnings=TRUE)
{
if(divisor < 1) stop("Generalized Harmonic Mean Estimation of Marginal Likelihood requires importance sampling distribution variance divisor be greater than 1")
## Collect information from AnaCoDa objects
trace <- parameter$getTraceObject()
  ## This should be the posterior instead of the log posterior, but that causes an overflow; needs a fix.
log_posterior <- mcmc$getLogPosteriorTrace()
log_posterior <- log_posterior[(length(log_posterior) - n.samples+1):(length(log_posterior))]
### HANDLE CODON SPECIFIC PARAMETERS
log_imp_dens_sample <- rep(0, n.samples)
for (k in 1:mixture)
{
for(ptype in 0:1) # for all parameter types (mutation/selection parameters)
{
for(aa in AnaCoDa::aminoAcids()) # for all amino acids
{
if(aa == "M" || aa == "W" || aa == "X") next # ignore amino acids with only one codon or stop codons
codons <- AnaCoDa::AAToCodon(aa, focal = T)
## get covariance matrix and mean of importance distribution
sample_mat <- matrix(NA, ncol = length(codons), nrow = n.samples)
mean_vals <- rep(NA, length(codons))
for(i in 1:length(codons)) # for all codons
{
vec <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(k, codons[i], ptype, TRUE)
vec <- vec[(length(vec) - n.samples+1):(length(vec))]
sample_mat[,i] <- vec
mean_vals[i] <- mean(vec)
}
        ## scaled (shrunken) covariance matrix
cov_mat <- cov(sample_mat) / divisor
if(all(cov_mat == 0))
{
if(warnings) print(paste("Covariance matrix for codons in amino acid",aa,"has 0 for all values. Skipping."))
next
}
for(i in 1:n.samples)
{
## calculate importance density for collected samples
## mikeg: We should double check with Russ that it is okay to use the full covariance matrix of the sample (even though we have no prior on the cov structure) when constructing the importance density function.
## It seems logical to do so
log_imp_dens_aa <- dmvnorm(x = sample_mat[i,], mean = mean_vals, sigma = cov_mat, log = TRUE)
log_imp_dens_sample[i] = log_imp_dens_sample[i] + log_imp_dens_aa
}
}
}
}
## HANDLE GENE SPECIFIC PARAMETERS
# phi values are stored on natural scale.
synt_trace <- trace$getSynthesisRateTrace()[[mixture]]
n_genes <- length(synt_trace);
sd_vals <- rep(NA, n_genes)
mean_vals <- rep(NA, n_genes)
for(i in 1:n_genes) ## i is indexing across genes
{
vec <- synt_trace[[i]]
vec <- vec[(length(vec) - n.samples+1):(length(vec))]
sd_vals[i] <- sd(vec)
    if (sd_vals[i] == 0)
{
if(warnings) print(paste("Variance of gene",i,"is 0. Skipping."))
next
}
mean_vals[i] <- mean(vec)
log_mean_vals <- log(mean_vals) - 0.5 * log(1+(sd_vals^2/mean_vals^2))
log_sd_vals <- sqrt(log(1+(sd_vals^2/mean_vals^2)))
## Calculate vector of importance density for entire \phi trace of gene.
log_imp_dens_phi <- dlnorm(x = vec, meanlog = log_mean_vals[i], sdlog = log_sd_vals[i]/divisor, log = TRUE)
## update importance density function vector of sample with current gene;
log_imp_dens_sample <- log_imp_dens_sample + log_imp_dens_phi
} ## end synth_trace loop
## Scale importance density for each sample by its posterior probability (on log scale)
log_imp_dens_over_posterior <- log_imp_dens_sample - log_posterior
## now scale by max term to facilitate summation
max_log_term <- max(log_imp_dens_over_posterior)
## Y = X - max_X
offset_log_imp_dens_over_posterior <- log_imp_dens_over_posterior - max_log_term
## Z = sum(exp(vec(Y)))
offset_sum_imp_dens_over_posterior <- sum(exp(offset_log_imp_dens_over_posterior))
log_sum_imp_dens_over_posterior <- log(offset_sum_imp_dens_over_posterior) + max_log_term
  ## ln(ML) = ln(n) - (ln(Z) + max_X)
log_marg_lik <- log(n.samples) - log_sum_imp_dens_over_posterior
##marg_lik = 1.0/(log_inv_marg_lik/n.samples) # equation 9
return(log_marg_lik)
}
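## Illustrative sketch, not part of the package API: the log-sum-exp trick
## used above to sum terms available only on the log scale without overflow,
## so that ln(ML) = ln(n.samples) - logSumExpSketch(log_imp_dens_over_posterior).
logSumExpSketch <- function(log.x)
{
  m <- max(log.x)
  m + log(sum(exp(log.x - m)))
}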
#' Find and return list of optimal codons
#'
#' \code{findOptimalCodon} extracts the optimal codon for each amino acid.
#'
#' @param csp a \code{data.frame} as returned by \code{getCSPEstimates}.
#'
#' @return A named list with the optimal codon(s) for each amino acid.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' csp_mat <- getCSPEstimates(parameter)$Selection
#' opt_codons <- findOptimalCodon(csp_mat)
#' }
findOptimalCodon <- function(csp)
{
aas <- aminoAcids()
n.aa <- length(aas)
result <- vector("list", length(aas))
names(result) <- aas
for(j in 1:n.aa)
{
aa <- aas[j]
if(aa == "W" || aa == "M" || aa == "X") next
aa.pos <- which(csp$AA == aa)
opt.codon.pos <- which(csp[aa.pos, 3] == min(csp[aa.pos, 3]))
result[[j]] <- csp$Codon[aa.pos[opt.codon.pos]]
}
return(result)
}
getNSEProbabilityTrace <- function(parameter,mixture,codon,samples)
{
trace <- parameter$getTraceObject()
alpha <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codon, 0, F)
lambda <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codon, 1, F)
nserate <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codon, 2, F)
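  # probability of a nonsense error at the codon: the NSE rate scaled by
  # alpha/lambda, the mean of the codon's elongation waiting-time distribution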
prob.nse.trace <- nserate * (alpha/lambda)
  return(prob.nse.trace[(length(prob.nse.trace)-samples+1):length(prob.nse.trace)])
}
#' Return Codon Specific Parameter estimates as data.frame (or write to csv)
#'
#' @param parameter parameter an object created by \code{initializeParameterObject}.
#'
#' @param filename Posterior estimates will be written to file (format: csv). Filenames will be of the form <filename>_<parameter_name>.csv.
#'
#' @param mixture estimates for which mixture should be returned
#'
#' @param samples The number of samples used for the posterior estimates.
#'
#' @param relative.to.optimal.codon Boolean determining if parameters should be relative to the preferred codon or the alphabetically last codon (Default=TRUE). Only applies to ROC and FONSE models
#'
#' @param report.original.ref Include the original reference codon (Default = TRUE). Note this is only included for the purposes of simulations, which expect the input parameter file to be in a specific format. Later versions of AnaCoDa will remove this.
#'
#' @param log.scale Calculate posterior means, standard deviation, and posterior probability intervals on the natural log scale. Should be used for PA and PANSE models only.
#'
#' @return returns a list data.frame with the posterior estimates of the models
#' codon specific parameters or writes it directly to a csv file if \code{filename} is specified
#'
#' @description \code{getCSPEstimates} returns the codon specific
#' parameter estimates for a given parameter and mixture or write it to a csv file.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' ## return estimates for codon specific parameters
#' csp_mat <- getCSPEstimates(parameter)
#'
#' # write the result directly to the filesystem as a csv file. No values are returned
#' getCSPEstimates(parameter, filename=file.path(tempdir(), "test.csv"))
#'
#' }
getCSPEstimates <- function(parameter, filename=NULL, mixture = 1, samples = 10, relative.to.optimal.codon=T, report.original.ref = T,log.scale=F)
{
if((class(parameter)=="Rcpp_ROCParameter" || class(parameter)=="Rcpp_FONSEParameter") && log.scale)
{
stop("Log transformation will be performed on negative values. Stopping execution of getCSPEstimates.")
}
model.conditions <- checkModel(parameter)
model.uses.ref.codon <- model.conditions$model.uses.ref.codon
names.aa <- model.conditions$aa
codons <- model.conditions$codons
parameter.names <- model.conditions$parameter.names
## Creates empty vector of 0 for initial dataframes
init <- rep(0.0,length(codons))
param.1<- data.frame(Codon=codons,AA=names.aa,Mean=init,Std.Dev=init,Lower.quant=init,Upper.quant=init,stringsAsFactors = F,row.names = codons)
param.2 <- data.frame(Codon=codons,AA=names.aa,Mean=init,Std.Dev=init,Lower.quant=init,Upper.quant=init,stringsAsFactors = F,row.names=codons)
param.3 <- data.frame(Codon=codons,AA=names.aa,Mean=init,Std.Dev=init,Lower.quant=init,Upper.quant=init,stringsAsFactors = F,row.names=codons)
param.4 <- data.frame(Codon=codons,AA=names.aa,Mean=init,Std.Dev=init,Lower.quant=init,Upper.quant=init,stringsAsFactors = F,row.names=codons)
if (model.uses.ref.codon)
{
codons <- codons[which(codons %in% unlist(lapply(X = names.aa,FUN = AAToCodon,T)))]
}
## Get parameter estimate for each codon
for (codon in codons)
{
param.1[codon,"Mean"] <- parameter$getCodonSpecificPosteriorMean(mixtureElement=mixture,samples=samples,codon=codon,paramType=0,withoutReference=model.uses.ref.codon,log_scale=log.scale)
param.2[codon,"Mean"] <- parameter$getCodonSpecificPosteriorMean(mixtureElement=mixture,samples=samples,codon=codon,paramType=1,withoutReference=model.uses.ref.codon,log_scale=log.scale)
param.1[codon,"Std.Dev"] <- sqrt(parameter$getCodonSpecificVariance(mixtureElement=mixture,samples=samples,codon=codon,paramType=0,unbiased=T,withoutReference=model.uses.ref.codon,log_scale=log.scale))
param.2[codon,"Std.Dev"] <- sqrt(parameter$getCodonSpecificVariance(mixtureElement=mixture,samples=samples,codon=codon,paramType=1,unbiased=T,withoutReference=model.uses.ref.codon,log_scale=log.scale))
param.1[codon,c("Lower.quant","Upper.quant")] <- parameter$getCodonSpecificQuantile(mixtureElement=mixture, samples=samples,codon=codon,paramType=0, probs=c(0.025, 0.975),withoutReference=model.uses.ref.codon,log_scale=log.scale)
param.2[codon,c("Lower.quant","Upper.quant")] <- parameter$getCodonSpecificQuantile(mixtureElement=mixture, samples=samples,codon=codon,paramType=1, probs=c(0.025, 0.975),withoutReference=model.uses.ref.codon,log_scale=log.scale)
if (length(parameter.names) == 4)
{
param.3[codon,"Mean"] <- parameter$getCodonSpecificPosteriorMean(mixtureElement=mixture,samples=samples,codon=codon,paramType=2,withoutReference=model.uses.ref.codon,log_scale=log.scale)
param.3[codon,"Std.Dev"] <- sqrt(parameter$getCodonSpecificVariance(mixtureElement=mixture,samples=samples,codon=codon,paramType=2,unbiased=T,withoutReference=model.uses.ref.codon,log_scale=log.scale))
param.3[codon,c("Lower.quant","Upper.quant")] <- parameter$getCodonSpecificQuantile(mixtureElement=mixture, samples=samples,codon=codon,paramType=2, probs=c(0.025, 0.975),withoutReference=model.uses.ref.codon,log_scale=log.scale)
prob.nse.trace <- getNSEProbabilityTrace(parameter,mixture,codon,samples)
if (log.scale)
{
prob.nse.trace <- log10(prob.nse.trace)
}
param.4[codon,"Mean"] <- mean(prob.nse.trace)
param.4[codon,"Std.Dev"] <- sd(prob.nse.trace)
param.4[codon,c("Lower.quant","Upper.quant")] <- quantile(prob.nse.trace,probs=c(0.025,0.975),type=8)
}
}
colnames(param.1) <- c("Codon", "AA", "Mean","Std.Dev","2.5%", "97.5%")
colnames(param.2) <- c("Codon", "AA", "Mean","Std.Dev","2.5%", "97.5%")
colnames(param.3) <- c("Codon", "AA", "Mean","Std.Dev","2.5%", "97.5%")
colnames(param.4) <- c("Codon", "AA", "Mean","Std.Dev","2.5%", "97.5%")
## Only called if model actually uses reference codon
if(relative.to.optimal.codon && model.uses.ref.codon)
{
csp.param <- optimalAsReference(param.1,param.2,parameter.names,report.original.ref)
} else if (relative.to.optimal.codon == F || model.uses.ref.codon == F ){
## This is just in case the user wants to exclude the original reference codon
## TO DO: update C++ function which might expect certain format for the input CSP file parameters
if (model.uses.ref.codon && !report.original.ref)
{
param.1 <- param.1[-which(param.1[,"Mean"]==0),]
param.2 <- param.2[-which(param.2[,"Mean"]==0),]
}
csp.param <- vector(mode="list",length=length(parameter.names))
names(csp.param) <- parameter.names
csp.param[[parameter.names[1]]] <- param.1[,c("AA", "Codon", "Mean", "Std.Dev","2.5%", "97.5%")]
csp.param[[parameter.names[2]]] <- param.2[,c("AA", "Codon", "Mean", "Std.Dev","2.5%", "97.5%")]
if (length(parameter.names)==4)
{
csp.param[[parameter.names[3]]] <- param.3[,c("AA", "Codon", "Mean", "Std.Dev","2.5%", "97.5%")]
csp.param[[parameter.names[4]]] <- param.4[,c("AA", "Codon", "Mean", "Std.Dev","2.5%", "97.5%")]
}
}
if(is.null(filename))
{
return(csp.param)
}else {
if (log.scale)
{
suffix <- "_log_scale.csv"
} else{
suffix <- ".csv"
}
write.csv(csp.param[[parameter.names[1]]], file = paste0(filename,"_",parameter.names[1],suffix), row.names = FALSE, quote=FALSE)
write.csv(csp.param[[parameter.names[2]]], file = paste0(filename,"_",parameter.names[2],suffix), row.names = FALSE, quote=FALSE)
if (length(parameter.names)==4)
{
write.csv(csp.param[[parameter.names[3]]], file = paste0(filename,"_",parameter.names[3],suffix), row.names = FALSE, quote=FALSE)
write.csv(csp.param[[parameter.names[4]]], file = paste0(filename,"_",parameter.names[4],suffix), row.names = FALSE, quote=FALSE)
}
}
}
## NOT EXPOSED
optimalAsReference <- function(param.1,param.2,parameter.names,report.original.ref)
{
updated.param.1 <- data.frame()
updated.param.2 <- data.frame()
aa <- unique(param.2[,"AA"])
for (a in aa)
{
codons <- AAToCodon(a)
## Create temporary data frames for modifying values
tmp.1 <- param.1[codons,] ## "Mutation" parameter
tmp.2 <- param.2[codons,] ## "Selection" parameter
current.reference.row <- which(tmp.2[,"Mean"]==0)
optimal.parameter.value <- min(tmp.2[,"Mean"])
## No reason to do anything if optimal value is 0
if (optimal.parameter.value != 0.0)
{
tmp.2[,c("Mean","2.5%","97.5%")] <- tmp.2[,c("Mean","2.5%","97.5%")] - optimal.parameter.value
##Get row of the optimal codon, which should be 0
optimal.codon.row <- which(tmp.2[,"Mean"]==0.0)
tmp.2[current.reference.row,"Std.Dev"] <- tmp.2[optimal.codon.row,"Std.Dev"]
tmp.2[current.reference.row,c("2.5%","97.5%")] <- tmp.2[optimal.codon.row,c("2.5%","97.5%")] + tmp.2[current.reference.row,"Mean"]
## Can now change optimal codon values to 0.0
tmp.2[optimal.codon.row,c("Mean","Std.Dev","2.5%","97.5%")] <- 0.0
## Find corresponding reference value for other parameter
optimal.parameter.value <- tmp.1[optimal.codon.row,"Mean"]
tmp.1[current.reference.row,"Std.Dev"] <- tmp.1[optimal.codon.row,"Std.Dev"]
tmp.1[,c("Mean","2.5%","97.5%")] <- tmp.1[,c("Mean","2.5%","97.5%")] - optimal.parameter.value
tmp.1[current.reference.row,c("2.5%","97.5%")] <- tmp.1[optimal.codon.row,c("2.5%","97.5%")] + tmp.1[current.reference.row,"Mean"]
tmp.1[optimal.codon.row,c("Mean","Std.Dev","2.5%","97.5%")] <- 0.0
}
if (!report.original.ref)
{
tmp.1 <- tmp.1[-current.reference.row,]
tmp.2 <- tmp.2[-current.reference.row,]
}
updated.param.1 <- rbind(updated.param.1,tmp.1)
updated.param.2 <- rbind(updated.param.2,tmp.2)
}
csp.param <- vector(mode="list",length=2)
names(csp.param) <- parameter.names
csp.param[[parameter.names[1]]] <- updated.param.1[,c("AA", "Codon", "Mean","Std.Dev", "2.5%", "97.5%")]
csp.param[[parameter.names[2]]] <- updated.param.2[,c("AA", "Codon", "Mean","Std.Dev","2.5%", "97.5%")]
return(csp.param)
}
## NOT EXPOSED
checkModel <- function(parameter)
{
class.type <- class(parameter)
if(class(parameter)=="Rcpp_ROCParameter" || class(parameter)=="Rcpp_FONSEParameter")
{
model.uses.ref.codon <- TRUE
names.aa <- parameter$getGroupList()
codons <- unlist(lapply(names.aa,AAToCodon))
aa <- unlist(lapply(codons,codonToAA))
parameter.names <- c("Mutation","Selection")
} else if (class(parameter)=="Rcpp_PANSEParameter")
{
model.uses.ref.codon <- FALSE
codons <- parameter$getGroupList()
aa <- unlist(lapply(codons,codonToAA))
parameter.names <- c("Alpha","Lambda","NSERate","NSEProb")
} else
{
model.uses.ref.codon <- FALSE
codons <- parameter$getGroupList()
aa <- unlist(lapply(codons,codonToAA))
parameter.names <- c("Alpha","Lambda_Prime")
}
return(list(aa=aa,codons=codons,model.uses.ref.codon=model.uses.ref.codon,parameter.names=parameter.names))
}
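## Example (illustrative): for an Rcpp_ROCParameter object, checkModel returns
##   list(aa = ..., codons = ..., model.uses.ref.codon = TRUE,
##        parameter.names = c("Mutation", "Selection")),
## while an Rcpp_PANSEParameter yields parameter.names = c("Alpha", "Lambda", "NSERate", "NSEProb").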
#' Calculate Selection Coefficients
#'
#' \code{getSelectionCoefficients} calculates the selection coefficient of each codon in each gene.
#'
#' @param genome A genome object initialized with
#' \code{\link{initializeGenomeObject}}.
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param samples The number of samples used for the posterior estimates.
#'
#' @return A matrix with selection coefficients.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- 1
#' numMixtures <- 1
#' geneAssignment <- rep(1, length(genome))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' ## return estimates for selection coefficients s for each codon in each gene
#' selection.coefficients <- getSelectionCoefficients(genome = genome,
#' parameter = parameter, samples = 1000)
#' }
#'
getSelectionCoefficients <- function(genome, parameter, samples = 100)
{
sel.coef <- parameter$calculateSelectionCoefficients(samples)
grouplist <- parameter$getGroupList()
codon.names <- NULL
if(class(parameter) == "Rcpp_ROCParameter" || class(parameter) == "Rcpp_FONSEParameter")
{
for(aa in grouplist)
codon.names <- c(codon.names, AAToCodon(aa))
    sel.coef <- sel.coef[, -c(60, 61)] # The matrix is too large, as it can also hold M and W, which are not used here.
}else{
codon.names <- grouplist
}
gene.names <- getNames(genome)
colnames(sel.coef) <- codon.names
rownames(sel.coef) <- gene.names
return(sel.coef)
}
# Uses a multinomial logistic regression to estimate the codon specific parameters for every category.
# Delta M is the intercept and Delta eta is the slope of the regression.
# The package VGAM is used to perform the regression.
getCSPbyLogit <- function(codonCounts, phi, coefstart = NULL, x.arg = FALSE,
y.arg = FALSE, qr.arg = FALSE)
{
#avoid cases with 0 aa count
idx <- rowSums(codonCounts) != 0
  # performs the regression and returns Delta M and Delta eta, as well as other information not used here
ret <- vglm(codonCounts[idx, ] ~ phi[idx],
multinomial, coefstart = coefstart,
x.arg = x.arg, y.arg = y.arg, qr.arg = qr.arg)
coefficients <- ret@coefficients
## convert delta.t to delta.eta
coefficients <- -coefficients
ret <- list(coefficients = coefficients,
coef.mat = matrix(coefficients, nrow = 2, byrow = TRUE),
R = ret@R)
return(ret)
}
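## Usage sketch (illustrative, not run): assuming 'codonCounts' is a genes-by-codons
## count matrix for a single amino acid and 'phi' holds the matching per-gene
## expression values,
##   csp <- getCSPbyLogit(codonCounts, phi)
##   csp$coef.mat  # row 1: Delta M (mutation), row 2: Delta eta (selection)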
# Computes, for each element of matrix M, the index of the r-by-c block it belongs
# to (blocks numbered left to right, then top to bottom). Helper for splitMatrix.
subMatrices <- function(M, r, c){
rg <- (row(M) - 1) %/% r + 1
cg <- (col(M) - 1) %/% c + 1
rci <- (rg - 1) * max(cg) + cg
return(rci)
}
# Splits matrix M into non-overlapping r-by-c submatrices, returned as a list
# ordered left to right, then top to bottom. Assumes dim(M) is divisible by c(r, c).
splitMatrix <- function(M, r, c){
rci <- subMatrices(M, r, c)
N <- prod(dim(M)) / r / c
cv <- lapply(1:N, function(x) M[rci==x])
return(lapply(1:N, function(i) matrix(cv[[i]], nrow = r)))
}
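## Example (illustrative):
##   M <- matrix(1:16, nrow = 4)     # 4x4 matrix, filled column-major
##   blocks <- splitMatrix(M, 2, 2)  # list of four 2x2 blocks
##   blocks[[1]]                     # top-left block: rows 1-2, columns 1-2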
#' Extract the Trace Object from a Parameter Object
#'
#' @param parameter A Parameter object that corresponds to one of the model types.
#'
#' @return trace Returns an object of type Trace extracted from the given parameter object
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' trace <- getTrace(parameter) # empty trace object since no MCMC was performed
#'
getTrace <- function(parameter){
return(parameter$getTraceObject())
}
#######
### CURRENTLY NOT EXPOSED
#######
#' Initialize Covariance Matrices
#'
#' @param parameter A Parameter object that corresponds to one of the model types.
#' Valid values are "ROC", "PA", and "FONSE".
#'
#' @param genome An object of type Genome necessary for the initialization of the Parameter object.
#'
#' @param numMixtures The number of mixture elements for the underlying mixture distribution (numMixtures > 0).
#'
#' @param geneAssignment A vector holding the initial mixture assignment for each gene.
#' The vector length has to equal the number of genes in the genome.
#' Valid values for the vector range from 1 to numMixtures.
#' It is possible but not advised to leave a mixture element empty.
#'
#' @param init.csp.variance initial proposal variance for codon specific parameter, default is 0.0025.
#'
#' @return parameter Returns the Parameter argument, now modified with initialized mutation, selection, and covariance matrices.
#'
# Also initializes the mutation and selection parameters
initializeCovarianceMatrices <- function(parameter, genome, numMixtures, geneAssignment, init.csp.variance = 0.0025) {
numMutationCategory <- parameter$numMutationCategories
numSelectionCategory <- parameter$numSelectionCategories
phi <- parameter$getCurrentSynthesisRateForMixture(1) # phi values are all the same initially
names.aa <- aminoAcids()
# ct <- getInstance()
# names.aa <- ct$getGroupList()
for(aa in names.aa)
{
if(aa == "M" || aa == "W" || aa == "X") next
#should go away when CT is up and running
codonCounts <- getCodonCountsForAA(aa, genome) # ignore column with gene ids
numCodons <- dim(codonCounts)[2] - 1
#-----------------------------------------
# TODO WORKS CURRENTLY ONLY FOR ALLUNIQUE!
#-----------------------------------------
covmat <- vector("list", numMixtures)
for(mixElement in 1:numMixtures)
{
idx <- geneAssignment == mixElement
csp <- getCSPbyLogit(codonCounts[idx, ], phi[idx])
parameter$initMutation(csp$coef.mat[1,], mixElement, aa)
parameter$initSelection(csp$coef.mat[2,], mixElement, aa)
}
# One covariance matrix for all mixtures.
# Currently only variances used.
compl.covMat <- diag((numMutationCategory + numSelectionCategory) * numCodons) * init.csp.variance
parameter$initCovarianceMatrix(compl.covMat, aa)
}
return(parameter)
}
#' Returns mixture assignment estimates for each gene
#'
#' @param parameter an object created by \code{initializeParameterObject}
#'
#' @param gene.index an integer or vector of integers representing the gene(s) of interest.
#'
#' @param samples number of samples for the posterior estimate
#'
#' @return returns a vector with the mixture assignment of each gene corresponding to \code{gene.index} in the same order as the genome.
#'
#' @description Posterior estimates for the mixture assignment of specified genes
#'
#' @details The returned vector is unnamed as gene ids are only stored in the \code{genome} object,
#' but the \code{gene.index} vector can be used to match the assignment to the genome.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning, adaptive.width=adaptiveWidth,
#' est.expression=TRUE, est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' # get the mixture assignment for all genes
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = 1:length(genome), samples = 1000)
#'
#' # get the mixture assignment for a subsample
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = 5:100, samples = 1000)
#' # or
#' mixAssign <- getMixtureAssignmentEstimate(parameter = parameter,
#' gene.index = c(10, 30:50, 3, 90), samples = 1000)
#' }
#'
getMixtureAssignmentEstimate <- function(parameter, gene.index, samples)
{
mixtureAssignment <- unlist(lapply(gene.index, function(geneIndex){parameter$getEstimatedMixtureAssignmentForGene(samples, geneIndex)}))
return(mixtureAssignment)
}
#' Returns the estimated phi posterior for a gene
#'
#' @param parameter an object created by \code{initializeParameterObject}.
#'
#' @param gene.index an integer or vector of integers representing the gene(s) of interest.
#'
#' @param samples number of samples for the posterior estimate
#'
#' @param quantiles vector of quantiles, (default: c(0.025, 0.975))
#'
#' @param genome if genome is given, then will include gene ids in output (default is NULL)
#'
#' @return returns a matrix with the posterior mean, standard deviation, and quantile estimates of phi (on both natural and log10 scales) for each gene given by \code{gene.index}, in the same order as \code{gene.index}.
#'
#' @description Posterior estimates for the phi value of specified genes
#'
#' @details The returned matrix contains no gene ids unless a \code{genome} object is provided,
#' but the \code{gene.index} vector can be used to match the rows to the genome.
#'
#' @examples
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#' model <- initializeModelObject(parameter = parameter, model = "ROC")
#' samples <- 2500
#' thinning <- 50
#' adaptiveWidth <- 25
#' mcmc <- initializeMCMCObject(samples = samples, thinning = thinning,
#' adaptive.width=adaptiveWidth, est.expression=TRUE,
#' est.csp=TRUE, est.hyper=TRUE, est.mix = TRUE)
#' divergence.iteration <- 10
#' \dontrun{
#' runMCMC(mcmc = mcmc, genome = genome, model = model,
#' ncores = 4, divergence.iteration = divergence.iteration)
#'
#' # get the estimated expression values for all genes based on the mixture
#' # they are assigned to at each step
#' estimatedExpression <- getExpressionEstimates(parameter, 1:length(genome), 1000)
#' }
#'
getExpressionEstimates <- function(parameter, gene.index, samples, quantiles=c(0.025, 0.975),genome=NULL)
{
if (!is.null(genome))
{
geneID <- unlist(lapply(gene.index, function(geneIndex){
gene <- genome$getGeneByIndex(geneIndex,F);
gene$id
}))
}
expressionValues <- unlist(lapply(gene.index, function(geneIndex){
parameter$getSynthesisRatePosteriorMeanForGene(samples, geneIndex, FALSE)
}))
expressionValuesLog <- unlist(lapply(gene.index, function(geneIndex){
parameter$getSynthesisRatePosteriorMeanForGene(samples, geneIndex, TRUE)
}))
expressionStdErr <- sqrt(unlist(lapply(gene.index, function(geneIndex){
parameter$getSynthesisRateVarianceForGene(samples, geneIndex, TRUE, FALSE)
})))
expressionStdErrLog <- sqrt(unlist(lapply(gene.index, function(geneIndex){
parameter$getSynthesisRateVarianceForGene(samples, geneIndex, TRUE, TRUE)
})))
expressionQuantile <- lapply(gene.index, function(geneIndex){
parameter$getExpressionQuantile(samples, geneIndex, quantiles, FALSE)
})
expressionQuantile <- do.call(rbind, expressionQuantile)
expressionQuantileLog <- lapply(gene.index, function(geneIndex){
parameter$getExpressionQuantile(samples, geneIndex, quantiles, TRUE)
})
expressionQuantileLog <- do.call(rbind, expressionQuantileLog)
if (is.null(genome))
{
expr.mat <- cbind(expressionValues, expressionValuesLog, expressionStdErr, expressionStdErrLog, expressionQuantile, expressionQuantileLog)
colnames(expr.mat) <- c("Mean", "Mean.log10", "Std.Dev", "log10.Std.Dev", quantiles, paste("log10.", quantiles, sep=""))
} else {
expr.mat <- cbind(geneID,expressionValues, expressionValuesLog, expressionStdErr, expressionStdErrLog, expressionQuantile, expressionQuantileLog)
colnames(expr.mat) <- c("GeneID","Mean", "Mean.log10", "Std.Dev", "log10.Std.Dev", quantiles, paste("log10.", quantiles, sep=""))
}
return(expr.mat)
}
#' Write Parameter Object to a File
#'
#' @param parameter parameter on object created by \code{initializeParameterObject}.
#'
#' @param file A filename where the data will be stored.
#'
#' @return This function has no return value.
#'
#' @description \code{writeParameterObject} will write the parameter object as binary to the filesystem
#'
#' @details Since Rcpp objects are not serializable with the default R \code{save} function,
#' this custom save function is provided (see \link{loadParameterObject}).
#'
#' @examples
#' \dontrun{
#'
#' genome_file <- system.file("extdata", "genome.fasta", package = "AnaCoDa")
#'
#' genome <- initializeGenomeObject(file = genome_file)
#' sphi_init <- c(1,1)
#' numMixtures <- 2
#' geneAssignment <- c(rep(1,floor(length(genome)/2)),rep(2,ceiling(length(genome)/2)))
#' parameter <- initializeParameterObject(genome = genome, sphi = sphi_init,
#' num.mixtures = numMixtures,
#' gene.assignment = geneAssignment,
#' mixture.definition = "allUnique")
#'
#' ## writing an empty parameter object as the runMCMC routine was not called yet
#' writeParameterObject(parameter = parameter, file = file.path(tempdir(), "file.Rda"))
#'
#' }
#'
writeParameterObject <- function(parameter, file)
{
UseMethod("writeParameterObject", parameter)
}
# extracts traces and parameter information from the base class Parameter
extractBaseInfo <- function(parameter){
trace <- parameter$getTraceObject()
stdDevSynthesisRateTraces <- trace$getStdDevSynthesisRateTraces()
stdDevSynthesisRateAcceptRatTrace <- trace$getStdDevSynthesisRateAcceptanceRateTrace()
synthRateTrace <- trace$getSynthesisRateTrace()
synthAcceptRatTrace <- trace$getSynthesisRateAcceptanceRateTrace()
mixAssignTrace <- trace$getMixtureAssignmentTrace()
mixProbTrace <- trace$getMixtureProbabilitiesTrace()
codonSpecificAcceptRatTrace <- trace$getCodonSpecificAcceptanceRateTrace()
numMix <- parameter$numMixtures
numMut <- parameter$numMutationCategories
numSel <- parameter$numSelectionCategories
categories <- parameter$getCategories()
curMixAssignment <- parameter$getMixtureAssignment()
lastIteration <- parameter$getLastIteration()
grouplist <- parameter$getGroupList()
synthesisOffsetAcceptRatTrace <- trace$getSynthesisOffsetAcceptanceRateTrace()
synthesisOffsetTrace <- trace$getSynthesisOffsetTrace()
observedSynthesisNoiseTrace <- trace$getObservedSynthesisNoiseTrace()
if (length(synthesisOffsetTrace) == 0){
withPhi <- FALSE
}else{
withPhi <- TRUE
}
varList <- list(stdDevSynthesisRateTraces = stdDevSynthesisRateTraces,
stdDevSynthesisRateAcceptRatTrace = stdDevSynthesisRateAcceptRatTrace,
synthRateTrace = synthRateTrace,
synthAcceptRatTrace = synthAcceptRatTrace,
mixAssignTrace = mixAssignTrace,
mixProbTrace = mixProbTrace,
codonSpecificAcceptRatTrace = codonSpecificAcceptRatTrace,
numMix = numMix,
numMut = numMut,
numSel = numSel,
categories = categories,
curMixAssignment = curMixAssignment,
lastIteration = lastIteration,
grouplist = grouplist,
synthesisOffsetTrace = synthesisOffsetTrace,
synthesisOffsetAcceptRatTrace = synthesisOffsetAcceptRatTrace,
observedSynthesisNoiseTrace = observedSynthesisNoiseTrace,
withPhi = withPhi
)
return(varList)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_ROCParameter <- function(parameter, file){
paramBase <- extractBaseInfo(parameter)
currentMutation <- parameter$currentMutationParameter
currentSelection <- parameter$currentSelectionParameter
proposedMutation <- parameter$proposedMutationParameter
proposedSelection <- parameter$proposedSelectionParameter
model = "ROC"
mutationPrior <- parameter$getMutationPriorStandardDeviation()
trace <- parameter$getTraceObject()
mutationTrace <- trace$getCodonSpecificParameterTrace(0)
selectionTrace <- trace$getCodonSpecificParameterTrace(1)
save(list = c("paramBase", "currentMutation", "currentSelection",
"proposedMutation", "proposedSelection", "model",
"mutationPrior", "mutationTrace", "selectionTrace"),
file=file)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_PAParameter <- function(parameter, file){
paramBase <- extractBaseInfo(parameter)
currentAlpha <- parameter$currentAlphaParameter
currentLambdaPrime <- parameter$currentLambdaPrimeParameter
proposedAlpha <- parameter$proposedAlphaParameter
proposedLambdaPrime <- parameter$proposedLambdaPrimeParameter
model = "PA"
trace <- parameter$getTraceObject()
alphaTrace <- trace$getCodonSpecificParameterTrace(0)
lambdaPrimeTrace <- trace$getCodonSpecificParameterTrace(1)
save(list = c("paramBase", "currentAlpha", "currentLambdaPrime", "proposedAlpha",
"proposedLambdaPrime", "model", "alphaTrace", "lambdaPrimeTrace"),
file=file)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_PANSEParameter <- function(parameter, file){
paramBase <- extractBaseInfo(parameter)
currentAlpha <- parameter$currentAlphaParameter
currentLambdaPrime <- parameter$currentLambdaPrimeParameter
currentNSERate <- parameter$currentNSERateParameter
proposedAlpha <- parameter$proposedAlphaParameter
proposedLambdaPrime <- parameter$proposedLambdaPrimeParameter
proposedNSERate <- parameter$proposedNSERateParameter
model <- "PANSE"
trace <- parameter$getTraceObject()
alphaTrace <- trace$getCodonSpecificParameterTrace(0)
lambdaPrimeTrace <- trace$getCodonSpecificParameterTrace(1)
NSERateTrace <- trace$getCodonSpecificParameterTrace(2)
partitionTrace <- trace$getPartitionFunctionTraces()
nseSpecificAcceptRatTrace <- trace$getNseRateSpecificAcceptanceRateTrace()
save(list = c("paramBase", "currentAlpha", "currentLambdaPrime", "currentNSERate", "proposedAlpha",
"proposedLambdaPrime", "proposedNSERate", "model", "alphaTrace", "lambdaPrimeTrace","NSERateTrace","partitionTrace","nseSpecificAcceptRatTrace"),
file=file)
}
#called from "writeParameterObject."
writeParameterObject.Rcpp_FONSEParameter <- function(parameter, file)
{
paramBase <- extractBaseInfo(parameter)
currentMutation <- parameter$currentMutationParameter
currentSelection <- parameter$currentSelectionParameter
proposedMutation <- parameter$proposedMutationParameter
proposedSelection <- parameter$proposedSelectionParameter
model = "FONSE"
mutationPrior <- parameter$getMutationPriorStandardDeviation()
trace <- parameter$getTraceObject()
mutationTrace <- trace$getCodonSpecificParameterTrace(0)
selectionTrace <- trace$getCodonSpecificParameterTrace(1)
initiationCostTrace <- trace$getInitiationCostTrace()
save(list = c("paramBase", "currentMutation", "currentSelection",
"model","mutationPrior", "mutationTrace", "selectionTrace","initiationCostTrace"),
file=file)
}
#' Load Parameter Object
#'
#' @param files A list of parameter filenames to be loaded. If multiple files are given,
#' the parameter objects will be concatenated in the order provided
#'
#' @return Returns an initialized Parameter object.
#'
#' @description \code{loadParameterObject} will load a parameter object from the filesystem
#'
#' @details The function loads one or multiple files. In the case of multiple file, e.g. due to the use of check pointing, the files will
#' be concatenated to one parameter object. See \link{writeParameterObject} for the writing of parameter objects
#'
#' @examples
#' \dontrun{
#' # load a single parameter object
#' parameter <- loadParameterObject("parameter.Rda")
#'
#' # load and concatenate multiple parameter object
#' parameter <- loadParameterObject(c("parameter1.Rda", "parameter2.Rda"))
#' }
#'
loadParameterObject <- function(files)
{
  # Each file is loaded into its own temporary environment to avoid name clashes and R errors.
firstModel <- "Invalid model"
for (i in 1:length(files)){
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
if (i == 1){
firstModel <- tempEnv$model
}else{
if (firstModel != tempEnv$model){
stop("The models do not match between files")
}#end of error check
}#end of if-else
}#end of for
if (firstModel == "ROC"){
parameter <- new(ROCParameter)
parameter <- loadROCParameterObject(parameter, files)
}else if (firstModel == "PA") {
parameter <- new(PAParameter)
parameter <- loadPAParameterObject(parameter, files)
}else if (firstModel == "PANSE") {
parameter <- new(PANSEParameter)
parameter <- loadPANSEParameterObject(parameter, files)
}else if (firstModel == "FONSE") {
parameter <- new(FONSEParameter)
parameter <- loadFONSEParameterObject(parameter, files)
}else{
stop("File data corrupted")
}
return(parameter)
}
#Sets all the common variables in the Parameter objects.
setBaseInfo <- function(parameter, files)
{
for (i in 1:length(files)) {
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
if (i == 1) {
categories <- tempEnv$paramBase$categories
categories.matrix <- do.call("rbind", tempEnv$paramBase$categories)
numMixtures <- tempEnv$paramBase$numMix
numMutationCategories <- tempEnv$paramBase$numMut
numSelectionCategories <- tempEnv$paramBase$numSel
mixtureAssignment <- tempEnv$paramBase$curMixAssignment
lastIteration <- tempEnv$paramBase$lastIteration
max <- tempEnv$paramBase$lastIteration + 1
grouplist <- tempEnv$paramBase$grouplist
stdDevSynthesisRateTraces <- vector("list", length = numSelectionCategories)
for (j in 1:numSelectionCategories) {
stdDevSynthesisRateTraces[[j]] <- tempEnv$paramBase$stdDevSynthesisRateTraces[[j]][1:max]
}
stdDevSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$stdDevSynthesisRateAcceptRatTrace
synthesisRateTrace <- vector("list", length = numSelectionCategories)
for (j in 1:numSelectionCategories) {
for (k in 1:length(tempEnv$paramBase$synthRateTrace[[j]])){
synthesisRateTrace[[j]][[k]] <- tempEnv$paramBase$synthRateTrace[[j]][[k]][1:max]
}
}
synthesisRateAcceptanceRateTrace <- tempEnv$paramBase$synthAcceptRatTrace
mixtureAssignmentTrace <- vector("list", length = length(tempEnv$paramBase$mixAssignTrace))
for (j in 1:length(tempEnv$paramBase$mixAssignTrace)){
mixtureAssignmentTrace[[j]] <- tempEnv$paramBase$mixAssignTrace[[j]][1:max]
}
mixtureProbabilitiesTrace <- c()
for (j in 1:numMixtures) {
mixtureProbabilitiesTrace[[j]] <- tempEnv$paramBase$mixProbTrace[[j]][1:max]
}
codonSpecificAcceptanceRateTrace <- tempEnv$paramBase$codonSpecificAcceptRatTrace
withPhi <- tempEnv$paramBase$withPhi
if (withPhi){
        phiGroups <- length(tempEnv$paramBase$synthesisOffsetTrace)
synthesisOffsetTrace <- c()
for (j in 1:phiGroups) {
synthesisOffsetTrace[[j]] <- tempEnv$paramBase$synthesisOffsetTrace[[j]][1:max]
}
synthesisOffsetAcceptanceRateTrace <- tempEnv$paramBase$synthesisOffsetAcceptRatTrace
observedSynthesisNoiseTrace <- c()
for (j in 1:phiGroups) {
observedSynthesisNoiseTrace[[j]] <- tempEnv$paramBase$observedSynthesisNoiseTrace[[j]][1:max]
}
#need number of phi groups, not the number of mixtures apparently.
}else {
synthesisOffsetTrace <- c()
synthesisOffsetAcceptanceRateTrace <- c()
observedSynthesisNoiseTrace <- c()
}
} else {
if (sum(categories.matrix != do.call("rbind", tempEnv$paramBase$categories)) != 0){
stop("categories is not the same between all files")
}#end of error check
if (numMixtures != tempEnv$paramBase$numMix){
stop("The number of mixtures is not the same between files")
}
if (numMutationCategories != tempEnv$paramBase$numMut){
stop("The number of mutation categories is not the same between files")
}
if (numSelectionCategories != tempEnv$paramBase$numSel){
stop("The number of selection categories is not the same between files")
}
if (length(mixtureAssignment) != length(tempEnv$paramBase$curMixAssignment)){
stop("The length of the mixture assignment is not the same between files.
Make sure the same genome is used on each run.")
}
if(length(grouplist) != length(tempEnv$paramBase$grouplist)){
stop("Number of Amino Acids/Codons is not the same between files.")
}
if (withPhi != tempEnv$paramBase$withPhi){
stop("Runs do not match in concern in with.phi")
}
curSynthesisOffsetTrace <- tempEnv$paramBase$synthesisOffsetTrace
curSynthesisOffsetAcceptanceRateTrace <- tempEnv$paramBase$synthesisOffsetAcceptRatTrace
curObservedSynthesisNoiseTrace <- tempEnv$paramBase$observedSynthesisNoiseTrace
if (withPhi){
        synthesisOffsetTrace <- combineTwoDimensionalTrace(synthesisOffsetTrace, curSynthesisOffsetTrace, max)
        size <- length(curSynthesisOffsetAcceptanceRateTrace)
        synthesisOffsetAcceptanceRateTrace <- combineTwoDimensionalTrace(synthesisOffsetAcceptanceRateTrace, curSynthesisOffsetAcceptanceRateTrace, size)
        observedSynthesisNoiseTrace <- combineTwoDimensionalTrace(observedSynthesisNoiseTrace, curObservedSynthesisNoiseTrace, max)
}
curStdDevSynthesisRateTraces <- tempEnv$paramBase$stdDevSynthesisRateTraces
curStdDevSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$stdDevSynthesisRateAcceptRatTrace
curSynthesisRateTrace <- tempEnv$paramBase$synthRateTrace
curSynthesisRateAcceptanceRateTrace <- tempEnv$paramBase$synthAcceptRatTrace
curMixtureAssignmentTrace <- tempEnv$paramBase$mixAssignTrace
curMixtureProbabilitiesTrace <- tempEnv$paramBase$mixProbTrace
curCodonSpecificAcceptanceRateTrace <- tempEnv$paramBase$codonSpecificAcceptRatTrace
lastIteration <- lastIteration + tempEnv$paramBase$lastIteration
#assuming all checks have passed, time to concatenate traces
max <- tempEnv$paramBase$lastIteration + 1
      # the combine* helpers return modified copies, so the results must be reassigned
      stdDevSynthesisRateTraces <- combineTwoDimensionalTrace(stdDevSynthesisRateTraces, curStdDevSynthesisRateTraces, max)
size <- length(curStdDevSynthesisRateAcceptanceRateTrace)
stdDevSynthesisRateAcceptanceRateTrace <- c(stdDevSynthesisRateAcceptanceRateTrace,
curStdDevSynthesisRateAcceptanceRateTrace[2:size])
      synthesisRateTrace <- combineThreeDimensionalTrace(synthesisRateTrace, curSynthesisRateTrace, max)
      size <- length(curSynthesisRateAcceptanceRateTrace)
      synthesisRateAcceptanceRateTrace <- combineThreeDimensionalTrace(synthesisRateAcceptanceRateTrace, curSynthesisRateAcceptanceRateTrace, size)
      mixtureAssignmentTrace <- combineTwoDimensionalTrace(mixtureAssignmentTrace, curMixtureAssignmentTrace, max)
      mixtureProbabilitiesTrace <- combineTwoDimensionalTrace(mixtureProbabilitiesTrace, curMixtureProbabilitiesTrace, max)
      size <- length(curCodonSpecificAcceptanceRateTrace)
      codonSpecificAcceptanceRateTrace <- combineTwoDimensionalTrace(codonSpecificAcceptanceRateTrace, curCodonSpecificAcceptanceRateTrace, size)
}
}
parameter$setCategories(categories)
parameter$setCategoriesForTrace()
parameter$numMixtures <- numMixtures
parameter$numMutationCategories <- numMutationCategories
parameter$numSelectionCategories <- numSelectionCategories
parameter$setMixtureAssignment(tempEnv$paramBase$curMixAssignment) #want the last in the file sequence
parameter$setLastIteration(lastIteration)
parameter$setGroupList(grouplist)
trace <- parameter$getTraceObject()
trace$setStdDevSynthesisRateTraces(stdDevSynthesisRateTraces)
trace$setStdDevSynthesisRateAcceptanceRateTrace(stdDevSynthesisRateAcceptanceRateTrace)
trace$setSynthesisRateTrace(synthesisRateTrace)
trace$setSynthesisRateAcceptanceRateTrace(synthesisRateAcceptanceRateTrace)
trace$setSynthesisOffsetTrace(synthesisOffsetTrace)
trace$setSynthesisOffsetAcceptanceRateTrace(synthesisOffsetAcceptanceRateTrace)
trace$setObservedSynthesisNoiseTrace(observedSynthesisNoiseTrace)
trace$setMixtureAssignmentTrace(mixtureAssignmentTrace)
trace$setMixtureProbabilitiesTrace(mixtureProbabilitiesTrace)
trace$setCodonSpecificAcceptanceRateTrace(codonSpecificAcceptanceRateTrace)
parameter$setTraceObject(trace)
return(parameter)
}
#Called from "loadParameterObject."
loadROCParameterObject <- function(parameter, files)
{
parameter <- setBaseInfo(parameter, files)
for (i in 1:length(files)){
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
numMutationCategories <- tempEnv$paramBase$numMut
numSelectionCategories <- tempEnv$paramBase$numSel
max <- tempEnv$paramBase$lastIteration + 1
if (i == 1){
codonSpecificParameterTraceMut <- vector("list", length=numMutationCategories)
for (j in 1:numMutationCategories) {
codonSpecificParameterTraceMut[[j]] <- vector("list", length=length(tempEnv$mutationTrace[[j]]))
for (k in 1:length(tempEnv$mutationTrace[[j]])){
codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
}
}
codonSpecificParameterTraceSel <- vector("list", length=numSelectionCategories)
for (j in 1:numSelectionCategories) {
codonSpecificParameterTraceSel[[j]] <- vector("list", length=length(tempEnv$selectionTrace[[j]]))
for (k in 1:length(tempEnv$selectionTrace[[j]])){
codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
}
}
}else{
curCodonSpecificParameterTraceMut <- tempEnv$mutationTrace
curCodonSpecificParameterTraceSel <- tempEnv$selectionTrace
      codonSpecificParameterTraceMut <- combineThreeDimensionalTrace(codonSpecificParameterTraceMut, curCodonSpecificParameterTraceMut, max)
      codonSpecificParameterTraceSel <- combineThreeDimensionalTrace(codonSpecificParameterTraceSel, curCodonSpecificParameterTraceSel, max)
}#end of if-else
}#end of for loop (files)
trace <- parameter$getTraceObject()
trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceMut, 0)
trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceSel, 1)
parameter$currentMutationParameter <- tempEnv$currentMutation
parameter$currentSelectionParameter <- tempEnv$currentSelection
parameter$proposedMutationParameter <- tempEnv$proposedMutation
parameter$proposedSelectionParameter <- tempEnv$proposedSelection
parameter$setTraceObject(trace)
return(parameter)
}
#Called from "loadParameterObject."
loadPAParameterObject <- function(parameter, files)
{
parameter <- setBaseInfo(parameter, files)
for (i in 1:length(files)){
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
max <- tempEnv$paramBase$lastIteration + 1
numMixtures <- tempEnv$paramBase$numMix
numMutationCategories <- tempEnv$paramBase$numMut
numSelectionCategories <- tempEnv$paramBase$numSel
if (i == 1){
      #for future use: This may break if PA is run with more than
#one mixture, in this case just follow the format of the
#ROC CSP parameters.
alphaTrace <- vector("list", length=numMutationCategories)
for (j in 1:numMutationCategories) {
for (k in 1:length(tempEnv$alphaTrace[[j]])){
alphaTrace[[j]][[k]] <- tempEnv$alphaTrace[[j]][[k]][1:max]
}
}
lambdaPrimeTrace <- vector("list", length=numSelectionCategories)
for (j in 1:numSelectionCategories) {
for (k in 1:length(tempEnv$lambdaPrimeTrace[[j]])){
lambdaPrimeTrace[[j]][[k]] <- tempEnv$lambdaPrimeTrace[[j]][[k]][1:max]
}
}
}else{
curAlphaTrace <- tempEnv$alphaTrace
curLambdaPrimeTrace <- tempEnv$lambdaPrimeTrace
      alphaTrace <- combineThreeDimensionalTrace(alphaTrace, curAlphaTrace, max)
      lambdaPrimeTrace <- combineThreeDimensionalTrace(lambdaPrimeTrace, curLambdaPrimeTrace, max)
}
}#end of for loop (files)
parameter$currentAlphaParameter <- tempEnv$currentAlpha
parameter$proposedAlphaParameter <- tempEnv$proposedAlpha
parameter$currentLambdaPrimeParameter <- tempEnv$currentLambdaPrime
parameter$proposedLambdaPrimeParameter <- tempEnv$proposedLambdaPrime
trace <- parameter$getTraceObject()
trace$setCodonSpecificParameterTrace(alphaTrace, 0)
trace$setCodonSpecificParameterTrace(lambdaPrimeTrace, 1)
parameter$setTraceObject(trace)
return(parameter)
}
loadPANSEParameterObject <- function(parameter, files)
{
parameter <- setBaseInfo(parameter, files)
for (i in 1:length(files)){
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
max <- tempEnv$paramBase$lastIteration + 1
numMixtures <- tempEnv$paramBase$numMix
numMutationCategories <- tempEnv$paramBase$numMut
numSelectionCategories <- tempEnv$paramBase$numSel
if (i == 1)
{
      #for future use: This may break if PANSE is run with more than
#one mixture, in this case just follow the format of the
#ROC CSP parameters.
alphaTrace <- vector("list", length=numMutationCategories)
for (j in 1:numMutationCategories) {
for (k in 1:length(tempEnv$alphaTrace[[j]])){
alphaTrace[[j]][[k]] <- tempEnv$alphaTrace[[j]][[k]][1:max]
}
}
lambdaPrimeTrace <- vector("list", length=numSelectionCategories)
for (j in 1:numSelectionCategories) {
for (k in 1:length(tempEnv$lambdaPrimeTrace[[j]])){
lambdaPrimeTrace[[j]][[k]] <- tempEnv$lambdaPrimeTrace[[j]][[k]][1:max]
}
}
NSERateTrace <- vector("list", length=numMutationCategories)
for (j in 1:numMutationCategories) {
for (k in 1:length(tempEnv$NSERateTrace[[j]])){
NSERateTrace[[j]][[k]] <- tempEnv$NSERateTrace[[j]][[k]][1:max]
}
}
partitionTrace <- vector("list", length = numSelectionCategories)
for (j in 1:numSelectionCategories) {
partitionTrace[[j]] <- tempEnv$partitionTrace[[j]][1:max]
}
nseSpecificAcceptanceRateTrace <- tempEnv$nseSpecificAcceptRatTrace
}else{
curAlphaTrace <- tempEnv$alphaTrace
curLambdaPrimeTrace <- tempEnv$lambdaPrimeTrace
curNSERateTrace <- tempEnv$NSERateTrace
      alphaTrace <- combineThreeDimensionalTrace(alphaTrace, curAlphaTrace, max)
      lambdaPrimeTrace <- combineThreeDimensionalTrace(lambdaPrimeTrace, curLambdaPrimeTrace, max)
      NSERateTrace <- combineThreeDimensionalTrace(NSERateTrace, curNSERateTrace, max)
      # note: partitionTrace and nseSpecificAcceptanceRateTrace are taken from the first file only
}
}#end of for loop (files)
parameter$currentAlphaParameter <- tempEnv$currentAlpha
parameter$proposedAlphaParameter <- tempEnv$proposedAlpha
parameter$currentLambdaPrimeParameter <- tempEnv$currentLambdaPrime
parameter$proposedLambdaPrimeParameter <- tempEnv$proposedLambdaPrime
parameter$proposedNSERateParameter <- tempEnv$proposedNSERate
parameter$currentNSERateParameter <- tempEnv$currentNSERate
trace <- parameter$getTraceObject()
trace$resizeNumberCodonSpecificParameterTrace(3)
trace$setCodonSpecificParameterTrace(alphaTrace, 0)
trace$setCodonSpecificParameterTrace(lambdaPrimeTrace, 1)
trace$setCodonSpecificParameterTrace(NSERateTrace,2)
trace$setNseRateSpecificAcceptanceRateTrace(nseSpecificAcceptanceRateTrace)
trace$setPartitionFunctionTraces(partitionTrace)
parameter$setTraceObject(trace)
return(parameter)
}
#Called from "loadParameterObject."
loadFONSEParameterObject <- function(parameter, files)
{
parameter <- setBaseInfo(parameter, files)
for (i in 1:length(files)){
tempEnv <- new.env();
load(file = files[i], envir = tempEnv)
numMutationCategories <- tempEnv$paramBase$numMut
numSelectionCategories <- tempEnv$paramBase$numSel
max <- tempEnv$paramBase$lastIteration + 1
if (i == 1){
codonSpecificParameterTraceMut <- vector("list", length=numMutationCategories)
for (j in 1:numMutationCategories) {
codonSpecificParameterTraceMut[[j]] <- vector("list", length=length(tempEnv$mutationTrace[[j]]))
for (k in 1:length(tempEnv$mutationTrace[[j]])){
codonSpecificParameterTraceMut[[j]][[k]] <- tempEnv$mutationTrace[[j]][[k]][1:max]
}
}
codonSpecificParameterTraceSel <- vector("list", length=numSelectionCategories)
for (j in 1:numSelectionCategories) {
codonSpecificParameterTraceSel[[j]] <- vector("list", length=length(tempEnv$selectionTrace[[j]]))
for (k in 1:length(tempEnv$selectionTrace[[j]])){
codonSpecificParameterTraceSel[[j]][[k]] <- tempEnv$selectionTrace[[j]][[k]][1:max]
}
}
}else{
curCodonSpecificParameterTraceMut <- tempEnv$mutationTrace
curCodonSpecificParameterTraceSel <- tempEnv$selectionTrace
      codonSpecificParameterTraceMut <- combineThreeDimensionalTrace(codonSpecificParameterTraceMut, curCodonSpecificParameterTraceMut, max)
      codonSpecificParameterTraceSel <- combineThreeDimensionalTrace(codonSpecificParameterTraceSel, curCodonSpecificParameterTraceSel, max)
}#end of if-else
}#end of for loop (files)
trace <- parameter$getTraceObject()
trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceMut, 0)
trace$setCodonSpecificParameterTrace(codonSpecificParameterTraceSel, 1)
trace$setInitiationCostTrace(tempEnv$initiationCostTrace)
parameter$currentMutationParameter <- tempEnv$currentMutation
parameter$currentSelectionParameter <- tempEnv$currentSelection
##parameter$proposedMutationParameter <- tempEnv$proposedMutation
##parameter$proposedSelectionParameter <- tempEnv$proposedSelection
parameter$setTraceObject(trace)
return(parameter)
}
#' Take the geometric mean of a vector
#'
#' @param x A vector of numerical values.
#'
#' @param rm.invalid Boolean value for handling 0, negative, or NA values in the vector. Default is TRUE and will not
#' include these values in the calculation. If FALSE, these values will be replaced by the value given to \code{default} and will
#' be included in the calculation.
#'
#' @param default Numerical value that serves as the value to replace 0, negative, or NA values in the calculation when rm.invalid is FALSE.
#' Default is 1e-5.
#'
#' @return Returns the geometric mean of a vector.
#'
#' @description \code{geomMean} will calculate the geometric mean of a list of numerical values.
#'
#' @details This function is a special version of the geometric mean specifically for AnaCoDa.
#' Most models in AnaCoDa assume a log normal distribution for phi values, thus all values in \code{x} are expected to be positive.
#' geomMean returns the geometric mean of a vector and can handle 0, negative, or NA values.
#'
#' @examples
#' x <- c(1, 2, 3, 4)
#' geomMean(x)
#'
#' y<- c(1, NA, 3, 4, 0, -1)
#' # Only take the mean of non-NA values greater than 0
#' geomMean(y)
#'
#' # Replace values <= 0 or NAs with a default value 0.001 and then take the mean
#' geomMean(y, rm.invalid = FALSE, default = 0.001)
#'
geomMean <- function(x, rm.invalid = TRUE, default = 1e-5)
{
if(!rm.invalid)
{
x[x <= 0 | is.na(x)] <- default
} else{
x <- x[which(x > 0 & !is.na(x))]
}
  # computed in log space to avoid numerical overflow for long vectors
  total <- exp(mean(log(x)))
return(total)
}
#Intended to combine 2D traces (vector of vectors) read in from C++. The first
#element of the second trace is omitted since it should be the same as the
#last value of the first trace. The third (positional) argument is the end index,
#matching combineThreeDimensionalTrace.
combineTwoDimensionalTrace <- function(trace1, trace2, end = NULL, start = 2){
  if(is.null(end))
  {
    end <- length(trace2[[1]])
  }
  if(start < 2)
  {
    print("Start must be at least 2 because the last element of the first trace is the first of the second trace. Setting start = 2.")
    start <- 2
  }
  if(end <= start)
  {
    print("End must be greater than start. Setting end to the length of trace2.")
    end <- length(trace2[[1]])
  }
  for (size in 1:length(trace1))
  {
    trace1[[size]] <- c(trace1[[size]], trace2[[size]][start:end])
  }
  return(trace1)
}
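## Example (illustrative):
##   t1 <- list(c(1, 2, 3))
##   t2 <- list(c(3, 4, 5))  # first element repeats the last element of t1
##   combineTwoDimensionalTrace(t1, t2, end = 3)  # -> list(c(1, 2, 3, 4, 5))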
#Intended to combine 3D traces (vector of vectors of vectors) read in from C++. The first
#element of the second trace is omitted since it should be the same as the
#last value of the first trace.
combineThreeDimensionalTrace <- function(trace1, trace2, max){
  for (size in 1:length(trace1)){
    for (sizeTwo in 1:length(trace1[[size]])){
      trace1[[size]][[sizeTwo]] <- c(trace1[[size]][[sizeTwo]],
                                     trace2[[size]][[sizeTwo]][2:max])
    }
  }
  return(trace1)
}
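## Example (illustrative):
##   t1 <- list(list(c(1, 2, 3)))
##   t2 <- list(list(c(3, 4, 5)))
##   combineThreeDimensionalTrace(t1, t2, max = 3)  # inner vector becomes c(1, 2, 3, 4, 5)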
## ---- end of R/parameterObject.R ----
#' Plot MCMC algorithm
#'
#' @param x An Rcpp_MCMC object initialized with \code{initializeMCMCObject}.
#'
#' @param zoom.window A vector describing the start and end of the zoom window.
#'
#' @param what character defining whether the log(Posterior) (default) or log(Likelihood) trace is plotted.
#' options are: LogPosterior or LogLikelihood
#'
#' @param ... Arguments to be passed to methods, such as graphical parameters.
#'
#' @return This function has no return value.
#'
#' @description This function will plot the logLikelihood trace, and if the Hmisc package is installed, it will
#' plot a subplot of the logLikelihood trace with the first few samples removed.
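#'
#' @examples
#' \dontrun{
#' # assuming 'mcmc' is an Rcpp_MCMCAlgorithm object after runMCMC has finished
#' plot(mcmc, what = "LogPosterior", zoom.window = c(9000, 10000))
#' }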
plot.Rcpp_MCMCAlgorithm <- function(x, what = "LogPosterior", zoom.window = NULL, ...)
{
if(what[1] == "LogPosterior")
{
trace <- x$getLogPosteriorTrace()
ylab = "log(Posterior Probability)"
}else{
    trace <- x$getLogLikelihoodTrace()
    ylab = "log(Likelihood)"
}
trace <- trace[-1]
trace.length <- length(trace)
zoomStart <- round(0.9*trace.length)
zoomEnd <- trace.length
logL <- mean(trace[zoomStart:trace.length])
#TODO change main title
plot(trace, type="l", main=paste0(ylab, ": ", logL), xlab="Sample", ylab=ylab)
grid (NULL,NULL, lty = 6, col = "cornsilk2")
trace[trace == -Inf] <- NA
  # TODO (Cedric): get rid of that line once problem with first element being 0 is solved
trace <- trace[-1]
if(!(is.null(zoom.window))) {
zoomStart <- zoom.window[1]
zoomEnd <- zoom.window[2]
}
else{
warning("No window was given, zooming in at last 10% of trace")
}
Hmisc::subplot(
plot(zoomStart:zoomEnd, trace[zoomStart:zoomEnd], type="l", xlab=NA, ylab=NA, las=2, cex.axis=0.55),
0.8*(round(0.9*trace.length)), (min(trace, na.rm = T)+max(trace, na.rm = T))/2, size=c(3,2))
}
#' Autocorrelation function for the likelihood or posterior trace
#'
#' @param mcmc object of class MCMC
#' @param type "LogPosterior" or "LogLikelihood", defaults to "LogPosterior"
#' @param samples number of samples at the end of the trace used to calculate the acf. If NULL (default), 10*log10(N) is used, where N is the length of the trace.
#' @param lag.max maximum lag at which to calculate the acf. Default is 40.
#' @param plot logical. If TRUE (default) a plot of the acf is created
#'
#' @description The function calculates and by defaults plots the acf and estimates the autocorrelation in the trace.
#'
#' @seealso \code{\link{acfCSP}}
#'
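#' @examples
#' \dontrun{
#' # assuming 'mcmc' is an MCMC object after runMCMC has finished
#' acfMCMC(mcmc, type = "LogPosterior", samples = 500, lag.max = 40)
#' }
#'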
acfMCMC <- function(mcmc, type = "LogPosterior", samples = NULL, lag.max = 40, plot = TRUE)
{
if(type == "LogPosterior")
{
trace <- mcmc$getLogPosteriorTrace()
}else{
trace <- mcmc$getLogLikelihoodTrace()
}
if(is.null(samples)){ samples <- round(10*log10(length(trace))) }
trace <- trace[(length(trace)-samples):length(trace)]
trace.acf <- acf(x = trace, lag.max = lag.max, plot = FALSE)
if(plot){
header <- paste(type, "Trace Autocorrelation",sep=" ")
plot(x = trace.acf, xlab = "Lag time", ylab = "Autocorrelation", main = header)
}else{
return(trace.acf)
}
}
## ---- end of R/plotMCMCAlgorithmObject.R ----
#' Plot Model Object
#'
#' @param x An Rcpp model object initialized with \code{initializeModelObject}.
#' @param genome An Rcpp genome object initialized with \code{initializeGenomeObject}.
#' @param samples The number of samples in the trace
#' @param mixture The mixture for which to graph values.
#' @param simulated A boolean value that determines whether to use the simulated genome.
#' @param ... Optional, additional arguments.
#' For this function, a possible title for the plot in the form of a list if set with "main".
#'
#' @return This function has no return value.
#'
#' @description Plots traces from the model object such as synthesis rates for each gene.
#' Will work regardless of whether or not expression/synthesis rate levels are being
#' estimated. If you wish to plot observed/empirical values, these values MUST be set
#' using the initial.expression.values parameter found in initializeParameterObject.
#' Otherwise, the expression values plotted will just be SCUO values estimated upon
#' initialization of the Parameter object.
plot.Rcpp_ROCModel <- function(x, genome = NULL, samples = 100, mixture = 1,
simulated = FALSE, ...)
{
model <- x
opar <- par(no.readonly = T)
input_list <- as.list(list(...))
if("main" %in% names(input_list)){
main <- input_list$main
input_list$main <- NULL
}else{
main <- ""
}
mat <- matrix(c(rep(1, 4), 2:21, rep(22, 4)),
nrow = 7, ncol = 4, byrow = TRUE)
mat <- cbind(rep(23, 7), mat, rep(24, 7))
nf <- layout(mat, c(3, rep(8, 4), 2), c(3, 8, 8, 8, 8, 8, 3), respect = FALSE)
### Plot title.
par(mar = c(0, 0, 0, 0))
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.6, main)
text(0.5, 0.4, date(), cex = 0.6)
num.genes <- length(genome)
parameter <- model$getParameter()
mixtureAssignment <- unlist(lapply(1:num.genes, function(geneIndex){parameter$getEstimatedMixtureAssignmentForGene(samples, geneIndex)}))
genes.in.mixture <- which(mixtureAssignment == mixture)
expressionCategory <- parameter$getSynthesisRateCategoryForMixture(mixture)
# need expression values to know range
num.genes <- length(genes.in.mixture)
expressionValues <- unlist(lapply(genes.in.mixture, function(geneIndex){
parameter$getSynthesisRatePosteriorMeanForGene(samples, geneIndex, FALSE)
}))
expressionValues <- log10(expressionValues)
genome <- genome$getGenomeForGeneIndices(genes.in.mixture, simulated)
names.aa <- aminoAcids()
for(aa in names.aa)
{
if(aa == "M" || aa == "W" || aa == "X") next
codon.probability <- calculateProbabilityVector(parameter,model,expressionValues,mixture,samples,aa,model.type="ROC")
xlimit <- plotSinglePanel(parameter, model, genome, expressionValues, samples, mixture, aa,codon.probability = codon.probability)
box()
main.aa <- aa #TODO map to three letter code
text(mean(xlimit), 1, main.aa, cex = 1.5)
if(aa %in% c("A", "F", "K", "Q", "V")){
axis(2, las=1)
}
if(aa %in% c("T", "V", "Y", "Z")){
axis(1)
}
if(aa %in% c("A", "C", "D", "E")){
axis(3)
}
if(aa %in% c("E", "I", "P", "T")){
axis(4, las=1)
}
axis(1, tck = 0.02, labels = FALSE)
axis(2, tck = 0.02, labels = FALSE)
axis(3, tck = 0.02, labels = FALSE)
axis(4, tck = 0.02, labels = FALSE)
}
## adding a histogram of phi values to plot
hist.values <- hist(expressionValues, plot=FALSE, nclass=30)
plot(hist.values, axes = FALSE, main = "", xlab = "", ylab = "")
axis(1)
axis(4, las=1)
### Plot xlab.
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.2, expression("log"[10]~"(Protein Synthesis Rate"~phi~")"))
#text(0.5, 0.5, "Production Rate (log10)")
### Plot ylab.
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.5, "Propotion", srt = 90)
par(opar)
}
#' Plot Model Object
#'
#' @param x An Rcpp model object initialized with \code{initializeModelObject}.
#'
#' @param genome An Rcpp genome object initialized with \code{initializeGenomeObject}.
#'
#' @param samples The number of samples in the trace
#'
#' @param mixture The mixture for which to graph values.
#'
#' @param simulated A boolean value that determines whether to use the simulated genome.
#'
#' @param codon.window A boolean value that determines the codon window to use for calculating codon frequencies. If NULL (the default), use complete sequences.
#'
#' @param ... Optional, additional arguments.
#' For this function, a possible title for the plot in the form of a list if set with "main".
#'
#' @return This function has no return value.
#'
#' @description Plots traces from the model object such as synthesis rates for each gene.
#' Will work regardless of whether or not expression/synthesis rate levels are being
#' estimated. If you wish to plot observed/empirical values, these values MUST be set
#' using the initial.expression.values parameter found in initializeParameterObject.
#' Otherwise, the expression values plotted will just be SCUO values estimated upon
#' initialization of the Parameter object.
#'
plot.Rcpp_FONSEModel <- function(x, genome, samples = 100, mixture = 1,
simulated = FALSE, codon.window = NULL,...)
{
model <- x
opar <- par(no.readonly = T)
input_list <- as.list(list(...))
if("main" %in% names(input_list)){
main <- input_list$main
input_list$main <- NULL
}else{
main <- ""
}
mat <- matrix(c(rep(1, 4), 2:21, rep(22, 4)),
nrow = 7, ncol = 4, byrow = TRUE)
mat <- cbind(rep(23, 7), mat, rep(24, 7))
nf <- layout(mat, c(3, rep(8, 4), 2), c(3, 8, 8, 8, 8, 8, 3), respect = FALSE)
### Plot title.
par(mar = c(0, 0, 0, 0))
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.6, main)
text(0.5, 0.4, date(), cex = 0.6)
num.genes <- length(genome)
parameter <- model$getParameter()
mixtureAssignment <- unlist(lapply(1:num.genes, function(geneIndex){parameter$getEstimatedMixtureAssignmentForGene(samples, geneIndex)}))
genes.in.mixture <- which(mixtureAssignment == mixture)
expressionCategory <- parameter$getSynthesisRateCategoryForMixture(mixture)
# need expression values to know range
num.genes <- length(genes.in.mixture)
expressionValues <- unlist(lapply(genes.in.mixture, function(geneIndex){
parameter$getSynthesisRatePosteriorMeanForGene(samples, geneIndex, FALSE)
}))
expressionValues <- log10(expressionValues)
genome <- genome$getGenomeForGeneIndices(genes.in.mixture, simulated)
genes <- genome$getGenes(simulated)
genome$clear()
  if (is.null(codon.window))
  {
    # no window given: keep all codons (assumes no gene is longer than 100000 codons)
    codon.window <- seq(1,100000)
} else if (length(codon.window) == 2)
{
codon.window <- seq(codon.window[1],codon.window[2])
}
for (i in 1:length(genes))
{
dna <- genes[[i]]$seq
start <- seq(1, nchar(dna), 3)
stop <- pmin(start + 2, nchar(dna))
codons <- substring(dna,start,stop)
codons <- codons[codon.window]
    codons <- codons[!is.na(codons)]
dna <- paste(codons,collapse='')
genes[[i]]$seq <- dna
genome$addGene(genes[[i]],simulated)
}
names.aa <- aminoAcids()
for(aa in names.aa)
{
if(aa == "M" || aa == "W" || aa == "X") next
codon.probability <- calculateProbabilityVector(parameter,model,expressionValues,mixture,samples,aa,model.type="FONSE",codon.window = codon.window)
xlimit <- plotSinglePanel(parameter, model, genome, expressionValues, samples, mixture, aa,codon.probability = codon.probability)
box()
main.aa <- aa #TODO map to three letter code
text(mean(xlimit), 1, main.aa, cex = 1.5)
if(aa %in% c("A", "F", "K", "Q", "V")){
axis(2, las=1)
}
if(aa %in% c("T", "V", "Y", "Z")){
axis(1)
}
if(aa %in% c("A", "C", "D", "E")){
axis(3)
}
if(aa %in% c("E", "I", "P", "T")){
axis(4, las=1)
}
axis(1, tck = 0.02, labels = FALSE)
axis(2, tck = 0.02, labels = FALSE)
axis(3, tck = 0.02, labels = FALSE)
axis(4, tck = 0.02, labels = FALSE)
}
## adding a histogram of phi values to plot
hist.values <- hist(expressionValues, plot=FALSE, nclass=30)
plot(hist.values, axes = FALSE, main = "", xlab = "", ylab = "")
axis(1)
axis(4, las=1)
### Plot xlab.
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.2, expression("log"[10]~"(Protein Synthesis Rate"~phi~")"))
#text(0.5, 0.5, "Production Rate (log10)")
### Plot ylab.
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.5, "Propotion", srt = 90)
par(opar)
}
calculateProbabilityVector <- function(parameter,model,expressionValues,mixture,samples,aa,model.type="ROC",codon.window = c(1,300))
{
codons <- AAToCodon(aa, T)
# get codon specific parameter
selection <- vector("numeric", length(codons))
mutation <- vector("numeric", length(codons))
for (i in 1:length(codons))
{
selection[i] <- parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], 1, T,log_scale = F)
mutation[i] <- parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], 0, T,log_scale = F)
}
# calculate codon probabilities with respect to phi
expression.range <- range(expressionValues)
phis <- seq(from = expression.range[1], to = expression.range[2], by = 0.01)
if (model.type == "ROC")
{
codonProbability <- lapply(10^phis,
function(phi){
model$CalculateProbabilitiesForCodons(mutation, selection, phi)
})
codonProbability <- do.call("rbind",codonProbability)
} else if (model.type == "FONSE")
{
codonProbability <- vector(mode = "list", length = length(codon.window))
for (i in 1:length(codon.window))
{
codonProbability.tmp <- lapply(10^phis,
function(phi)
{
model$CalculateProbabilitiesForCodons(mutation, selection, phi,codon.window[i])
})
codonProbability[[i]] <- do.call("rbind",codonProbability.tmp)
}
codonProbability <- Reduce("+",codonProbability)/length(codon.window)
}
return(codonProbability)
}
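## Usage sketch (illustrative; assumes fitted 'parameter' and 'model' objects and
## log10 expression values in 'expressionValues'):
##   p <- calculateProbabilityVector(parameter, model, expressionValues,
##                                   mixture = 1, samples = 100, aa = "A",
##                                   model.type = "ROC")
##   ## p is a matrix: one row per phi grid point, one column per codon of the amino acid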
# NOT EXPOSED
plotSinglePanel <- function(parameter, model, genome, expressionValues, samples, mixture, aa,codon.probability)
{
codons <- AAToCodon(aa, T)
#
# get codon specific parameter
selection <- vector("numeric", length(codons))
mutation <- vector("numeric", length(codons))
for (i in 1:length(codons))
{
selection[i] <- parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], 1, T, log_scale = F)
mutation[i] <- parameter$getCodonSpecificPosteriorMean(mixture, samples, codons[i], 0, T, log_scale = F)
}
#
expression.range <- range(expressionValues)
phis <- seq(from = expression.range[1], to = expression.range[2], by = 0.01)
codonProbability <- codon.probability
#get codon counts
codons <- AAToCodon(aa, F)
codonCounts <- vector("list", length(codons))
for(i in 1:length(codons))
{
codonCounts[[i]] <- genome$getCodonCountsPerGene(codons[i])
}
codonCounts <- do.call("cbind", codonCounts)
# codon proportions
codonCounts <- codonCounts / rowSums(codonCounts)
codonCounts[is.nan(codonCounts)] <- NA # necessary if AA does not appear in gene
# make empty plot
xlimit <- range(expressionValues, na.rm = T)
plot(NULL, NULL, xlim=xlimit, ylim=c(-0.05,1.05),
xlab = "", ylab="", axes = FALSE)
# bin expression values of genes
quantiles <- quantile(expressionValues, probs = seq(0.05, 0.95, 0.05), na.rm = T)
for(i in 1:length(quantiles))
{
if(i == 1){
tmp.id <- expressionValues < quantiles[i]
}else if(i == length(quantiles)){
tmp.id <- expressionValues > quantiles[i]
}else{
tmp.id <- expressionValues > quantiles[i] & expressionValues < quantiles[i + 1]
}
# plot quantiles
means <- colMeans(codonCounts[tmp.id,], na.rm = T)
std <- apply(codonCounts[tmp.id,], 2, sd, na.rm = T)
for(k in 1:length(codons))
{
points(median(expressionValues[tmp.id]), means[k],
col=.codonColors[[ codons[k] ]] , pch=19, cex = 0.5)
lines(rep(median(expressionValues[tmp.id]),2), c(means[k]-std[k], means[k]+std[k]),
col=.codonColors[[ codons[k] ]], lwd=0.8)
}
}
# draw model fit
for(i in 1:length(codons))
{
lines(phis, codonProbability[, i], col=.codonColors[[ codons[i] ]])
}
colors <- unlist(.codonColors[codons])
# add indicator to optimal codon
optim.codon.index <- which(min(c(selection, 0)) == c(selection, 0))
codons[optim.codon.index] <- paste0(codons[optim.codon.index], "*")
legend("topleft", legend = codons, col=colors, bty = "n", lty=1, cex=0.75)
return(xlimit)
}
## ---- end of R/plotModelObject.R ----
#' Plot Parameter
#'
#' @param x A parameter object
#'
#' @param what Which aspect of the parameter to plot. Default value is
#' "Mutation".
#'
#' @param samples Number of samples to plot using the posterior mean. Default
#' value is 100.
#'
#' @param mixture.name a vector with names/descriptions of the mixture distributions in the parameter object
#'
#' @param with.ci Plot with or without confidence intervals. Default value
#' is TRUE
#'
#' @param ... Arguments to be passed to methods, such as graphical parameters.
#'
#' @return This function has no return value.
#'
#' @description \code{plot} graphs the mutation or selection parameter for a ROC or FONSE
#' parameter object for each mixture element.
#'
#' @details Graphs are based on the posterior means computed from the last \code{samples} samples.
#'
plot.Rcpp_ROCParameter <- function(x, what = "Mutation", samples = 100, mixture.name = NULL, with.ci = TRUE, ...)
{
plotParameterObject(x, what = what, samples= samples, mixture.name=mixture.name, with.ci=with.ci, ...)
}
#' Plot Parameter
#'
#' @param x A parameter object
#'
#' @param what Which aspect of the parameter to plot. Default value is
#' "Mutation".
#'
#' @param samples Number of samples to plot using the posterior mean. Default
#' value is 100.
#'
#' @param mixture.name a vector with names/descriptions of the mixture distributions in the parameter object
#'
#' @param with.ci Plot with or without confidence intervals. Default value
#' is TRUE
#'
#' @param ... Arguments to be passed to methods, such as graphical parameters.
#'
#' @return This function has no return value.
#'
#' @description \code{plot} graphs the mutation or selection parameter for a ROC or FONSE
#' parameter object for each mixture element.
#'
#' @details Graphs are based on the posterior means computed from the last \code{samples} samples.
#'
plot.Rcpp_FONSEParameter <- function(x, what = "Mutation", samples = 100, mixture.name = NULL, with.ci = TRUE, ...)
{
plotParameterObject(x, what = what, samples=samples,mixture.name = mixture.name, with.ci=with.ci, ...)
}
plot.Rcpp_PAParameter <- function(x, what = "Mutation", samples = 100, mixture.name = NULL, with.ci = TRUE, ...)
{
#to_plot = ifelse(what == "Alpha", "Mutation", "Selection")
plotParameterObject(x, what = what, samples=samples,mixture.name = mixture.name, with.ci=with.ci, ...)
#plotPA(x)
}
### NOT EXPOSED
plotParameterObject <- function(x, what = "Mutation", samples = 100, mixture.name = NULL, with.ci = TRUE, ...){
numMixtures <- x$numMixtures
means <- data.frame(matrix(0,ncol=numMixtures,nrow=40))
sd.values <- data.frame(matrix(0,ncol=numMixtures*2,nrow=40))
names.aa <- aminoAcids()
paramType <- ifelse(what == "Mutation", 0, 1)
#cat("ParamType: ", paramType, "\n")
for (mixture in 1:numMixtures) {
# get codon specific parameter
count <- 1
for (aa in names.aa) {
if (aa == "M" || aa == "W" || aa == "X") next
codons <- AAToCodon(aa, T)
for (i in 1:length(codons))
{
means[count,mixture] <- x$getCodonSpecificPosteriorMean(mixture, samples,codons[i], paramType, TRUE, log_scale=FALSE)
tmp <- x$getCodonSpecificQuantile(mixture,samples, codons[i], paramType, c(0.025, 0.975), TRUE, log_scale=FALSE)
## This approach to storing the quantiles may seem unconventional, but I actually found it to be the most straightforward approach
## for plotting later.
sd.values[count,mixture] <- tmp[1]
sd.values[count,mixture+numMixtures] <- tmp[2]
count <- count + 1
}
}
}
## Begin graphing
mat <- matrix(rep(0,numMixtures*numMixtures),
nrow = numMixtures, ncol = numMixtures, byrow = TRUE)
count <- 1
for(i in 1:numMixtures){
for(j in 1:numMixtures){
if(i<=j){
mat[i,j] <-count
count <- count + 1
}
}
}
nf <- layout(mat,widths=c(rep(5,numMixtures)),heights=c(rep(5,numMixtures)),respect=FALSE)
par(mar=c(1,1,1,1))
for(i in 1:numMixtures){
for(j in 1:numMixtures){
if(i==j)
{
plot(NULL, xlim=c(0,1), ylim=c(0,1), ylab="", xlab="",xaxt='n',yaxt='n',ann=FALSE)
if(is.null(mixture.name)){
text(x = 0.5, y = 0.5, paste0("Mixture\nElement",i),
cex = 1.6, col = "black")
}else{
text(x = 0.5, y = 0.5, mixture.name[i],
cex = 1.6, col = "black")
}
}
else if (i < j){
if(with.ci){
plot(means[,j],means[,i],ann=FALSE,xlim=range(cbind(sd.values[,j],sd.values[,j+numMixtures])),ylim=range(cbind(sd.values[,i],sd.values[,i+numMixtures])))
upper.panel.plot(means[,j],means[,i],sd.x=cbind(sd.values[,j],sd.values[,j+numMixtures]),sd.y=cbind(sd.values[,i],sd.values[,i+numMixtures]))
#confidenceInterval.plot(x = means[,j],y = mean[,i], sd.x=sd.values[,j],sd.y=sd.values[,i])
} else{
plot(means[,j],means[,i],ann=FALSE,xlim=range(means[,j]),ylim=range(means[,i]))
upper.panel.plot(means[,j],means[,i])
}
}
}
}
}
#TODO: should PA's plotting be here as well?
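# NOT EXPOSED
# Helper for the upper panels of the mixture comparison matrix drawn by
# plotParameterObject: draws the 1:1 reference line, the points with optional
# interval whiskers (sd.x/sd.y are two-column matrices of lower/upper bounds),
# and a regression line, annotating the panel with the fitted equation
# (marked with "*" if the slope differs significantly from 1) and rho.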
upper.panel.plot <- function(x, y, sd.x=NULL, sd.y=NULL, ...){
abline(0, 1, col = "blue", lty = 2)
points(x, y, ...)
if(!is.null(sd.y)){
y.up <- sd.y[,2]
y.low <- sd.y[,1]
epsilon <- range(x, na.rm = T) * 0.1
segments(x, y.low, x, y.up, ...)
}
if(!is.null(sd.x)){
x.up <- sd.x[,2]
x.low <- sd.x[,1]
epsilon <- range(y, na.rm = T) * 0.1
segments(x.low, y, x.up, y, ...)
}
lm.line <- lm(y~x, na.action = "na.exclude")
abline(lm.line, col="blue", lwd = 2)
R2 <- summary(lm.line)$r.squared
b <- lm.line$coef[2]
rho <- ifelse(b > 0, sqrt(R2), -sqrt(R2)) #make sure rho has correct sign
if(!is.null(sd.x))
{
xlim <- range(sd.x, na.rm = T)
}else{
xlim <- range(x,na.rm = T)
}
if(!is.null(sd.y))
{
ylim <- range(sd.y, na.rm = T)
}else{
ylim <- range(y,na.rm=T)
}
width <- xlim[2] - xlim[1]
height <- ylim[2] - ylim[1]
std.error <- summary(lm.line)$coefficients[4]
slope <- round(summary(lm.line)$coefficients[2], 3)
intercept <- round(summary(lm.line)$coefficients[1], 3)
t <- (slope - 1)/std.error
if((t > qt(1-(0.05/2), lm.line$df.residual - 1))||(t < qt((0.05/2),lm.line$df.residual-1))){
eq <- paste0("y = ", sprintf("%.3f", intercept), " + ", sprintf("%.3f", slope), "x *")
text(xlim[1] + width * 0.01, ylim[2] - height * 0.2, eq, pos = 4, cex = 1.5)
}else{
eq <- paste0("y = ", sprintf("%.3f", intercept), " + ", sprintf("%.3f", slope), "x")
text(xlim[1] + width * 0.01, ylim[2] - height * 0.2, eq, pos = 4, cex = 1.5)
}
if(b > 0){
text(xlim[2] - width * 0.04, ylim[1] + height * 0.05,
parse(text = paste0("rho == ", sprintf("%.4f", rho))),
pos = 2, cex = 1.5, font = 2)
}else{
text(xlim[2] - width * 0.04, ylim[2] - height * 0.05,
parse(text = paste0("rho == ", sprintf("%.4f", rho))),
pos = 2, cex = 1.5, font = 2)
}
}
lower.panel.plot <- function(x, y, ...)
{
}
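# NOT EXPOSED
# Draws points with interval whiskers (sd.x/sd.y as two-column matrices of
# lower/upper bounds); a regression fit is computed but not drawn. Used,
# e.g., by plotPA for the alpha and lambda' interval plots.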
confidenceInterval.plot <- function(x, y, sd.x=NULL, sd.y=NULL, ...){
points(x, y, ...)
if(!is.null(sd.y)){
y.up <- sd.y[,2]
y.low <- sd.y[,1]
epsilon <- range(x, na.rm = T) * 0.1
segments(x, y.low, x, y.up, ...)
}
if(!is.null(sd.x)){
x.up <- sd.x[,2]
x.low <- sd.x[,1]
epsilon <- range(y, na.rm = T) * 0.1
segments(x.low, y, x.up, y, ...)
}
lm.line <- lm(y~x, na.action = "na.exclude")
b <- lm.line$coef[2]
xlim <- range(x, na.rm = T)
ylim <- range(y, na.rm = T)
width <- xlim[2] - xlim[1]
height <- ylim[2] - ylim[1]
std.error <- summary(lm.line)$coefficients[4]
slope <- round(summary(lm.line)$coefficients[2], 3)
intercept <- round(summary(lm.line)$coefficients[1], 3)
t <- (slope - 1)/std.error
}
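# NOT EXPOSED
# Plots posterior means and 95% credible intervals of the PA model's alpha
# and lambda' parameters for each sense codon in the given mixture, using the
# second half of the requested samples.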
plotPA <- function(parameter,genome,samples=100,mixture=1){
#cat("hello")
cat <- mixture
trace <- parameter$getTraceObject()
proposal <- FALSE
alphaList <- numeric (61)
lambdaPrimeList <- numeric (61)
waitingTimes <- numeric(61)
alpha.ci <- matrix(0, ncol=2, nrow=61)
lambdaPrime.ci <- matrix(0, ncol=2, nrow=61)
psiList <- numeric(length(genome))
ids <- numeric(length(genome))
codonList <- codons()
for (i in 1:61)
{
codon <- codonList[i]
alphaList[i] <- parameter$getCodonSpecificPosteriorMean(cat, samples * 0.5, codon, 0, FALSE,log_scale=F)
alphaTrace <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(1, codon, 0, FALSE)
alpha.ci[i,] <- quantile(alphaTrace[(samples * 0.5):samples], probs = c(0.025,0.975))
lambdaPrimeList[i] <- parameter$getCodonSpecificPosteriorMean(cat, samples * 0.5, codon, 1, FALSE, log_scale=F)
lambdaPrimeTrace <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(1, codon, 1, FALSE)
lambdaPrime.ci[i,] <- quantile(lambdaPrimeTrace[(samples * 0.5):samples], probs = c(0.025,0.975))
waitingTimes[i] <- alphaList[i] * lambdaPrimeList[i]
}
waitRates <- numeric(61)
for (i in 1:61) {
waitRates[i] <- (1.0/waitingTimes[i])
}
for (geneIndex in 1:length(genome)) {
psiList[geneIndex] <- parameter$getSynthesisRatePosteriorMeanForGene(samples * 0.5, geneIndex, 1)
}
for (i in 1:length(genome))
{
g <- genome$getGeneByIndex(i, FALSE)
ids[i] <- g$id
}
#Plot confidence intervals for alpha and lambda prime
plot(NULL, NULL, xlim=range(1:61, na.rm = T), ylim=range(alpha.ci),
main = "Confidence Intervals for Alpha Parameter", xlab = "Codons",
ylab = "Estimated values", axes=F)
confidenceInterval.plot(x = 1:61, y = alphaList, sd.y = alpha.ci)
axis(2)
axis(1, tck = 0.02, labels = codonList[1:61], at=1:61, las=2, cex.axis=.6)
plot(NULL, NULL, xlim=range(1:61, na.rm = T), ylim=range(lambdaPrime.ci),
main = "Confidence Intervals for LambdaPrime Parameter", xlab = "Codons",
ylab = "Estimated values", axes=F)
confidenceInterval.plot(x = 1:61, y = lambdaPrimeList, sd.y = lambdaPrime.ci)
axis(2)
axis(1, tck = 0.02, labels = codonList[1:61], at=1:61, las=2, cex.axis=.6)
#dev.off()
}
#' Plots ACF for codon specific parameter traces
#'
#' @param parameter object of class Parameter
#' @param csp indicates which parameter to calculate the autocorrelation for. Must be one of Mutation (the default; ROC, FONSE), Selection (ROC, FONSE), Alpha (PA, PANSE), LambdaPrime (PA, PANSE), or NSERate (PA, PANSE)
#' @param numMixtures indicates the number of CSP mixtures used
#' @param samples number of samples at the end of the trace used to calculate the acf
#' @param lag.max Maximum amount of lag to calculate the acf. Default is 10*log10(N), where N is the number of observations.
#' @param plot logical. If TRUE (default) a plot of the acf is created
#'
#' @description The function calculates, and by default plots, the acf to estimate the autocorrelation in the trace
#'
#'
#' @seealso \code{\link{acfMCMC}}
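#' @examples
#' \dontrun{
#' # a usage sketch: `parameter` is assumed to be a parameter object from a
#' # finished ROC model fit (not created here)
#' acf.list <- acfCSP(parameter, csp = "Mutation", numMixtures = 1,
#'                    samples = 500, lag.max = 40)
#' }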
acfCSP <- function(parameter, csp = "Mutation", numMixtures = 1, samples = NULL, lag.max = 40, plot=TRUE)
{
if (csp == "Mutation" || csp == "Alpha")
{
paramType <- 0
} else if (csp == "Selection" || csp == "LambdaPrime" || csp == "Lambda"){
paramType <- 1
} else if (csp == "NSERate"){
paramType <- 2
} else{
stop("csp must take one of the following values: Mutation (ROC, FONSE), Selection (ROC, FONSE), Alpha (PA, PANSE), LambdaPrime (PA, PANSE), NSERate (PA, PANSE)")
}
acf.list <- list()
names.aa <- aminoAcids()
trace <- parameter$getTraceObject()
if(is.null(samples))
{
samples <- round(10*log10(length(trace)))
}
ref.codon <- ifelse(csp %in% c("Selection","Mutation"),TRUE,FALSE)
for (aa in names.aa)
{
if (aa == "X")
next
if ((aa == "M" || aa == "W") && ref.codon) ## If ROC or FONSE, skip amino acids without synonyms
next
codons <- AAToCodon(aa, ref.codon) ## If ROC or FONSE, skip reference codon
codon.list <- list()
for (i in 1:length(codons))
{
mix.list <- list()
for (j in 1:numMixtures)
{
csp.trace <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(j, codons[i], paramType, ref.codon)
csp.trace <- csp.trace[(length(csp.trace)-samples):length(csp.trace)]
csp.acf <- acf(x = csp.trace, lag.max = lag.max, plot = FALSE)
mix.list[[j]] <- csp.acf
if (plot)
{
header <- paste(csp, aa, codons[i], "Mixture:", j, sep = " ")
plot(x = csp.acf, xlab = "Lag time", ylab = "Autocorrelation", main = header)
}
}
codon.list[[codons[i]]] <-mix.list
}
acf.list[[aa]] <- codon.list
}
return(acf.list)
}
# NOT EXPOSED
# plots to data.frames / matrices with CSP estimates.
plotCSPdf <- function(df1, df2, xlab = "", ylab = "", main = "")
{
fill.in <- data.frame(Codon=as.character(codons()))
df1 <- merge(x = df1, y = fill.in, by = "Codon", all = T)
df2 <- merge(x = df2, y = fill.in, by = "Codon", all = T)
for(i in 1:64){
df1$AA[i] <- codonToAA(df1$Codon[i])
df2$AA[i] <- codonToAA(df2$Codon[i])
}
df1[is.na(df1)] <- 0
df1 <- df1[order(df1$AA), ]
df2[is.na(df2)] <- 0
df2 <- df2[order(df2$AA), ]
aas <- aminoAcids()
n.aa <- length(aas)
kt <- rep(0, n.aa)
dn <- rep(0, n.aa)
for(j in 1:n.aa)
{
aa <- aas[j]
if(aa == "W" || aa == "M" || aa == "X") next
aa.pos <- which(df1$AA == aa)
df1[aa.pos,4] <- df1[aa.pos,4] - mean(df1[aa.pos,3])
df2[aa.pos,4] <- df2[aa.pos,4] - mean(df2[aa.pos,3])
df1[aa.pos,5] <- df1[aa.pos,5] - mean(df1[aa.pos,3])
df2[aa.pos,5] <- df2[aa.pos,5] - mean(df2[aa.pos,3])
df1[aa.pos,3] <- df1[aa.pos,3] - mean(df1[aa.pos,3])
df2[aa.pos,3] <- df2[aa.pos,3] - mean(df2[aa.pos,3])
}
xlim <- range(df1[, 4:5])
ylim <- range(df2[, 4:5])
plot(NULL, NULL, axes=F, xlab = "", ylab = "", xlim = xlim, ylim = ylim)
points(df1[, 3], df2[,3], pch = 19, col = rgb(0,0,0,0.7))
segments(x0 = df1[,3], y0 = df2[,4], x1 = df1[,3], y1 = df2[,5], lwd=2, col=adjustcolor("black", 0.5))
segments(x0 = df1[,4], y0 = df2[,3], x1 = df1[,5], y1 = df2[,3], lwd=2, col=adjustcolor("black", 0.5))
type2.reg <- lmodel2::lmodel2(df2[, 3] ~ df1[,3], nperm = 10, range.y = "interval", range.x = "interval")
intercept <- type2.reg$regression.results[4, 2]
slope <- type2.reg$regression.results[4, 3]
r <- type2.reg$r
if(slope > 0){
text(x = xlim[1] + abs(xlim[1])*0.1, y = ylim[2] - abs(ylim[2])*0.2, adj = 0,
labels = bquote("y = " ~.(round(intercept, 2)) ~ " + " ~.(round(slope, 2)) ~ "x"), cex = 0.75)
text(x = xlim[1] + abs(xlim[1])*0.1, y = (ylim[2] - abs(ylim[2])*0.2) - 0.4, adj = 0,
labels = bquote(rho ~ " = " ~.(round(r, 2))), cex = 0.75)
}else{
text(x = xlim[1] + abs(xlim[1])*0.2, y = ylim[1] + abs(ylim[1])*0.2 + 0.4, adj = 0,
labels = bquote("y = " ~.(round(intercept, 2)) ~ " + " ~.(round(slope, 2)) ~ "x"), cex = 0.75)
text(x = xlim[1] + abs(xlim[1])*0.2, y = (ylim[1] + abs(ylim[1])*0.2), adj = 0,
labels = bquote(rho ~ " = " ~.(round(r, 2))), cex = 0.75)
}
abline(a = intercept, b = slope, lty = 1, lwd = 2, col = adjustcolor("red", 0.7))
abline(a = 0, b = 1, lty = 2, lwd = 1)
abline(h = 0, lty = 3, lwd = 1)
abline(v = 0, lty = 3, lwd = 1)
title(main = main)
mtext(text = xlab, side = 1, line = 2.5, font = 2, cex=1.5)
mtext(text = ylab, side = 2, line = 2.5, font = 2, cex=1.5)
axis(side = 1, tick = T, font.axis = 2, lwd=2, las=1)
axis(side = 2, tick = T, font.axis = 2, lwd=2, las=1)
}
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/plotParameterObject.R |
# Plot functions for trace object
# The generic plot function expects the trace object
# and a string telling the function what has to be plotted.
# Additional arguments are geneIndex and category, used to index functions like
# getExpressionTraceForGene or plotCodonSpecificParameters
#' Plot Trace Object
#' @param x An Rcpp trace object initialized with \code{initializeTraceObject}.
#' @param what A string containing one of the following to graph: \code{Mutation, Selection, Alpha, Lambda, MeanWaitingTime, VarWaitingTime, NSEProb, NSERate,
#' MixtureProbability, Sphi, Mphi, Aphi, Sepsilon, ExpectedPhi, Expression, InitiationCost, PartitionFunction, AcceptanceRatio}.
#' @param geneIndex When plotting expression, the index of the gene to be plotted.
#' @param mixture The mixture for which to plot values.
#' @param log.10.scale A logical value determining if figures should be plotted on the log.10.scale (default=F). Should not be applied to mutation and selection parameters estimated by ROC/FONSE.
#'
#' @param ... Optional, additional arguments.
#' For this function, may be a logical value determining if the trace is ROC-based or not.
#'
#' @return This function has no return value.
#'
#' @description Plots different traces, specified with the \code{what} parameter.
#'
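#' @examples
#' \dontrun{
#' # a usage sketch: `parameter` is assumed to be a parameter object from a
#' # finished model fit (not created here)
#' trace <- parameter$getTraceObject()
#' plot(trace, what = "Mutation", mixture = 1)
#' plot(trace, what = "Expression", geneIndex = 1)
#' }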
plot.Rcpp_Trace <- function(x, what=c("Mutation", "Selection", "MixtureProbability" ,"Sphi", "Mphi", "Aphi", "Sepsilon", "ExpectedPhi", "Expression","NSEProb","NSERate","InitiationCost","PartitionFunction"),
geneIndex=1, mixture = 1,log.10.scale=F,...)
{
if(what[1] == "Mutation")
{
plotCodonSpecificParameters(x, mixture, "Mutation", main="Mutation Parameter Traces")
}
if(what[1] == "Selection")
{
plotCodonSpecificParameters(x, mixture, "Selection", main="Selection Parameter Traces")
}
if(what[1] == "Alpha")
{
plotCodonSpecificParameters(x, mixture, "Alpha", main="Alpha Parameter Traces", ROC.or.FONSE=FALSE,log.10.scale=log.10.scale)
}
if(what[1] == "Lambda")
{
plotCodonSpecificParameters(x, mixture, "Lambda", main="Lambda Parameter Traces", ROC.or.FONSE=FALSE,log.10.scale=log.10.scale)
}
if(what[1] == "MeanWaitingTime")
{
plotCodonSpecificParameters(x, mixture, "MeanWaitingTime", main="Mean Waiting Time Parameter Traces", ROC.or.FONSE=FALSE,log.10.scale=log.10.scale)
}
if(what[1] == "VarWaitingTime")
{
plotCodonSpecificParameters(x, mixture, "VarWaitingTime", main="Variance Waiting Time Parameter Traces", ROC.or.FONSE=FALSE)
}
if(what[1] == "NSEProb")
{
plotCodonSpecificParameters(x, mixture, "NSEProb", main="Nonsense Error Probability Parameter Traces", ROC.or.FONSE=FALSE,log.10.scale=log.10.scale)
}
if(what[1] == "MixtureProbability")
{
plotMixtureProbability(x)
}
if(what[1] == "Sphi")
{
plotHyperParameterTrace(x, what = what[1])
}
if(what[1] == "Mphi")
{
plotHyperParameterTrace(x, what = what[1])
}
if(what[1] == "Aphi")
{
plotHyperParameterTrace(x, what = what[1])
}
if(what[1] == "InitiationCost")
{
plotFONSEHyperParameterTrace(x,what=what[1])
}
if(what[1] == "PartitionFunction")
{
plotPANSEHyperParameterTrace(x,what=what[1])
}
if(what[1] == "Sepsilon")
{
plotHyperParameterTrace(x, what = what[1])
}
if(what[1] == "ExpectedPhi")
{
plotExpectedPhiTrace(x)
}
if(what[1] == "Expression")
{
plotExpressionTrace(x, geneIndex)
}
if(what[1] == "AcceptanceRatio")
{
plotAcceptanceRatios(x)
}
if(what[1] == "NSERate")
{
plotCodonSpecificParameters(x, mixture, "NSERate", main="NSERate", ROC.or.FONSE=FALSE,log.10.scale=log.10.scale)
}
}
# Called from Plot Trace Object (plot for trace)
# NOT EXPOSED
#
#' Plot Codon Specific Parameter
#' @param trace An Rcpp trace object initialized with \code{initializeTraceObject}.
#'
#' @param mixture The mixture for which to plot values.
#'
#' @param type A string containing one of the following to graph: \code{Mutation, Selection, Alpha, Lambda, MeanWaitingTime, VarWaitingTime, NSEProb, VarNSEProb, NSERate}.
#'
#' @param main The title of the plot.
#'
#' @param ROC.or.FONSE A logical value determining if the Parameter was ROC/FONSE or not.
#'
#' @param log.10.scale A logical value determining if figures should be plotted on the log.10.scale (default=F). Should not be applied to mutation and selection parameters estimated by ROC/FONSE.
#'
#' @return This function has no return value.
#'
#' @description Plots a codon-specific set of traces, specified with the \code{type} parameter.
#'
plotCodonSpecificParameters <- function(trace, mixture, type="Mutation", main="Mutation Parameter Traces", ROC.or.FONSE=TRUE,log.10.scale=F)
{
opar <- par(no.readonly = T)
### Trace plot.
if (ROC.or.FONSE)
{
nf <- layout(matrix(c(rep(1, 4), 2:21), nrow = 6, ncol = 4, byrow = TRUE),
rep(1, 4), c(2, 8, 8, 8, 8, 8), respect = FALSE)
}else
{
nf <- layout(matrix(c(rep(1, 4), 2:25), nrow = 7, ncol = 4, byrow = TRUE),
rep(1, 4), c(2, 8, 8, 8, 8, 8, 8), respect = FALSE)
}
### Plot title.
if (ROC.or.FONSE){
par(mar = c(0, 0, 0, 0))
}else{
par(mar = c(1,1,1,1))
}
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.6, main)
text(0.5, 0.4, date(), cex = 0.6)
par(mar = c(5.1, 4.1, 4.1, 2.1))
# TODO change to groupList -> checks for ROC like model is not necessary!
names.aa <- aminoAcids()
with.ref.codon <- ifelse(ROC.or.FONSE, TRUE, FALSE)
for(aa in names.aa)
{
codons <- AAToCodon(aa, with.ref.codon)
if(length(codons) == 0) next
if (!ROC.or.FONSE){
if(aa == "X") next
}
cur.trace <- vector("list", length(codons))
paramType <- 0
if(type == "Mutation"){
ylab <- expression(Delta~"M")
paramType <- 0
special <- FALSE
}else if (type == "Selection"){
ylab <- expression(Delta~eta)
paramType <- 1
special <- FALSE
}else if (type == "Alpha"){
if (log.10.scale)
{
ylab <- expression("log"[10]*alpha)
} else{
ylab <- expression(alpha)
}
paramType <- 0
special <- FALSE
}else if (type == "Lambda"){
if (log.10.scale)
{
ylab <- expression("log"[10]*lambda)
} else{
ylab <- expression(lambda)
}
paramType <- 1
special <- FALSE
}else if (type == "MeanWaitingTime"){
if (log.10.scale)
{
ylab <- expression("log"[10]*alpha/lambda)
}else{
ylab <- expression(alpha/lambda)
}
special <- TRUE
}else if (type == "VarWaitingTime"){
ylab <- expression(alpha/lambda^"2")
special <- TRUE
}else if (type == "NSEProb"){
if (log.10.scale)
{
ylab <- expression("log"[10]*"Pr(NSE)")
} else{
ylab <- expression("E[Pr(NSE)]")
}
special <- TRUE
}else if (type == "VarNSEProb"){
ylab <- expression("Var[Pr(NSE)]")
special <- TRUE
}else if (type == "NSERate"){
if (log.10.scale)
{
ylab <- expression("log"[10]*"NSERate")
} else{
ylab <- expression("NSERate")
}
paramType <- 2
special <- FALSE
}else{
stop("Parameter 'type' not recognized! Must be one of: 'Mutation', 'Selection', 'Alpha', 'Lambda', 'MeanWaitingTime', 'VarWaitingTime', 'NSEProb', 'NSERate'.")
}
for(i in 1:length(codons)){
if(special){
tmpAlpha <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codons[i], 0, with.ref.codon)
tmpLambdaPrime <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codons[i], 1, with.ref.codon)
if (type == "MeanWaitingTime"){
cur.trace[[i]] <- tmpAlpha / tmpLambdaPrime
}else if (type == "VarWaitingTime"){
cur.trace[[i]] <- tmpAlpha / (tmpLambdaPrime * tmpLambdaPrime)
} else if (type == "NSEProb" || type == "VarNSEProb"){
tmpNSERate <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codons[i], 2, with.ref.codon)
if (type == "NSEProb")
{
cur.trace[[i]] <- tmpNSERate*(tmpAlpha/tmpLambdaPrime)
} else {
cur.trace[[i]] <- tmpNSERate*tmpNSERate*(tmpAlpha/(tmpLambdaPrime * tmpLambdaPrime))
}
}
if (log.10.scale)
{
cur.trace[[i]] <- log10(cur.trace[[i]])
}
}
else{
cur.trace[[i]] <- trace$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codons[i], paramType, with.ref.codon)
if (log.10.scale)
{
cur.trace[[i]] <- log10(cur.trace[[i]])
}
}
}
cur.trace <- do.call("cbind", cur.trace)
if(length(cur.trace) == 0) next
x <- 1:dim(cur.trace)[1]
xlim <- range(x)
ylim <- range(cur.trace, na.rm=T)
main.aa <- aa #TODO map to three-letter code
plot(NULL, NULL, xlim = xlim, ylim = ylim,
xlab = "Samples", ylab = ylab, main = main.aa)
plot.order <- order(apply(cur.trace, 2, sd), decreasing = TRUE)
for(i.codon in plot.order){
lines(x = x, y = cur.trace[, i.codon], col = .codonColors[[codons[i.codon]]])
}
colors <- unlist(.codonColors[codons])
legend("topleft", legend = codons, col = colors,
lty = rep(1, length(codons)), bty = "n", cex = 0.75)
}
par(opar)
}
# Called from Plot Trace Object (plot for trace)
# NOT EXPOSED
#
#' Plot Acceptance ratios
#' @param trace An Rcpp trace object initialized with \code{initializeTraceObject}.
#'
#' @param main The title of the plot.
#'
#' @return This function has no return value.
#'
#' @description Plots acceptance ratios for codon-specific parameters. These will be by amino acid for ROC and FONSE models, but by codon for PA and PANSE models. Note: this assumes parameters are estimated for all codons.
plotAcceptanceRatios <- function(trace,main="CSP Acceptance Ratio Traces")
{
opar <- par(no.readonly = T)
### Trace plot.
acceptance.rate.traces <- trace$getCodonSpecificAcceptanceRateTrace()
if (length(acceptance.rate.traces) == 61)
{
ROC.or.FONSE <- FALSE
} else {
ROC.or.FONSE <- TRUE
}
if (ROC.or.FONSE)
{
nf <- layout(matrix(c(rep(1, 4), 2:21), nrow = 6, ncol = 4, byrow = TRUE),
rep(1, 4), c(2, 8, 8, 8, 8, 8), respect = FALSE)
}else
{
nf <- layout(matrix(c(rep(1, 4), 2:25), nrow = 7, ncol = 4, byrow = TRUE),
rep(1, 4), c(2, 8, 8, 8, 8, 8, 8), respect = FALSE)
}
### Plot title.
if (ROC.or.FONSE){
par(mar = c(0, 0, 0, 0))
}else{
par(mar = c(1,1,1,1))
}
plot(NULL, NULL, xlim = c(0, 1), ylim = c(0, 1), axes = FALSE)
text(0.5, 0.6, main)
text(0.5, 0.4, date(), cex = 0.6)
par(mar = c(5.1, 4.1, 4.1, 2.1))
# TODO change to groupList -> checks for ROC like model is not necessary!
names.aa <- aminoAcids()
with.ref.codon <- ifelse(ROC.or.FONSE, TRUE, FALSE)
for(aa in names.aa)
{
codons <- AAToCodon(aa, with.ref.codon)
if(length(codons) == 0) next
if (!ROC.or.FONSE){
if(aa == "X") next
}
if (!ROC.or.FONSE)
{
cur.trace <- vector("list", length(codons))
for(i in 1:length(codons))
{
cur.trace[[i]] <- trace$getCodonSpecificAcceptanceRateTraceForCodon(codons[i])
}
} else {
cur.trace <- vector("list", 1)
cur.trace[[1]] <- trace$getCodonSpecificAcceptanceRateTraceForAA(aa)
}
cur.trace <- do.call("cbind", cur.trace)
if(length(cur.trace) == 0) next
x <- 1:dim(cur.trace)[1]
xlim <- range(x)
ylim <- range(cur.trace, na.rm=T)
main.aa <- aa #TODO map to three-letter code
plot(NULL, NULL, xlim = xlim, ylim = ylim,
xlab = "Samples", ylab = "Accept. Rat.", main = main.aa)
plot.order <- order(apply(cur.trace, 2, sd), decreasing = TRUE)
for(i.codon in plot.order){
lines(x = x, y = cur.trace[, i.codon], col = .codonColors[[codons[i.codon]]])
}
colors <- unlist(.codonColors[codons])
legend("topleft", legend = codons, col = colors,
lty = rep(1, length(codons)), bty = "n", cex = 0.75)
}
par(opar)
}
# NOT EXPOSED
plotExpressionTrace <- function(trace, geneIndex)
{
plot(log10(trace$getSynthesisRateTraceForGene(geneIndex)), type= "l", xlab = "Sample", ylab = expression("log"[10]~"("~phi~")"))
}
# NOT EXPOSED
plotExpectedPhiTrace <- function(trace)
{
par(mar=c(5,5,4,2))
plot(trace$getExpectedSynthesisRateTrace()[-1], type="l", xlab = "Sample", ylab = expression(bar(phi)),
main = expression("Trace of the Expected value of "~phi))
abline(h=1, col="red", lwd=1.5, lty=2)
}
# NOT EXPOSED
# Currently can only be one of Sphi, Mphi, Aphi, and Sepsilon.
plotHyperParameterTrace <- function(trace, what = c("Sphi", "Mphi", "Aphi", "Sepsilon"))
{
# opar <- par(no.readonly = T)
# par(oma=c(1,1,2,1), mgp=c(2,1,0), mar = c(3,4,2,1), mfrow=c(2, 1))
xlab <- "Sample"
if (what[1] == "Sphi")
{
sphi <- trace$getStdDevSynthesisRateTraces();
numMixtures <- length(sphi)
sphi <- do.call("cbind", sphi)
ylimit <- range(sphi) + c(-0.1, 0.1)
xlimit <- c(1, nrow(sphi))
ylab <- expression("s"[phi])
main <- expression("s"[phi]*"Trace")
plot(NULL, NULL, type="l", xlab = xlab, ylab = ylab, xlim = xlimit, ylim = ylimit, main = main)
for(i in 1:ncol(sphi))
{
lines(sphi[-1,i], col = .mixtureColors[i])
}
legend("topleft", legend = paste0("Mixture Element", 1:numMixtures),
col = .mixtureColors[1:numMixtures], lty = rep(1, numMixtures), bty = "n")
}
else if (what[1] == "Mphi")
{
sphi <- trace$getStdDevSynthesisRateTraces();
numMixtures <- length(sphi)
sphi <- do.call("cbind", sphi)
mphi <- -(sphi * sphi) / 2;
ylimit <- range(mphi) + c(-0.1, 0.1)
xlimit <- c(1, nrow(mphi))
ylab <- expression("m"[phi])
main <- expression("m"[phi]*"Trace")
plot(NULL, NULL, type="l", xlab = xlab, ylab = ylab, xlim = xlimit, ylim = ylimit, main = main)
for(i in 1:ncol(mphi))
{
lines(mphi[-1,i], col= .mixtureColors[i])
}
legend("topleft", legend = paste0("Mixture Element", 1:numMixtures),
col = .mixtureColors[1:numMixtures], lty = rep(1, numMixtures), bty = "n")
}
else if (what[1] == "Aphi")
{
aphi <- trace$getSynthesisOffsetTrace();
aphi <- do.call("cbind", aphi)
ylimit <- range(aphi) + c(-0.1, 0.1)
xlimit <- c(1, nrow(aphi))
ylab <- expression("A"[phi])
main <- expression("A"[phi]~"Trace")
plot(NULL, NULL, type="l", xlab = xlab, ylab = ylab, xlim = xlimit, ylim = ylimit, main = main)
num.obs.data <- ncol(aphi)
for(i in 1:num.obs.data)
{
lines(aphi[-1,i], col = .mixtureColors[i])
}
legend("topleft", legend = paste0("Observed Data", 1:num.obs.data),
col = .mixtureColors[1:num.obs.data], lty = rep(1, num.obs.data), bty = "n")
}
else if (what[1] == "Sepsilon")
{
sepsilon <- trace$getObservedSynthesisNoiseTrace();
sepsilon <- do.call("cbind", sepsilon)
ylimit <- range(sepsilon) + c(-0.1, 0.1)
xlimit <- c(1, nrow(sepsilon))
ylab <- expression("s"[epsilon])
main <- expression("s"[epsilon]~"Trace")
plot(NULL, NULL, type="l", xlab = xlab, ylab = ylab, xlim = xlimit, ylim = ylimit, main = main)
num.obs.data <- ncol(sepsilon)
for(i in 1:num.obs.data)
{
lines(sepsilon[-1,i], col = .mixtureColors[i])
}
legend("topleft", legend = paste0("Observed Data", 1:num.obs.data),
col = .mixtureColors[1:num.obs.data], lty = rep(1, num.obs.data), bty = "n")
}
#par(opar)
}
plotFONSEHyperParameterTrace <- function(trace, what = c("InitiationCost"))
{
# opar <- par(no.readonly = T)
# par(oma=c(1,1,2,1), mgp=c(2,1,0), mar = c(3,4,2,1), mfrow=c(2, 1))
xlab <- "Sample"
if (what[1] == "InitiationCost")
{
a1 <- unlist(trace$getInitiationCostTrace())
a1 <- a1[2:length(a1)]
ylimit <- range(a1) + c(-0.1, 0.1)
xlimit <- c(1, length(a1))
ylab <- expression("a"[1])
main <- expression("a"[1]*"Trace")
plot(NULL, NULL, type="l", xlab = xlab, ylab = ylab, xlim = xlimit, ylim = ylimit, main = main)
lines(a1, col = "black")
}
#par(opar)
}
plotPANSEHyperParameterTrace <- function(trace, what = c("PartitionFunction"))
{
# opar <- par(no.readonly = T)
# par(oma=c(1,1,2,1), mgp=c(2,1,0), mar = c(3,4,2,1), mfrow=c(2, 1))
xlab <- "Sample"
if (what[1] == "PartitionFunction")
{
pf <- trace$getPartitionFunctionTraces();
numMixtures <- length(pf)
pf <- do.call("cbind", pf)
ylimit <- range(pf) + c(-0.1, 0.1)
xlimit <- c(1, nrow(pf))
ylab <- expression("Partition Function")
main <- expression("Partition Function Trace")
plot(NULL, NULL, type="l", xlab = xlab, ylab = ylab, xlim = xlimit, ylim = ylimit, main = main)
for(i in 1:ncol(pf))
{
lines(pf[-1,i], col = .mixtureColors[i])
}
legend("topleft", legend = paste0("Mixture Element", 1:numMixtures),
col = .mixtureColors[1:numMixtures], lty = rep(1, numMixtures), bty = "n")
}
#par(opar)
}
# NOT EXPOSED
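# Plots the trace of each mixture's assignment probability (bounded by [0, 1])
# over the sampled iterations, one line per mixture element.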
plotMixtureProbability <- function(trace)
{
samples <- length(trace$getMixtureProbabilitiesTraceForMixture(1))
numMixtures <- trace$getNumberOfMixtures()
plot(NULL, NULL, xlim = c(0, samples), ylim=c(0, 1), xlab = "Samples", ylab = "Mixture Probability", main = "Mixture Probability")
for (i in 1:numMixtures)
{
lines(trace$getMixtureProbabilitiesTraceForMixture(i)[-1], col = .mixtureColors[i])
}
legend("topleft", legend = paste0("Mixture Element", 1:numMixtures),
col = .mixtureColors[1:numMixtures], lty = rep(1, numMixtures), bty = "n")
}
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/plotTraceObject.R |
#### TODO: let's move it into parameterObject.R and use a parameter instead of trace. That's how it is done for the acf function
# see mcmc Object.R convergence.test function for documentation
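# Example (a sketch; assumes `parameter` holds traces from a finished run):
#   trace <- parameter$getTraceObject()
#   convergence.test(trace, samples = 500, what = "Mutation", mixture = 1)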
convergence.test.Rcpp_Trace <- function(object, samples = 10, frac1 = 0.1,
frac2 = 0.5, thin = 1, plot = FALSE, what = "Mutation", mixture = 1)
{
current.trace <- 0
if(what[1] == "Mutation" || what[1] == "Selection")
{
names.aa <- aminoAcids()
numCodons <- 0
for(aa in names.aa)
{
if (aa == "M" || aa == "W" || aa == "X") next
codons <- AAToCodon(aa, T)
numCodons <- numCodons + length(codons)
}
index <- 1
cur.trace <- vector("list", numCodons)
for(aa in names.aa)
{
if (aa == "M" || aa == "W" || aa == "X") next
codons <- AAToCodon(aa, T)
for(i in 1:length(codons))
{
if(what[1] == "Mutation"){
cur.trace[[index]]<- object$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codons[i], 0, T)
}else{
cur.trace[[index]] <- object$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codons[i], 1, T)
}
index <- index + 1
}
}
current.trace <- do.call("rbind", cur.trace)
## Transpose matrix to get in correct format for coda::mcmc. Transposing results in the same output from coda::geweke.diag as performing the test separately on each codon specific parameter
current.trace <- t(current.trace)
}
if(what[1] == "Alpha" || what[1] == "Lambda" || what[1] == "NSERate" || what[1] == "LambdaPrime")
{
codon.list <- codons()
codon.list <- codon.list[1:(length(codon.list)-3)]
cur.trace <- vector("list",length(codon.list))
for (i in 1:length(codon.list))
{
if (what[1]=="Alpha")
{
cur.trace[[i]]<- object$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codon.list[i], 0, F)
} else if (what[1]=="Lambda" || what[1]=="LambdaPrime"){
cur.trace[[i]]<- object$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codon.list[i], 1, F)
} else if (what[1]=="NSERate"){
cur.trace[[i]]<- object$getCodonSpecificParameterTraceByMixtureElementForCodon(mixture, codon.list[i], 2, F)
}
}
current.trace <- do.call("rbind", cur.trace)
## Transpose matrix to get in correct format for coda::mcmc. Transposing results in the same output from coda::geweke.diag as performing the test separately on each codon specific parameter
current.trace <- t(current.trace)
}
if(what[1] == "MixtureProbability")
{
numMixtures <- object$getNumberOfMixtures()
cur.trace <- vector("list", numMixtures)
for(i in 1:numMixtures)
{
cur.trace[[i]] <- object$getMixtureProbabilitiesTraceForMixture(i)
}
current.trace <- do.call("rbind", cur.trace)
current.trace <- t(current.trace)
}
if(what[1] == "Sphi")
{
sphi <- object$getStdDevSynthesisRateTraces()
current.trace <- do.call("rbind", sphi)
current.trace <- t(current.trace)
}
if(what[1] == "Mphi")
{
sphi <- object$getStdDevSynthesisRateTraces()
sphi <- do.call("rbind", sphi)
mphi <- -(sphi * sphi) / 2;
current.trace <- t(mphi)
}
if(what[1] == "Aphi")
{
# TODO need way to determine number of Aphi traces
}
if(what[1] == "Sepsilon")
{
# TODO need way to determine number of Sepsilon traces
}
if(what[1] == "ExpectedPhi")
{
current.trace <- object$getExpectedSynthesisRateTrace()
}
if(what[1] == "InitiationCost")
{
current.trace <- object$getInitiationCostTrace()
}
if(what[1] == "Expression")
{
# TODO need way to determine number of expression traces
}
if(what[1] == "AcceptanceCSP")
{
names.aa <- aminoAcids()
index <- 1
cur.trace <- vector("list", length(names.aa) - length(c("M","W","X")))
for(aa in names.aa)
{
if (aa == "M" || aa == "W" || aa == "X") next
cur.trace[[index]] <- object$getCodonSpecificAcceptanceRateTraceForAA(aa)
index <- index + 1
}
current.trace <- do.call("rbind", cur.trace)
current.trace <- t(current.trace)
}
trace.length <- length(current.trace)
start <- max(0, trace.length - samples)
mcmcobj <- coda::mcmc(data=current.trace, start=start, thin=thin)
if(plot){
coda::geweke.plot(mcmcobj, frac1=frac1, frac2=frac2)
} else{
diag <- coda::geweke.diag(mcmcobj, frac1=frac1, frac2=frac2)
return(diag)
}
}
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/R/traceObject.R |
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# genome <- initializeGenomeObject(file = "genome.fasta")
# parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
# model <- initializeModelObject(parameter = parameter, model = "ROC")
# mcmc <- initializeMCMCObject(samples = 5000, thinning = 10, adaptive.width=50)
# runMCMC(mcmc = mcmc, genome = genome, model = model)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2, geneAssignment = sample.int(2, length(genome), replace = T))
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# genome <- initializeGenomeObject(file = "genome.fasta", observed.expression.file = "synthesis_values.csv")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # One case of observed data
# sepsilon <- 0.1
# # Two cases of observed data
# sepsilon <- c(0.1, 0.5)
# # ...
# # Five cases of observed data
# sepsilon <- c(0.1, 0.5, 1, 0.8, 3)
#
# parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)), init.sepsilon = sepsilon)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# model <- initializeModelObject(parameter = parameter, model = "ROC", fix.observation.noise = TRUE)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# genome <- initializeGenomeObject(file = "genome.fasta")
# parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
# model <- initializeModelObject(parameter = parameter, model = "ROC")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=FALSE, est.csp=TRUE, est.hyper=TRUE, est.mix=TRUE)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=TRUE, est.csp=FALSE, est.hyper=TRUE, est.mix=TRUE)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=TRUE, est.csp=TRUE, est.hyper=FALSE, est.mix=TRUE)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=TRUE, est.csp=TRUE, est.hyper=TRUE, est.mix=FALSE)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# genome <- initializeGenomeObject(file = "genome.fasta")
# parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
# parameter$fixDM()
# parameter$fixDEta()
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# genome <- initializeGenomeObject(file = "genome.fasta")
# parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
#
# fix_dm <- TRUE
# fix_deta <- FALSE
#
# parameter$initMutationCategories(dM.file,1,fix_dm)
# parameter$initSelectionCategories(dEta.file,1,fix_deta)
#
# parameter <- initializeParameterObject(genome = genome, sphi = c(1,1), num.mixtures = 2, geneAssignment = sample.int(2, length(genome), replace = T), mixture.definition = "mutationShared")
#
# fix_dm <- TRUE
# fix_deta <- FALSE
#
# parameter$initMutationCategories(dM.file,1,fix_dm)
# parameter$initSelectionCategories(dEta.file,2,fix_deta)
#
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2
# , geneAssignment = sample.int(2, length(genome), replace = T),
# mixture.definition = "allUnique")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2
# , geneAssignment = sample.int(2, length(genome), replace = T),
# mixture.definition = "mutationShared")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2
# , geneAssignment = sample.int(2, length(genome), replace = T),
# mixture.definition = "selectionShared")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # [,1] [,2]
# #[1,] 1 1
# #[2,] 1 2
# #[3,] 1 3
# def.matrix <- matrix(c(1,1,1,1,2,3), ncol=2)
# parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2, 1), num.mixtures = 3,
# geneAssignment = sample.int(3, length(genome), replace = T),
# mixture.definition.matrix = def.matrix)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # [,1] [,2]
# #[1,] 1 1
# #[2,] 2 2
# #[3,] 3 3
# def.matrix <- matrix(c(1,2,3,1,2,3), ncol=2)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # [,1] [,2]
# #[1,] 1 1
# #[2,] 2 1
# #[3,] 1 2
# def.matrix <- matrix(c(1,2,1,1,1,2), ncol=2)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # writing a restart file every 1000 samples
# setRestartSettings(mcmc, "restart_file", 1000, write.multiple=TRUE)
# # writing a restart file every 1000 samples but overwriting it every time
# setRestartSettings(mcmc, "restart_file", 1000, write.multiple=FALSE)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# initializeParameterObject(init.with.restart.file = "restart_file.rst")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# #save objects after a run
# runMCMC(mcmc = mcmc, genome = genome, model = model)
# writeParameterObject(parameter = parameter, file = "parameter_out.Rda")
# writeMCMCObject(mcmc = mcmc, file = "mcmc_out.Rda")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# #save objects after a run
# parameter <- loadParameterObject(file = "parameter_out.Rda")
# mcmc <- loadMCMCObject(file = "mcmc_out.Rda")
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)),model="FONSE",init.initiation.cost=a1)
#
# model <- initializeModelObject(parameter,"FONSE")
#
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
#
# trace <- parameter$getTraceObject()
#
# plot(trace,what="InitiationCost")
#
# trace_a1 <- trace$getInitiationCostTrace()
# mean_a1 <- mean(trace_a1)
# sd_a1 <- sd(trace_a1)
# ci_a1 <- quantile(trace_a1,probs = c(0.025,0.975))
#
## ----echo=TRUE,eval=FALSE-----------------------------------------------------
#
# genome.pa <- initializeGenomeObject("rfp.csv",fasta=FALSE,positional=FALSE)
# genome.panse <- initializeGenomeObject("rfp.csv",fasta=FALSE,positional=TRUE)
#
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# csp_mat <- getCSPEstimates(parameter = parameter, CSP="Mutation", mixture = 1, samples = 1000)
# head(csp_mat)
# # AA Codon Posterior 0.025% 0.975%
# #1 A GCA -0.2435340 -0.2720696 -0.2165220
# #2 A GCC 0.4235546 0.4049132 0.4420680
# #3 A GCG 0.7004484 0.6648690 0.7351707
# #4 C TGC 0.2016298 0.1679025 0.2387024
# #5 D GAC 0.5775052 0.5618199 0.5936979
# #6 E GAA -0.4524295 -0.4688044 -0.4356677
#
# getCSPEstimates(parameter = parameter, filename = "mutation.csv", CSP="Mutation", mixture = 1, samples = 1000)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# phi_mat <- getExpressionEstimates(parameter = parameter,
# gene.index = 1:length(genome),
# samples = 1000)
# head(phi_mat)
# # Mean Mean.log10 Std.Dev log10.Std.Dev 0.025 0.975 log10.0.025 log10.0.975
# #[1,] 0.2729446 -0.6188447 0.0001261525 2.362358e-04 0.07331819 0.5455295 -1.13478830 -0.26319141
# #[2,] 1.4221716 0.1498953 0.0001669425 5.194123e-05 1.09593642 1.7562065 0.03978491 0.24457557
# #[3,] 0.7459888 -0.1512764 0.0002313539 1.529267e-04 0.31559618 1.2198282 -0.50086958 0.08629407
# #[4,] 0.6573082 -0.2030291 0.0001935466 1.400333e-04 0.31591233 1.0699855 -0.50043989 0.02937787
# #[5,] 1.6316901 0.2098120 0.0001846631 4.986347e-05 1.28410352 2.0035207 0.10860000 0.30179215
# #[6,] 0.6179711 -0.2286806 0.0001744928 1.374863e-04 0.28478950 0.9683327 -0.54550116 -0.01397541
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # sampling 100 genes at random
# phi_mat <- getExpressionEstimates(parameter = parameter,
# gene.index = sample(1:length(genome), 100),
# samples = 1000)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# selection.coefficients <- getSelectionCoefficients(genome = genome,
# parameter = parameter,
# samples = 1000)
# head(selection.coefficients)
# # GCA GCC GCG GCT TGC TGT GAC GAT ...
# #SAKL0A00132g -0.1630284 -0.008695144 -0.2097771 0 -0.1014373 0 0 -0.05092397 ...
# #SAKL0A00154g -0.8494558 -0.045305847 -1.0930388 0 -0.5285367 0 0 -0.26533820 ...
# #SAKL0A00176g -0.4455753 -0.023764823 -0.5733448 0 -0.2772397 0 0 -0.13918105 ...
# #SAKL0A00198g -0.3926068 -0.020939740 -0.5051875 0 -0.2442824 0 0 -0.12263567 ...
# #SAKL0A00220g -0.9746002 -0.051980440 -1.2540685 0 -0.6064022 0 0 -0.30442861 ...
# #SAKL0A00242g -0.3691110 -0.019686586 -0.4749542 0 -0.2296631 0 0 -0.11529644 ...
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# cai.weights <- getCAIweights(referenceGenome = genome)
# head(cai.weights)
# # GCA GCC GCG GCT TGC TGT
# #0.7251276 0.6282192 0.2497737 1.0000000 0.6222628 1.0000000
#
# nc.per.aa <- getNcAA(genome = genome)
# head(nc.per.aa)
# # A C D E F G ...
# #SAKL0A00132g 3.611111 1.000000 2.200000 2.142857 1.792453 4.109589 ...
# #SAKL0A00154g 1.843866 2.500000 2.035782 1.942505 1.986595 2.752660 ...
# #SAKL0A00176g 5.142857 NA 1.857143 1.652174 1.551724 3.122449 ...
# #SAKL0A00198g 3.800000 NA 1.924779 1.913043 2.129032 4.136364 ...
# #SAKL0A00220g 3.198529 1.666667 1.741573 1.756757 2.000000 1.371638 ...
# #SAKL0A00242g 4.500000 NA 2.095890 2.000000 1.408163 3.734043 ...
#
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# selection.coefficients <- getSelectionCoefficients(genome = genome,
# parameter = parameter,
# samples = 1000)
# s <- exp(selection.coefficients)
# cai.weights <- getCAIweights(referenceGenome = ref.genome)
#
# codon.names <- colnames(s)
# h <- hist(s[, 1], plot = F)
# plot(NULL, NULL, axes = F, xlim = c(0,1), ylim = range(c(0,h$counts)),
# xlab = "s", ylab = "Frequency", main = codon.names[1], cex.lab = 1.2)
# lines(x = h$breaks, y = c(0,h$counts), type = "S", lwd=2)
# abline(v = cai.weights[1], lwd=2, lty=2)
# axis(1, lwd = 3, cex.axis = 1.2)
# axis(2, lwd = 3, cex.axis = 1.2)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# trace <- getTrace(parameter)
# plot(x = trace, what = "Mutation", mixture = 1)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# trace <- parameter$getTraceObject()
# plot(x = trace, what = "Expression", mixture = 1, geneIndex = 669)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# plot(mcmc, what = "LogPosterior", zoom.window = c(9000, 10000))
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# # use the last 500 samples from mixture 1 for posterior estimate.
# plot(x = model, genome = genome, samples = 500, mixture = 1)
## ---- echo = TRUE, eval = FALSE-----------------------------------------------
# plot(parameter, what = "Selection", samples = 500)
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/inst/doc/anacoda.R |
---
title: "AnaCoDa: Analyzing Codon Data"
author: "Cedric Landerer & Alexander L. Cope"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
bibliography: anacoda.bib
vignette: >
%\VignetteIndexEntry{Analyzing Codon Data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
AnaCoDa allows for the estimation of biologically relevant parameters like mutation bias or ribosome pausing time, depending on the model employed. Bayesian estimation of parameters is performed using an adaptive Metropolis-Hastings within Gibbs sampling approach. Models implemented in AnaCoDa are currently able to handle gene coding sequences and ribosome footprinting data.
## The AnaCoDa framework
The AnaCoDa framework works with gene specific data such as codon frequencies or position specific footprint counts.
Conceptually, AnaCoDa uses three different types of parameters.
* The first type of parameters are **gene specific parameters** such as gene expression level or functionality.
Gene-specific parameters are estimated separately for each gene and can vary between potential gene categories or sets.
* The second type of parameters are **gene-set specific parameters**, such as mutation bias terms or translation error rates.
These parameters are shared across genes within a set and can be exclusive to a single set or shared with other sets.
While the number of gene sets must be pre-defined by the user, set assignment of genes can be pre-defined or estimated as part of the model fitting.
Estimation of the set assignment provides the probability of a gene being assigned to a set, allowing the user to assess the uncertainty in each assignment.
* The third type of parameters are **hyperparameters**, such as parameters controlling the prior distribution for mutation bias or error rate.
Hyperparameters can be set specific or shared across multiple sets and allow for the construction and analysis of hierarchical models, by controlling prior distributions for gene or gene-set specific parameters.
## Analyzing protein coding gene sequences
AnaCoDa always requires the following four objects:
* **Genome** contains the codon data read from a fasta file as well as empirical protein synthesis rate in the form of a comma separated (.csv) ID/Value pairs.
* **Parameter** represents the parameter set (including parameter traces) for a given genome. The parameter object also hold the mapping of parameters to specified sets.
* **Model** allows you to specify which model should be applied to the genome and the parameter object.
* **MCMC** specifies how many samples from the posterior distribution of the specified model should be stored to obtain parameter estimates.
## Setup of AnaCoDa
### Application of codon model to single genome
In this example we are assuming a genome with only one set of gene-set specific parameters, hence `num.mixtures = 1`.
We assign all genes the same gene-set, and provide an initial value for the hyperparameter sphi ($s_\phi$).
$s_\phi$ controls the lognormal prior distribution on the gene specific parameters like the protein synthesis rate $\phi$.
To ensure identifiability the expected value of the prior distribution is assumed to be 1.
\begin{align}
E[\phi] = \exp\left(m_\phi+\frac{s_\phi^2}{2}\right) = 1
\end{align}
Therefore, the mean $m_\phi$ is set to be $-\frac{s_\phi^2}{2}$.
For more details see [@gilchrist2015]
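As a quick worked example of this relationship (a minimal sketch; the value of `sphi` is purely illustrative):

```{r, echo = TRUE, eval = FALSE}
sphi <- 1
mphi <- -sphi^2 / 2
# the prior mean of phi: exp(mphi + sphi^2/2) evaluates to 1
exp(mphi + sphi^2 / 2)
```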
After choosing the model and specifying the necessary arguments for the MCMC routine, the MCMC is run.
```{r, echo = TRUE, eval = FALSE}
genome <- initializeGenomeObject(file = "genome.fasta")
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
model <- initializeModelObject(parameter = parameter, model = "ROC")
mcmc <- initializeMCMCObject(samples = 5000, thinning = 10, adaptive.width=50)
runMCMC(mcmc = mcmc, genome = genome, model = model)
```
`runMCMC` does not return a value; the results of the MCMC are stored automatically in the `mcmc` and `parameter` objects created earlier.
> **Please note that AnaCoDa utilizes C++ object orientation and therefore employs pointer structures.
> This means that no return value is necessary for such objects as they are modified within the `runMCMC`
> routine.
> You will find that after a completed run, the `parameter`
> object will contain all necessary information without being directly passed into the MCMC routine. This might be confusing at first as it is not default R behaviour.**
### Application of codon model to a mixture of genomes
This case applies if we assume that parts of the genome differ in their gene-set specific parameters. This could be due to introgression events, strand-specific mutation differences, or other factors. We make the assumption that all sets of genes are independent of one another.
For two sets of gene-set specific parameter with a random gene assignment we can use:
```{r, echo = TRUE, eval = FALSE}
parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2, geneAssignment = sample.int(2, length(genome), replace = T))
```
To accommodate this mixing we only have to adjust `sphi`, which is now a vector of length 2, `num.mixtures`, and `geneAssignment`, which is chosen at random here.
### Using empirical protein synthesis rate values
To use empirical values as prior information one can simply specify an `observed.expression.file` when initializing the genome object.
```{r, echo = TRUE, eval = FALSE}
genome <- initializeGenomeObject(file = "genome.fasta", observed.expression.file = "synthesis_values.csv")
```
These observed expression or synthesis values ($\Phi$) are independent of the number of gene-sets.
The error in the observed $\Phi$ values is estimated and described by sepsilon ($s_\epsilon$).
The csv file can contain multiple observation sets separated by comma.
For each set of observations an initial $s_\epsilon$ has to be specified.
```{r, echo = TRUE, eval = FALSE}
# One case of observed data
sepsilon <- 0.1
# Two cases of observed data
sepsilon <- c(0.1, 0.5)
# ...
# Five cases of observed data
sepsilon <- c(0.1, 0.5, 1, 0.8, 3)
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)), init.sepsilon = sepsilon)
```
In addition one can choose to keep the noise in the observations ($s_\epsilon$) constant by using the `fix.observation.noise` flag in the model object.
```{r, echo = TRUE, eval = FALSE}
model <- initializeModelObject(parameter = parameter, model = "ROC", fix.observation.noise = TRUE)
```
### Keeping parameter types fixed
It can sometimes be advantageous to fix certain parameters, like the gene-specific parameters.
For example, in cases where only a few sequences are available but gene expression measurements are at hand, we can fix the gene-specific parameters to increase confidence in our estimates of gene-set specific parameters.
We again initialize our `genome`, `parameter`, and `model` objects.
```{r, echo = TRUE, eval = FALSE}
genome <- initializeGenomeObject(file = "genome.fasta")
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
model <- initializeModelObject(parameter = parameter, model = "ROC")
```
To fix gene specific parameters we will set the `est.expression` flag to `FALSE`.
This will estimate only gene-set specific parameters, hyperparameters, and the assignments of genes to various sets.
```{r, echo = TRUE, eval = FALSE}
mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=FALSE, est.csp=TRUE, est.hyper=TRUE, est.mix=TRUE)
```
If we would like to fix gene-set specific parameters we instead disable the `est.csp` flag.
```{r, echo = TRUE, eval = FALSE}
mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=TRUE, est.csp=FALSE, est.hyper=TRUE, est.mix=TRUE)
```
The same applies to the hyperparameters (`est.hyper`),
```{r, echo = TRUE, eval = FALSE}
mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=TRUE, est.csp=TRUE, est.hyper=FALSE, est.mix=TRUE)
```
and gene set assignment (`est.mix`).
```{r, echo = TRUE, eval = FALSE}
mcmc <- initializeMCMCObject(samples, thinning=1, adaptive.width=100, est.expression=TRUE, est.csp=TRUE, est.hyper=TRUE, est.mix=FALSE)
```
We can use these flags to fix parameters in any combination.
For some analyses, it may be useful to keep specific parameter types fixed. Parameter objects have code for keeping some gene-set specific parameters fixed while estimating others. For example, when using a ROC parameter object, fixDM() or fixDEta() can be used to fix mutation bias and selection gene-set specific parameters respectively.
```{r, echo = TRUE, eval = FALSE}
genome <- initializeGenomeObject(file = "genome.fasta")
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
parameter$fixDM()
parameter$fixDEta()
```
### Initializing Starting Parameters
If you're choosing to fix certain parameters, then it is a good idea to set these parameters to good values based on empirical data or previous analyses. There are many ways to initialize these values. Most hyperparameters and gene-specific parameters can actually be initialized when initializing the Parameter object, as demonstrated with $s_\phi$. Protein production rates $\phi$ can also be initialized in the Parameter object using the argument initial.expression.values, which takes a vector (must equal the length of the genome) of values to use as $\phi$. Note that these should be in the same order as the genes in the Genome object.
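For example, a minimal sketch of initializing $\phi$ from empirical values (the file name and column are hypothetical; the vector must be in the same order as the genes in the Genome object):

```{r, echo = TRUE, eval = FALSE}
# hypothetical csv with one phi value per gene, in genome order
phi.init <- read.csv("initial_phi.csv")$phi
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1,
                                       geneAssignment = rep(1, length(genome)),
                                       initial.expression.values = phi.init)
```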
Gene-set specific parameters are initialized using functions after the Parameter object has been initialized. For ROC and FONSE, initMutationCategories and initSelectionCategories takes in a vector of file names (must equal the number of corresponding categories), the number of categories for that gene-set specific parameter (see Combining various gene-set specific parameters to a gene-set description), and an optional argument for keeping a parameter fixed at these values (an alternative to fixDM() and fixDEta). Note that if you use a mixture.definition value (see Combining various gene-set specific parameters to a gene-set description) such as "mutationShared", then there would be only one mutation category, but multiple selection categories.
```{r, echo = TRUE, eval = FALSE}
genome <- initializeGenomeObject(file = "genome.fasta")
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)))
fix_dm <- TRUE
fix_deta <- FALSE
parameter$initMutationCategories(dM.file,1,fix_dm)
parameter$initSelectionCategories(dEta.file,1,fix_deta)
parameter <- initializeParameterObject(genome = genome, sphi = c(1,1), num.mixtures = 2, geneAssignment = sample.int(2, length(genome), replace = T), mixture.definition = "mutationShared")
fix_dm <- TRUE
fix_deta <- FALSE
parameter$initMutationCategories(dM.file,1,fix_dm)
parameter$initSelectionCategories(dEta.file,2,fix_deta)
```
### Combining various gene-set specific parameters to a gene-set description.
We distinguish between three simple cases of gene-set descriptions, and the ability to customize the parameter mapping.
The specification is done when initializing the parameter object with the `mixture.definition` argument.
We encounter the simplest case when we assume that all gene sets are independent.
```{r, echo = TRUE, eval = FALSE}
parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2
, geneAssignment = sample.int(2, length(genome), replace = T),
mixture.definition = "allUnique")
```
The `allUnique` keyword allows each type of gene-set specific parameter to be estimated independently of the parameters describing other gene sets.
In case we want to share mutation parameter between gene sets we can use the keyword `mutationShared`.
```{r, echo = TRUE, eval = FALSE}
parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2
, geneAssignment = sample.int(2, length(genome), replace = T),
mixture.definition = "mutationShared")
```
This will force all gene sets to share the same mutation parameters.
The same can be done with parameters describing selection, using the keyword `selectionShared`
```{r, echo = TRUE, eval = FALSE}
parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2), num.mixtures = 2
, geneAssignment = sample.int(2, length(genome), replace = T),
mixture.definition = "selectionShared")
```
For more intricate compositions of gene sets, one can specify a custom $n\times2$ matrix, where $n$ is the number of gene sets, to describe how gene-set specific parameters should be shared.
Instead of using the `mixture.definition` argument one uses the `mixture.definition.matrix` argument.
The matrix representation of `mutationShared` can be obtained by
```{r, echo = TRUE, eval = FALSE}
# [,1] [,2]
#[1,] 1 1
#[2,] 1 2
#[3,] 1 3
def.matrix <- matrix(c(1,1,1,1,2,3), ncol=2)
parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2, 1), num.mixtures = 3,
geneAssignment = sample.int(3, length(genome), replace = T),
mixture.definition.matrix = def.matrix)
```
Columns represent mutation and selection, while each row represents a gene set.
In this case we have three gene sets, each sharing the same mutation category and three different selection categories.
In the same way one can produce the matrix for three independent gene sets equivalent to the `allUnique` keyword.
```{r, echo = TRUE, eval = FALSE}
# [,1] [,2]
#[1,] 1 1
#[2,] 2 2
#[3,] 3 3
def.matrix <- matrix(c(1,2,3,1,2,3), ncol=2)
```
We can also use this matrix to produce more complex gene set compositions.
```{r, echo = TRUE, eval = FALSE}
# [,1] [,2]
#[1,] 1 1
#[2,] 2 1
#[3,] 1 2
def.matrix <- matrix(c(1,2,1,1,1,2), ncol=2)
```
In this case gene set one and three share their mutation parameters, while gene set one and two share their selection parameters.
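As a minimal sketch (assuming the same `genome` object as before), such a custom matrix is passed to the parameter object just like the `mutationShared` matrix example above:
```{r, echo = TRUE, eval = FALSE}
# sketch: using the custom matrix from above; assumes `genome` exists
def.matrix <- matrix(c(1,2,1,1,1,2), ncol=2)
parameter <- initializeParameterObject(genome = genome, sphi = c(0.5, 2, 1), num.mixtures = 3,
                                       geneAssignment = sample.int(3, length(genome), replace = T),
                                       mixture.definition.matrix = def.matrix)
```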
### Checkpointing
AnaCoDa provides checkpointing functionality in case runtime has to be restricted.
To enable checkpointing, one can use the function `setRestartSettings`.
```{r, echo = TRUE, eval = FALSE}
# writing a restart file every 1000 samples
setRestartSettings(mcmc, "restart_file", 1000, write.multiple=TRUE)
# writing a restart file every 1000 samples but overwriting it every time
setRestartSettings(mcmc, "restart_file", 1000, write.multiple=FALSE)
```
To re-initialize a parameter object from a restart file, one can simply pass the restart file to the initialization function:
```{r, echo = TRUE, eval = FALSE}
parameter <- initializeParameterObject(init.with.restart.file = "restart_file.rst")
```
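A run can then be continued with the re-initialized `parameter` object. Below is a minimal sketch, assuming the original fit used the ROC model and that the same `genome` object is still available:
```{r, echo = TRUE, eval = FALSE}
# sketch: continuing a run after re-initializing `parameter` from the restart file
model <- initializeModelObject(parameter = parameter, model = "ROC")
mcmc <- initializeMCMCObject(samples = 5000, thinning = 10, adaptive.width = 50)
runMCMC(mcmc = mcmc, genome = genome, model = model)
```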
### Load and save parameter objects
AnaCoDa is based on C++ objects exposed to R via Rcpp [@rcpp_article]. This comes with the problem that C++ objects are not serializable by default and can therefore not be saved/loaded with the default R save/load functions.
AnaCoDa, however, does provide functions to load and save `parameter` and `mcmc` objects. These are the only two objects that store information during a run.
```{r, echo = TRUE, eval = FALSE}
#save objects after a run
runMCMC(mcmc = mcmc, genome = genome, model = model)
writeParameterObject(parameter = parameter, file = "parameter_out.Rda")
writeMCMCObject(mcmc = mcmc, file = "mcmc_out.Rda")
```
As `genome` and `model` objects are purely storage containers, no save/load functions are provided for them at this point, but they will be added in the future.
```{r, echo = TRUE, eval = FALSE}
#load objects from a previous run
parameter <- loadParameterObject(file = "parameter_out.Rda")
mcmc <- loadMCMCObject(file = "mcmc_out.Rda")
```
## File formats
### Protein coding sequence
Protein coding sequences are provided as a fasta file in the default format:
a line starting with `>` containing the sequence id, followed by one or more lines containing the sequence.
The sequences are expected to have a length that is a multiple of three. If a codon can not be recognized (e.g. AGN), it is ignored.
```
>YAL001C
TTGGTTCTGACTCATTAGCCAGACGAACTGGTTCAA
CATGTTTCTGACATTCATTCTAACATTGGCATTCAT
ACTCTGAACCAACTGTAAGACCATTCTGGCATTTAG
>YAL002W
TTGGAACAAAACGGCCTGGACCACGACTCACGCTCT
TCACATGACACTACTCATAACGACACTCAAATTACT
TTCCTGGAATTCCGCTCTTAGACTCAACTGTCAGAA
```
### Empirical expression
Empirical expression or gene specific parameters are provided in a csv file format.
The first line is expected to be a header describing each column.
The first column is expected to be the gene id, and every additional column is expected to represent a measurement.
Each row corresponds to one gene and contains all measurements for that gene, including missing values.
```
ORF,DATA_1,DATA_2,...,DATA_N
YAL001C,0.254,0.489,...,0.156
YAL002W,1.856,1.357,...,2.014
YAL003W,10.45,NA,...,9.564
YAL005C,0.556,0.957,...,0.758
```
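Such a file can be sanity-checked with plain base R before it is passed to AnaCoDa. This is a generic sketch, not an AnaCoDa routine; `synthesis_values.csv` is a placeholder file name:
```{r, echo = TRUE, eval = FALSE}
# generic base-R check of an empirical expression file (not an AnaCoDa routine)
obs <- read.csv("synthesis_values.csv", header = TRUE)
head(obs)                  # first column: gene ids; remaining columns: measurements
colSums(is.na(obs[, -1]))  # missing values per measurement column
```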
## Estimating Nonsense Error Probabilities from codon data
NOTE: The model described here is unpublished and currently in beta testing. Please use with caution.
A common observation is a tendency for inefficient codons to be more frequent at the 5'-ends of transcripts. One explanation for this is selection against ribosome drop-off, or nonsense errors, during translation. It is expected that nonsense errors are less costly towards the beginning of translation. AnaCoDa contains a model, the First Order Nonsense Error (FONSE) model, for estimating codon-specific nonsense error probabilities from codon count data. This model's functionality is very similar to ROC. The primary difference when fitting a FONSE model instead of a ROC model to genomic data is specifying FONSE when initializing the Parameter and Model objects.
```{r, echo = TRUE, eval = FALSE}
# a1 holds the user-chosen initial value for the indirect cost of translation initiation
parameter <- initializeParameterObject(genome = genome, sphi = 1, num.mixtures = 1, geneAssignment = rep(1, length(genome)), model = "FONSE", init.initiation.cost = a1)
model <- initializeModelObject(parameter = parameter, model = "FONSE")
```
FONSE also estimates the indirect cost of translation initiation. This value can be initialized when creating the parameter object via `init.initiation.cost`. After the MCMC has run, estimates for the initiation cost can be pulled from the trace object.
```{r, echo = TRUE, eval = FALSE}
trace <- parameter$getTraceObject()
plot(trace,what="InitiationCost")
trace_a1 <- trace$getInitiationCostTrace()
mean_a1 <- mean(trace_a1)
sd_a1 <- sd(trace_a1)
ci_a1 <- quantile(trace_a1,probs = c(0.025,0.975))
```
### Application of AnaCoDa to Ribo-Seq data
NOTE: The models described here are unpublished and currently in beta testing. Please use with caution.
Ribosome footprinting, or Ribo-Seq, is a relatively new technology that has allowed researchers to explore the translatome (the mRNA being actively translated in the cell).
The current version of AnaCoDa contains two models for estimating parameters from RFP data: the Pausing Model (PA) and the Pausing and Nonsense Error (PANSE) model.
Although provided with the initial release of AnaCoDa, these models are still in beta testing.
Unlike ROC and FONSE, the Genome object for PA/PANSE takes in Ribosome Foot-Printing (RFP) counts in a csv file format.
The first line is expected to be a header describing each column.
The columns are expected in the following order: gene id, position, codon, rfpCount.
Each row corresponds to a single codon with an associated number of ribosome footprints.
```
GeneID,Position,Codon,rfpCount
YBR177C, 0, ATA, 8
YBR177C, 1, CGG, 1
YBR177C, 2, GTT, 8
YBR177C, 3, CGC, 1
```
```{r, echo = TRUE, eval = FALSE}
# PA ignores positional information, while PANSE requires it
genome.pa <- initializeGenomeObject("rfp.csv", fasta = FALSE, positional = FALSE)
genome.panse <- initializeGenomeObject("rfp.csv", fasta = FALSE, positional = TRUE)
```
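Model fitting then proceeds as for the other models. Below is a minimal sketch for PA, assuming the keyword `"PA"` is passed to the parameter and model initializers analogously to the FONSE example above:
```{r, echo = TRUE, eval = FALSE}
# sketch: fitting the PA model, analogous to the FONSE example above
parameter <- initializeParameterObject(genome = genome.pa, sphi = 1, num.mixtures = 1,
                                       geneAssignment = rep(1, length(genome.pa)), model = "PA")
model <- initializeModelObject(parameter = parameter, model = "PA")
mcmc <- initializeMCMCObject(samples = 5000, thinning = 10, adaptive.width = 50)
runMCMC(mcmc = mcmc, genome = genome.pa, model = model)
```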
## Analyzing and Visualizing results
### Parameter estimates
After we have completed the model fitting, we are interested in the results.
AnaCoDa provides functions to obtain the posterior estimate for each parameter.
For gene-set specific parameters or codon specific parameters we can use the function `getCSPEstimates`.
Again we can specify for which mixture we would like the posterior estimate and how many samples should be used.
`getCSPEstimates` has an optional argument `filename` which will cause the routine to write the result as a csv file instead of returning a `data.frame`.
```{r, echo = TRUE, eval = FALSE}
csp_mat <- getCSPEstimates(parameter = parameter, CSP="Mutation", mixture = 1, samples = 1000)
head(csp_mat)
# AA Codon Posterior 0.025% 0.975%
#1 A GCA -0.2435340 -0.2720696 -0.2165220
#2 A GCC 0.4235546 0.4049132 0.4420680
#3 A GCG 0.7004484 0.6648690 0.7351707
#4 C TGC 0.2016298 0.1679025 0.2387024
#5 D GAC 0.5775052 0.5618199 0.5936979
#6 E GAA -0.4524295 -0.4688044 -0.4356677
getCSPEstimates(parameter = parameter, filename = "mutation.csv", CSP="Mutation", mixture = 1, samples = 1000)
```
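The same routine can be used for the selection parameters. This sketch assumes that `CSP = "Selection"` is accepted analogously to `CSP = "Mutation"` above:
```{r, echo = TRUE, eval = FALSE}
# sketch: posterior estimates for the selection parameters,
# assuming CSP = "Selection" works analogously to CSP = "Mutation"
sel_mat <- getCSPEstimates(parameter = parameter, CSP = "Selection", mixture = 1, samples = 1000)
head(sel_mat)
```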
To obtain posterior estimates for the gene specific parameters, we can use the function `getExpressionEstimates`.
In the case below we request the gene specific parameters for all genes, under the assumption that each gene is assigned to mixture 1.
```{r, echo = TRUE, eval = FALSE}
phi_mat <- getExpressionEstimates(parameter = parameter,
gene.index = 1:length(genome),
samples = 1000)
head(phi_mat)
# Mean Mean.log10 Std.Dev log10.Std.Dev 0.025 0.975 log10.0.025 log10.0.975
#[1,] 0.2729446 -0.6188447 0.0001261525 2.362358e-04 0.07331819 0.5455295 -1.13478830 -0.26319141
#[2,] 1.4221716 0.1498953 0.0001669425 5.194123e-05 1.09593642 1.7562065 0.03978491 0.24457557
#[3,] 0.7459888 -0.1512764 0.0002313539 1.529267e-04 0.31559618 1.2198282 -0.50086958 0.08629407
#[4,] 0.6573082 -0.2030291 0.0001935466 1.400333e-04 0.31591233 1.0699855 -0.50043989 0.02937787
#[5,] 1.6316901 0.2098120 0.0001846631 4.986347e-05 1.28410352 2.0035207 0.10860000 0.30179215
#[6,] 0.6179711 -0.2286806 0.0001744928 1.374863e-04 0.28478950 0.9683327 -0.54550116 -0.01397541
```
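The returned matrix can be explored with standard R tools, for example to get a quick look at the distribution of posterior mean $\phi$ values (assuming the column layout shown above):
```{r, echo = TRUE, eval = FALSE}
# distribution of posterior mean protein synthesis rates;
# column names as printed in the output above
hist(phi_mat[, "Mean.log10"], xlab = "log10(phi)", main = "Posterior mean synthesis rates")
```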
However, we can also decide to obtain only certain gene parameters. In the example below we sample 100 random genes.
```{r, echo = TRUE, eval = FALSE}
# sampling 100 genes at random
phi_mat <- getExpressionEstimates(parameter = parameter,
gene.index = sample(1:length(genome), 100),
samples = 1000)
```
Furthermore, AnaCoDa allows us to calculate the selection coefficient $s$ for each codon and each gene.
We can use the function `getSelectionCoefficients` to do so. Please note that this function returns $\log(s)$.
`getSelectionCoefficients` returns a matrix with $\log(s)$ relative to the most efficient synonymous codon.
```{r, echo = TRUE, eval = FALSE}
selection.coefficients <- getSelectionCoefficients(genome = genome,
parameter = parameter,
samples = 1000)
head(selection.coefficients)
# GCA GCC GCG GCT TGC TGT GAC GAT ...
#SAKL0A00132g -0.1630284 -0.008695144 -0.2097771 0 -0.1014373 0 0 -0.05092397 ...
#SAKL0A00154g -0.8494558 -0.045305847 -1.0930388 0 -0.5285367 0 0 -0.26533820 ...
#SAKL0A00176g -0.4455753 -0.023764823 -0.5733448 0 -0.2772397 0 0 -0.13918105 ...
#SAKL0A00198g -0.3926068 -0.020939740 -0.5051875 0 -0.2442824 0 0 -0.12263567 ...
#SAKL0A00220g -0.9746002 -0.051980440 -1.2540685 0 -0.6064022 0 0 -0.30442861 ...
#SAKL0A00242g -0.3691110 -0.019686586 -0.4749542 0 -0.2296631 0 0 -0.11529644 ...
```
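For a genome-wide summary, these per-gene selection coefficients can be aggregated with plain base R (not an AnaCoDa routine), for example by taking the median $\log(s)$ per codon:
```{r, echo = TRUE, eval = FALSE}
# median log(s) per codon across all genes (plain base R)
median.log.s <- apply(selection.coefficients, 2, median, na.rm = TRUE)
# codons with the most negative log(s), i.e. the strongest selection against them
head(sort(median.log.s))
```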
We can compare these values to the weights from the codon adaptation index (CAI) [@SharpLi1987] or the effective number of codons (Nc) [@Wright1990] by using the functions
`getCAIweights` and `getNcAA`.
```{r, echo = TRUE, eval = FALSE}
cai.weights <- getCAIweights(referenceGenome = genome)
head(cai.weights)
# GCA GCC GCG GCT TGC TGT
#0.7251276 0.6282192 0.2497737 1.0000000 0.6222628 1.0000000
nc.per.aa <- getNcAA(genome = genome)
head(nc.per.aa)
# A C D E F G ...
#SAKL0A00132g 3.611111 1.000000 2.200000 2.142857 1.792453 4.109589 ...
#SAKL0A00154g 1.843866 2.500000 2.035782 1.942505 1.986595 2.752660 ...
#SAKL0A00176g 5.142857 NA 1.857143 1.652174 1.551724 3.122449 ...
#SAKL0A00198g 3.800000 NA 1.924779 1.913043 2.129032 4.136364 ...
#SAKL0A00220g 3.198529 1.666667 1.741573 1.756757 2.000000 1.371638 ...
#SAKL0A00242g 4.500000 NA 2.095890 2.000000 1.408163 3.734043 ...
```
We can compare the distribution of selection coefficients to the CAI values estimated from a reference set of genes.
```{r, echo = TRUE, eval = FALSE}
selection.coefficients <- getSelectionCoefficients(genome = genome,
parameter = parameter,
samples = 1000)
s <- exp(selection.coefficients)
# ref.genome is a Genome object built from the reference gene set
cai.weights <- getCAIweights(referenceGenome = ref.genome)
codon.names <- colnames(s)
h <- hist(s[, 1], plot = F)
plot(NULL, NULL, axes = F, xlim = c(0,1), ylim = range(c(0,h$counts)),
xlab = "s", ylab = "Frequency", main = codon.names[1], cex.lab = 1.2)
lines(x = h$breaks, y = c(0,h$counts), type = "S", lwd=2)
abline(v = cai.weights[1], lwd=2, lty=2)
axis(1, lwd = 3, cex.axis = 1.2)
axis(2, lwd = 3, cex.axis = 1.2)
```

### Diagnostic plots
A first step after every run should be to determine if the sampling routine has converged.
To do that, AnaCoDa provides plotting routines to visualize all sampled parameter traces from which the posterior sample is obtained.
First we have to obtain the trace object stored within our parameter object.
Now we can just plot the trace object. The argument `what` specifies which type of parameter should be plotted.
Here we plot the mutation parameter $\Delta M$ of the ROC model (matching `what = "Mutation"` in the code below). These parameters are mixture specific, and one can decide which mixture set to visualize using the argument `mixture`.
```{r, echo = TRUE, eval = FALSE}
trace <- getTrace(parameter)
plot(x = trace, what = "Mutation", mixture = 1)
```

A special case is the plotting of traces of the protein synthesis rate $\phi$. As the number of $\phi$ traces is usually in the thousands, a `geneIndex` has to be passed to determine for which gene the trace should be plotted.
This allows us to inspect the trace of every gene under every mixture assignment.
```{r, echo = TRUE, eval = FALSE}
trace <- parameter$getTraceObject()
plot(x = trace, what = "Expression", mixture = 1, geneIndex = 669)
```

We can find the likelihood and posterior trace of the model fit in the mcmc object.
The trace can be plotted by just passing the `mcmc` object to the `plot` routine.
Again we can switch between log(likelihood) and log(posterior) using the argument `what`.
The argument `zoom.window` is used to inspect a specified window in more detail. It defaults to the last 10% of the trace.
The log(posterior) displayed in the figure title is also estimated over the `zoom.window`.
```{r, echo = TRUE, eval = FALSE}
plot(mcmc, what = "LogPosterior", zoom.window = c(9000, 10000))
```

### Model visualization
We can visualize the results of the model fit by plotting the model object.
For this we require the model and the genome object.
We can adjust which mixture set we would like to visualize and how many samples should be used to obtain the posterior estimate for each parameter. For more details see [@gilchrist2015].
```{r, echo = TRUE, eval = FALSE}
# use the last 500 samples from mixture 1 for posterior estimate.
plot(x = model, genome = genome, samples = 500, mixture = 1)
```

As AnaCoDa is designed to allow gene-sets to have independent gene-set specific parameters, it also provides the option to compare different gene-sets by plotting the `parameter` object.
Here we compare the selection parameter estimated by ROC for seven yeast species.
```{r, echo = TRUE, eval = FALSE}
plot(parameter, what = "Selection", samples = 500)
```

# References
| /scratch/gouwar.j/cran-all/cranData/AnaCoDa/inst/doc/anacoda.Rmd |
#' @title database_metazoan_creation
#'
#' @description Create a Database for metazoan kingdom for Global analysis by Taxon_MWU analysis from targeted analysis. Please, run setwd("02_Global_analysis") after this function.
#'
#' @param nothing It's important not to write anything between the brackets, the database will create itself.
#'
#' @return A data frame file named database_metazoan_package_all.tab created from the taxonomy_all_metazoan_QIIME2_and_NCBI_format.txt file and your own taxonomy_RepSeq.tsv file.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' # It is important not to write anything between the brackets, the database will create itself.
#' \dontrun{database_metazoan_creation()}
#' # Please, run setwd("02_Global_analysis") after this function.
#' @export
database_metazoan_creation <- function (nothing)
{
oldwd <- getwd()
on.exit(setwd(oldwd))
# Create the sub-directory "02_Global_analysis" if it does not already exist
if (!dir.exists("02_Global_analysis")) {dir.create("02_Global_analysis")}
setwd("02_Global_analysis")
# NOTE: `kingdom` and `original_dir` are assumed to be defined in the calling
# environment (set during the earlier Anaconda setup steps)
file.copy(from = file.path(kingdom, paste("taxonomy_RepSeq.tsv", sep="")), to ="taxonomy_RepSeq.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(kingdom, paste("taxonomy_RepSeq.tsv", sep="")))
all_taxon <- read.table(file.path(original_dir, paste("Working_scripts/taxonomy_all_metazoan_QIIME2_and_NCBI_format.txt", sep="")), sep="\t")
#all_taxon <- read.table(system.file("extdata", "taxonomy_all_metazoan_QIIME2_and_NCBI_format.txt", package="Anaconda"), sep="\t")
taxo <- read.table("taxonomy_RepSeq.tsv", sep="\t", header=T)
taxo_database <- merge(taxo,all_taxon, by.x="Taxon", by.y="V2", all.x=T)
# Delete the duplicated rows that didn't have assigned if the duplicated ID have assignation
i <- taxo_database$V3!=""
taxo_database <- taxo_database[i | !taxo_database$Feature.ID %in% taxo_database$Feature.ID[i],]
#taxo_database_df <- taxo_database[-which(duplicated(taxo_database)), ]
taxon_Annotations <- data.frame(taxo_database$Feature.ID, taxo_database$V3)
write.table(taxon_Annotations, file="database_metazoan_package_all.tab", sep="\t", col.names = F, row.names = F, quote = F)
}
#' @title database_bacteria_creation
#'
#' @description Create a Database for Bacteria kingdom for Global analysis by Taxon_MWU analysis from targeted analysis. Please, run setwd("02_Global_analysis") after this function.
#'
#' @param nothing It's important not to write anything between the brackets, the database will create itself.
#'
#' @return A data frame file named database_bacteria_package_all.tab created from the taxonomy_all_bacteria_QIIME2_and_NCBI_format.txt file and your own taxonomy_RepSeq.tsv file.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' # It is important not to write anything between the brackets, the database will create itself.
#' \dontrun{database_bacteria_creation()}
#' # Please, run setwd("02_Global_analysis") after this function.
#' @export
database_bacteria_creation <- function (nothing)
{
oldwd <- getwd()
on.exit(setwd(oldwd))
# Create the sub-directory "02_Global_analysis" if it does not already exist
if (!dir.exists("02_Global_analysis")) {dir.create("02_Global_analysis")}
setwd("02_Global_analysis")
file.copy(from = file.path(kingdom, paste("taxonomy_RepSeq.tsv", sep="")), to ="taxonomy_RepSeq.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(kingdom, paste("taxonomy_RepSeq.tsv", sep="")))
all_taxon <- read.table(file.path(original_dir, paste("Working_scripts/taxonomy_all_bacteria_QIIME2_and_NCBI_format.txt", sep="")), sep="\t")
#all_taxon <- read.table(system.file("extdata", "taxonomy_all_bacteria_QIIME2_and_NCBI_format.txt", package="Anaconda"), sep="\t")
taxo <- read.table("taxonomy_RepSeq.tsv", sep="\t", header=T)
taxo_database <- merge(taxo,all_taxon, by.x="Taxon", by.y="V2", all.x=T)
# Remove duplicated rows without an assignment when the same feature ID also has an assigned row
i <- taxo_database$V3!=""
taxo_database <- taxo_database[i | !taxo_database$Feature.ID %in% taxo_database$Feature.ID[i],]
#taxo_database_df <- taxo_database[-which(duplicated(taxo_database)), ]
taxon_Annotations <- data.frame(taxo_database$Feature.ID, taxo_database$V3)
write.table(taxon_Annotations, file="database_bacteria_package_all.tab", sep="\t", col.names = F, row.names = F, quote = F)
}
#' @title get_bactotraits_targeted
#'
#' @description Obtain Bacterial Traits for Bacteria kingdom for targeted analysis
#'
#' @param x Object from the Differential ASV abundance (DASVA) analysis
#'
#' @return A data frame file with Bacterial Traits informations for Bacteria kingdom for targeted analysis from the Differential ASV abundance (DASVA) analysis
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{get_bactotraits_targeted(res_forest_vs_long_fallow)}
#' @export
get_bactotraits_targeted <- function(x) {
#file.copy(from = file.path(original_dir, paste("Working_scripts/BactoTraits_database.txt", sep="")), to ="BactoTraits_database.txt",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = system.file("extdata", "BactoTraits_database.txt", package="Anaconda"), to ="BactoTraits_database.txt",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
BactoTraits <- read.table("BactoTraits_database.txt", sep="\t", header=T)
# `taxo` (the taxonomy table read earlier) is expected to exist in the workspace
res_taxo <- merge(data.frame(x), taxo, by.x="row.names", by.y="Feature.ID")
res_taxo_DT <- res_taxo
res_taxo_02 <- setDT(res_taxo_DT)[, paste0("Taxon", 1:7) := tstrsplit(Taxon, ";")]
res_taxo_03 <- data.frame(res_taxo_02$Taxon6, res_taxo_02$Row.names)
colnames(res_taxo_03) <- c("Genus", "Row.names")
res_taxo_03$Genus <- sub(" g__", "", res_taxo_03$Genus)
BT <- data.frame(BactoTraits$Genus, BactoTraits$Heterotroph, BactoTraits$Autotroph, BactoTraits$Organotroph, BactoTraits$Lithotroph, BactoTraits$Chemotroph, BactoTraits$Phototroph, BactoTraits$Copiotroph_Diazotroph, BactoTraits$Methylotroph, BactoTraits$Oligotroph)
colnames(BT) <- c("Genus", "Heterotroph", "Autotroph", "Organotroph", "Lithotroph", "Chemotroph", "Phototroph", "Copiotroph_Diazotroph", "Methylotroph", "Oligotroph")
BT_link <- merge(res_taxo_03, BT, by.x="Genus", by.y="Genus", all.x=T)
res_taxo_final <- merge(res_taxo, BT_link, by.x="Row.names", by.y="Row.names")
unlink("BactoTraits_database.txt")
return(res_taxo_final)
}
#' @title get_funguilds_targeted
#'
#' @description Obtain Fungi Guilds for Fungi kingdom for targeted analysis
#'
#' @param x Object from the funguild_input_targeted() output.
#'
#' @return A data frame file with Fungi Guilds informations for Fungi kingdom for targeted analysis from the Differential ASV abundance (DASVA) analysis
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{get_funguilds_targeted(res_forest_vs_long_fallow_guilds)}
#' @export
get_funguilds_targeted <- function(x) {
# file.copy(from = file.path(original_dir, paste("Working_scripts/Guilds_v1.1.py", sep="")), to ="Guilds_v1.1.py",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = system.file("extdata", "Guilds_v1.1.py", package="Anaconda"), to ="Guilds_v1.1.py",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
name = deparse(substitute(x))
write.table(x, file=paste0("input_", name,"_targeted.txt", sep=""), sep="\t", row.names = F)
latin = readLines(paste0("input_", name,"_targeted.txt", sep=""),-1)
latin[1] = "ID\ttaxonomy"
writeLines(latin,paste0("input_", name,"_targeted.txt", sep=""))
# to deal with https://stackoverflow.com/questions/17309288/importerror-no-module-named-requests
# if
#FunGuild v1.1 Beta
#Traceback (most recent call last):
# File "Guilds_v1.1.py", line 119, in <module>
# import requests
#ImportError: No module named requests
system('python -m pip install --user requests')
# https://stackoverflow.com/questions/41638558/how-to-call-python-script-from-r-with-arguments
#system('python Guilds_v1.1.py -otu taxon_list_drawer_input.txt -m -u')
system(paste0('python Guilds_v1.1.py -otu input_',name,'_targeted.txt -m -u', sep=""))
## CLEAN taxon_list_drawer_input.guilds.txt
# dat <- read.table(paste0('python Guilds_v1.1.py -otu input_',name,'_targeted.guilds.txt -m -u', sep=""), sep="\t", header=T)
# https://stackoverflow.com/questions/26289681/r-regex-find-last-occurrence-of-delimiter
# funguilds <- data.frame(sub(".*[__]", "", dat$taxonomy,), dat$Guild, dat$Trophic.Mode,stringsAsFactors = FALSE)
# colnames(funguilds) <- c("Taxon", "Guild", "Trophic_Mode")
delfiles <- dir(path=targeted_analysis_dir ,pattern="*matched.txt")
file.remove(file.path(targeted_analysis_dir, delfiles))
Files <- list.files(pattern="*.guilds.txt")
newName <- sub("input_", "Table_02_03_", Files)
newName_02 <- sub("guilds.", "", newName)
file.rename(Files, newName_02)
unlink("Guilds_v1.1.py")
}
#' @title funguild_input_targeted
#'
#' @description Prepare Object for Fungi Guilds for Fungi kingdom for targeted analysis
#'
#' @param x Object from the Differential ASV abundance (DASVA) analysis
#'
#' @return An Object used for Fungi Guilds informations for Fungi kingdom for targeted analysis from the Differential ASV abundance (DASVA) analysis
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{res_forest_vs_long_fallow_guilds <- funguild_input_targeted(res_forest_vs_long_fallow)}
#' @export
funguild_input_targeted <- function (x)
{
taxon_for_guild <- data.frame(merge(data.frame(x), taxo, by.x="row.names", by.y="Feature.ID"))$Taxon
taxon_for_guild_02 <- data.frame(rep(1:length(taxon_for_guild), 1), taxon_for_guild )
colnames(taxon_for_guild_02) <- c("ID", "taxonomy")
return(taxon_for_guild_02)
}
#' @title input_global_analysis
#'
#' @description Create the input files for each condition for the Global analysis by Taxon_MWU from the targeted analysis (I)
#'
#' @param x Object from the Differential ASV abundance (DASVA) analysis
#'
#' @return Input Object for Global analysis by Taxon_MWU analysis from targeted analysis (I)
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{input_global_analysis(res_forest_vs_long_fallow)}
#' @export
input_global_analysis <- function (x)
{
pre_input <- format_input(x)
taxo_repseq <- read.table("taxonomy_RepSeq.tsv", sep="\t", header=T)
pre_input_all <- merge(pre_input, taxo_repseq, by.x="ASV_ID", by.y="Feature.ID", all.y=T)
pre_input_all$Taxon <- NULL
pre_input_all$Confidence <- NULL
pre_input_all$logP[is.na(pre_input_all$logP)] <- 0
return(pre_input_all)
}
#' @title database_fungi_creation_RepSeq
#'
#' @description Create a Database for Fungi kingdom for Global analysis by Taxon_MWU analysis from targeted analysis. Please, run setwd("02_Global_analysis") after this function.
#'
#' @param nothing It's important not to write anything between the brackets, the database will create itself.
#'
#' @return A data frame file named database_fungi_package_all.tab created from the taxonomy_all_bacteria_QIIME2_and_NCBI_format.txt file and your own taxonomy_RepSeq.tsv file.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' # It is important not to write anything between the brackets, the database will create itself.
#' \dontrun{database_fungi_creation_RepSeq()}
#' # Please, run setwd("02_Global_analysis") after this function.
#' @export
database_fungi_creation_RepSeq <- function (nothing)
{
oldwd <- getwd()
on.exit(setwd(oldwd))
# Create the sub-directory "02_Global_analysis" if it does not already exist
if (!dir.exists("02_Global_analysis")) {dir.create("02_Global_analysis")}
setwd("02_Global_analysis")
file.copy(from = file.path(kingdom, paste("taxonomy_RepSeq.tsv", sep="")), to ="taxonomy_RepSeq.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(kingdom, paste("taxonomy_RepSeq.tsv", sep="")))
all_taxon <- read.table(file.path(original_dir, paste("Working_scripts/taxonomy_all_fungi_QIIME2_and_NCBI_format.txt", sep="")), sep="\t")
#all_taxon <- read.table(system.file("extdata", "taxonomy_all_fungi_QIIME2_and_NCBI_format.txt", package="Anaconda"), sep="\t")
taxo <- read.table("taxonomy_RepSeq.tsv", sep="\t", header=T)
taxo_database <- merge(taxo,all_taxon, by.x="Taxon", by.y="V2", all.x=T, all.y=F)
# !duplicated() is used instead of -which(duplicated()) so the table is kept intact when no duplicates exist
taxo_database_df <- taxo_database[!duplicated(taxo_database), ]
taxon_Annotations <- data.frame(taxo_database_df$Feature.ID, taxo_database_df$V3)
write.table(taxon_Annotations, file="database_fungi_package_all.tab", sep="\t", col.names = F, row.names = F, quote = F)
}
#' @title database_fungi_creation
#'
#' @description Create a Database for Fungi kingdom for Global analysis by Taxon_MWU analysis from targeted analysis, using only rarefied ASVs. Please, run setwd("02_Global_analysis") after this function.
#'
#' @param nothing It's important not to write anything between the brackets, the database will create itself.
#'
#' @return A data frame file named database_fungi_package_all.tab created from the taxonomy_all_bacteria_QIIME2_and_NCBI_format.txt file and your own taxonomy.tsv file.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' # It is important not to write anything between the brackets, the database will create itself.
#' \dontrun{database_fungi_creation()}
#' # Please, run setwd("02_Global_analysis") after this function.
#' @export
database_fungi_creation <- function (nothing)
{
oldwd <- getwd()
on.exit(setwd(oldwd))
# Create the sub-directory "02_Global_analysis" if it does not already exist
if (!dir.exists("02_Global_analysis")) {dir.create("02_Global_analysis")}
setwd("02_Global_analysis")
file.copy(from = file.path(kingdom, paste("taxonomy.tsv", sep="")), to ="taxonomy.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(kingdom, paste("taxonomy.tsv", sep="")))
#all_taxon <- read.table(file.path(original_dir, paste("Working_scripts/taxonomy_all_fungi_QIIME2_and_NCBI_format.txt", sep="")), sep="\t")
all_taxon <- read.table(system.file("extdata", "taxonomy_all_fungi_QIIME2_and_NCBI_format.txt", package="Anaconda"), sep="\t")
taxo <- read.table("taxonomy.tsv", sep="\t", header=T)
taxo_database <- merge(taxo,all_taxon, by.x="Taxon", by.y="V2", all.x=T, all.y=F)
  # !duplicated() is robust even when there are no duplicated rows
  # (-which(duplicated(...)) would drop every row in that case)
  taxo_database_df <- taxo_database[!duplicated(taxo_database), ]
taxon_Annotations <- data.frame(taxo_database_df$Feature.ID, taxo_database_df$V3)
write.table(taxon_Annotations, file="database_fungi_package_all.tab", sep="\t", col.names = F, row.names = F, quote = F)
}
#' @title target_file
#'
#' @description Imports condition information from your SampleSheet_comparison.txt file, with a focus on input files.
#'
#' @param nothing It's important not to write anything between the brackets, comparisons will create themselves.
#'
#' @return A data.frame with condition information from your SampleSheet_comparison.txt file
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{target_file <- target_file()}
#' @export
target_file <- function (nothing)
{
sampleSheet="SampleSheet_comparison.txt"
samplesInfo <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
target_file <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
colnames(target_file) <- c("label","files","condition") ; head(target_file) ; dim(target_file)
#threshold=1 # minimum of ASVs value across samples
return(target_file)
}
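# Illustrative sketch (an assumption based on the read.table() call above):
# SampleSheet_comparison.txt is a whitespace-separated, header-less file with
# three columns -- label, input file name, condition -- for example:
#   sample1  input_sample1.txt  condition_A
#   sample2  input_sample2.txt  condition_A
#   sample3  input_sample3.txt  condition_B
# Usage:
#   target <- target_file()
#   str(target)   # data.frame with columns label, files, condition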
#' @title samplesInfo
#'
#' @description Imports condition information from your SampleSheet_comparison.txt file, with a focus on samplesInfo.
#'
#' @param nothing It's important not to write anything between the brackets, comparisons will create themselves.
#'
#' @return A data.frame with condition information from your SampleSheet_comparison.txt file, with a focus on samplesInfo.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{samplesInfo <- samplesInfo()}
#' @export
samplesInfo <- function (nothing)
{
sampleSheet="SampleSheet_comparison.txt"
samplesInfo <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
target_file <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
colnames(target_file) <- c("label","files","condition") ; head(target_file) ; dim(target_file)
#threshold=1 # minimum of ASVs value across samples
return(samplesInfo)
}
#' @title move_files
#'
#' @description Moves the files into the appropriate folders, depending on the previously selected kingdom (e.g., Fungi 'Fungi()', Bacteria 'Bacteria()', etc.)
#'
#' @param nothing It's important not to write anything between the brackets; the files will be moved into the appropriate folders, depending on the previously selected kingdom.
#'
#' @return Moves the files into the appropriate folders.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{move_files()}
#' @export
move_files <- function (nothing)
{
file.copy(from = file.path(original_dir, paste("taxonomy.tsv", sep="")), to ="taxonomy.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(original_dir, paste("taxonomy.tsv", sep="")))
file.copy(from = file.path(original_dir, paste("taxonomy_RepSeq.tsv", sep="")), to ="taxonomy_RepSeq.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(original_dir, paste("taxonomy_RepSeq.tsv", sep="")))
#file.copy(from = file.path(original_dir, paste("SampleSheet_comparison.txt", sep="")), to ="SampleSheet_comparison.txt",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
#file.remove(file.path(original_dir, paste("SampleSheet_comparison.txt", sep="")))
file.copy(from = file.path(original_dir, paste("ASV.tsv", sep="")), to ="ASV.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(original_dir, paste("ASV.tsv", sep="")))
}
#' @title Bacteria
#'
#' @description This function creates a new folder named Bacteria and sets your working directory to this folder. Please, run setwd("Bacteria") after this function.
#'
#' @param nothing It's important not to write anything between the brackets; a new folder named Bacteria will be created and your working directory will be set to this folder, depending on the selected kingdom.
#'
#' @return A new folder named Bacteria is created and your working directory is set to this folder, depending on the selected kingdom.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{Bacteria()}
#' # Please, run setwd("Bacteria") after this function.
#' @export
Bacteria <- function (nothing)
{
if (!dir.exists("Bacteria")) {dir.create("Bacteria")}
oldwd <- getwd()
on.exit(setwd(oldwd))
setwd("Bacteria")
Bacteria= getwd()
}
#' @title Fungi
#'
#' @description This function creates a new folder named Fungi and sets your working directory to this folder. Please, run setwd("Fungi") after this function.
#'
#' @param nothing It's important not to write anything between the brackets; a new folder named Fungi will be created and your working directory will be set to this folder, depending on the selected kingdom.
#'
#' @return A new folder named Fungi is created and your working directory is set to this folder, depending on the selected kingdom.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{Fungi()}
#' # plaese, run setwd("Fungi") after this function.
#' @export
Fungi <- function (nothing)
{
if (!dir.exists("Fungi")) {dir.create("Fungi")}
oldwd <- getwd()
on.exit(setwd(oldwd))
setwd("Fungi")
Fungi= getwd()
}
#' @title Metazoan
#'
#' @description This function creates a new folder named Metazoan and sets your working directory to this folder. Please, run setwd("Metazoan") after this function.
#'
#' @param nothing It's important not to write anything between the brackets; a new folder named Metazoan will be created and your working directory will be set to this folder, depending on the selected kingdom.
#'
#' @return A new folder named Metazoan is created and your working directory is set to this folder, depending on the selected kingdom.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{Metazoan()}
#' # plaese, run setwd("Metazoan") after this function.
#' @export
Metazoan <- function (nothing)
{
if (!dir.exists("Metazoan")) {dir.create("Metazoan")}
oldwd <- getwd()
on.exit(setwd(oldwd))
setwd("Metazoan")
Metazoan= getwd()
}
#' @title format_input
#'
#' @description Applies a signed logP (based on log10 of the adjusted p-value) to ASVs with both positive and negative fold changes.
#'
#' @param x Object from the Differential ASV abundance (DASVA) analysis
#'
#' @return an input for the input_global_analysis() function
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{format_input(x)}
#' @export
format_input <- function(x)
{
dat <- data.frame(x)
dat$ASV_ID <- rownames(dat)
dat_x <- merge(taxo,dat, by.x="Feature.ID", by.y="ASV_ID", all.x=F)
dat_x[is.na(dat_x)] <- 0
dat <- dat_x
dat_negativ <- dat[ which(dat$log2FoldChange < 0), ]
dat_negativ_02 <- data.frame(dat_negativ$Feature.ID, log10(dat_negativ$padj))
colnames(dat_negativ_02) <- c("ASV_ID", "logP")
dat_positiv <- dat[ which(dat$log2FoldChange >= 0), ]
dat_positiv_02 <- data.frame(dat_positiv$Feature.ID, -log10(dat_positiv$padj))
colnames(dat_positiv_02) <- c("ASV_ID", "logP")
input <- rbind(dat_negativ_02, dat_positiv_02)
is.na(input)<-sapply(input, is.infinite)
input[is.na(input)]<-0
return(input)
}
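# Minimal numeric sketch of the signed-logP convention implemented above
# (values are made up for illustration): the sign of logP encodes the
# direction of the fold change, its magnitude the significance.
#   log10(0.01)    # -2 : ASV with log2FoldChange <  0 and padj = 0.01
#   -log10(0.01)   # +2 : ASV with log2FoldChange >= 0 and padj = 0.01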
#' @title heatmap_samples_hclust
#'
#' @description Adapt hclust for heatmap sample to sample analysis
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return hclust object for the heatmap.2() function
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{hc <- heatmap_samples_hclust()}
#' @export
heatmap_samples_hclust <- function (nothing)
{
sampleFiles <- grep("input_",list.files(targeted_analysis_dir),value=TRUE)
sampleCondition <- samplesInfo$V3
sampleTable <- data.frame(sampleName = sampleFiles, fileName = sampleFiles)
sampleTable <- data.frame(sampleName = sampleFiles,
fileName = sampleFiles,
condition = sampleCondition)
dasva_raw <- dasva_raw_input(sampleTable = sampleTable,
directory = targeted_analysis_dir,
design= ~ condition)
#Trims too low represented ASVs
dasva_raw <- dasva_raw[ rowSums(counts(dasva_raw)) > threshold, ] ; dasva_raw
rld <- rlog(dasva_raw, blind=FALSE)
distsRL <- dist(t(assay(rld)))
hc <- hclust(distsRL)
return(hc)
}
#' @title heatmap_samples_matrix
#'
#' @description Adapt samples matrix for heatmap sample to sample analysis
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return samples matrix object for the heatmap.2() function
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{mat <- heatmap_samples_matrix()}
#' @export
heatmap_samples_matrix <- function (nothing)
{
sampleFiles <- grep("input_",list.files(targeted_analysis_dir),value=TRUE)
sampleCondition <- samplesInfo$V3
sampleTable <- data.frame(sampleName = sampleFiles, fileName = sampleFiles)
sampleTable <- data.frame(sampleName = sampleFiles,
fileName = sampleFiles,
condition = sampleCondition)
dasva_raw <- dasva_raw_input(sampleTable = sampleTable,
directory = targeted_analysis_dir,
design= ~ condition)
#Trims too low represented ASVs
dasva_raw <- dasva_raw[ rowSums(counts(dasva_raw)) > threshold, ] ; dasva_raw
rld <- rlog(dasva_raw, blind=FALSE)
distsRL <- dist(t(assay(rld)))
mat <- as.matrix(distsRL)
# rownames(mat) <- colnames(mat) <- with(colData(dasva), paste(Id, condition , sep=' : '))
rownames(mat) <- gsub(".txt", "", rownames(mat))
rownames(mat) <- gsub("input_", "", rownames(mat))
colnames(mat) <- gsub(".txt", "", colnames(mat))
colnames(mat) <- gsub("input_", "", colnames(mat))
return(mat)
}
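# Illustrative usage sketch (assumes the globals targeted_analysis_dir,
# samplesInfo and threshold are set, as elsewhere in this pipeline):
#   hc  <- heatmap_samples_hclust()
#   mat <- heatmap_samples_matrix()
#   pheatmap(mat,
#            clustering_distance_rows = as.dist(mat),
#            clustering_distance_cols = as.dist(mat))
# pheatmap() accepts precomputed distances via as.dist(); heatmap.2() from
# gplots could be used instead, with Rowv = as.dendrogram(hc).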
#' @title heatmap_taxo
#'
#' @description Adds taxonomy to the pheatmap plot instead of ASV codes.
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return log2.norm.counts_taxo, used for adding taxonomy to the pheatmap plot instead of ASV codes.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{log2.norm.counts_taxo <- heatmap_taxo()}
#' @export
heatmap_taxo <- function (nothing)
{
colnames(log2.norm.counts) <- row.names(heatmap_condition_df)
dat <- merge(log2.norm.counts, taxo, by.x="row.names", by.y="Feature.ID", sort = TRUE)
dat$ASV_code <- NULL
dat$Confidence <- NULL
dat$NCBITaxon <- NULL
dat$Taxon.1 <- NULL
dat$Row.names <- NULL
dat$length_taxon <- 1:length(dat$Taxon)
dat$length_taxon <- as.numeric(dat$length_taxon)
# In order to deal with duplicated taxon name
row.names(dat) <- c(paste(dat$Taxon, dat$length_taxon))
dat$length_taxon <- NULL
dat$Taxon <- NULL
return(dat)
}
#' @title heatmap_condition
#'
#' @description For the clustering step. Directly fills the annotation_col argument of the pheatmap() function.
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return A matrix that fills the annotation_col argument of the pheatmap() function.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @export
heatmap_condition <- function (nothing)
{
df <- as.data.frame(colData(dasva)["condition"])
row.names(df) <- gsub('input_', '', row.names(df))
row.names(df) <- gsub('.txt', '', row.names(df))
df <- data.matrix(df, rownames.force=NA)
return(df)
}
#' @title heatmap_data_dasva
#'
#' @description For the clustering step. Creates the log2.norm.counts object.
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return The log2.norm.counts object.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @export
heatmap_data_dasva <- function (nothing)
{
#Selects only most abundant ASVs (here 75)
nCounts <- counts(dasva, normalized=TRUE)
select <- order(rowMeans(nCounts),decreasing=TRUE)[1:75]
#Selects corresponding norm counts
nt <- normTransform(dasva)
log2.norm.counts <- assay(nt)[select,]
#Gets the metadata
log2.norm.counts <- data.frame(log2.norm.counts)
names(log2.norm.counts) <- gsub("input_", "", names(log2.norm.counts))
names(log2.norm.counts) <- gsub(".txt", "", names(log2.norm.counts))
colnames(log2.norm.counts) <- colData(dasva)$Id
return(log2.norm.counts)
}
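# Illustrative clustering sketch combining the helpers above (assumes the
# dasva object and the taxo table exist in the workspace):
#   log2.norm.counts      <- heatmap_data_dasva()
#   heatmap_condition_df  <- heatmap_condition()
#   log2.norm.counts_taxo <- heatmap_taxo()
#   pheatmap(log2.norm.counts_taxo,
#            annotation_col = as.data.frame(heatmap_condition_df))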
#' @title PCA_data_dasva
#'
#' @description Computes the PCA (Principal Component Analysis) data.
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return data. The PCA (Principal Component Analysis) data.
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @export
PCA_data_dasva <- function (nothing)
{
sampleSheet="SampleSheet_comparison.txt"
samplesInfo <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
target_file <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
colnames(target_file) <- c("label","files","condition") ; head(target_file) ; dim(target_file)
#threshold=1 # minimum of ASVs value across samples
sampleFiles <- grep("input_",list.files(targeted_analysis_dir),value=TRUE)
sampleCondition <- samplesInfo$V3
sampleTable <- data.frame(sampleName = sampleFiles, fileName = sampleFiles)
sampleTable <- data.frame(sampleName = sampleFiles,
fileName = sampleFiles,
condition = sampleCondition)
dasva_raw <- dasva_raw_input(sampleTable = sampleTable,
directory = targeted_analysis_dir,
design= ~ condition)
#Trims too low represented ASVs
dasva_raw <- dasva_raw[ rowSums(counts(dasva_raw)) > threshold, ] ; dasva_raw
rld <- rlog(dasva_raw, blind=FALSE)
distsRL <- dist(t(assay(rld)))
mat <- as.matrix(distsRL)
data <- plotPCA(rld, intgroup=c("condition"), returnData=TRUE)
return(data)
}
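# Illustrative plotting sketch; DESeq2::plotPCA(..., returnData=TRUE) attaches
# the explained variance as the "percentVar" attribute of the returned data:
#   data <- PCA_data_dasva()
#   percentVar <- round(100 * attr(data, "percentVar"))
#   ggplot(data, aes(PC1, PC2, color = condition)) +
#     geom_point(size = 3) +
#     xlab(paste0("PC1: ", percentVar[1], "% variance")) +
#     ylab(paste0("PC2: ", percentVar[2], "% variance"))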
#' @title plotSparsityASV
#'
#' @description Creates an ASV sparsity plot.
#'
#' @param x Corresponding to the DASVA (Differential ASV abundance) object
#' @param normalized normalized
#' @param ... ...
#' @return An ASV sparsity plot
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{plotSparsityASV(dasva)}
#' @export
plotSparsityASV <- function(x, normalized=TRUE, ...) {
if (is(x, "DESeqDataSet")) {
x <- counts(x, normalized=normalized)
}
rs <- rowSums(x)
rmx <- apply(x, 1, max)
plot(rs[rs > 0], (rmx/rs)[rs > 0], log="x", ylim=c(0,1), xlab="sum of counts per ASV",
ylab="max count / sum", main="Concentration of counts over total sum of counts", ...)
}
#' @title plotDispASVs
#'
#' @description Creates an ASV dispersion plot.
#'
#' @param object Corresponding to the DASVA (Differential ASV abundance) object
#'
#' @param ymin ymin
#' @param CV CV
#' @param genecol genecol
#' @param fitcol fitcol
#' @param finalcol finalcol
#' @param legend legend
#' @param xlab xlab
#' @param ylab ylab
#' @param log log
#' @param cex cex
#' @param ... ...
#' @return An ASV dispersion plot
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{plotDispASVs(dasva)}
#' @export
plotDispASVs <- function( object, ymin, CV=FALSE,
genecol = "black", fitcol = "red", finalcol = "dodgerblue",
legend=TRUE, xlab, ylab, log = "xy", cex = 0.45, ... )
{
if (missing(xlab)) xlab <- "mean of normalized counts"
if (missing(ylab)) {
if (CV) {
ylab <- "coefficient of variation"
} else {
ylab <- "dispersion"
}
}
px = mcols(object)$baseMean
sel = (px>0)
px = px[sel]
# transformation of dispersion into CV or not
f <- if (CV) sqrt else I
py = f(mcols(object)$dispGeneEst[sel])
if(missing(ymin))
ymin = 10^floor(log10(min(py[py>0], na.rm=TRUE))-0.1)
plot(px, pmax(py, ymin), xlab=xlab, ylab=ylab,
log=log, pch=ifelse(py<ymin, 6, 20), col=genecol, cex=cex, ... )
# use a circle over outliers
pchOutlier <- ifelse(mcols(object)$dispOutlier[sel],1,16)
cexOutlier <- ifelse(mcols(object)$dispOutlier[sel],2*cex,cex)
lwdOutlier <- ifelse(mcols(object)$dispOutlier[sel],2,1)
if (!is.null(dispersions(object))) {
points(px, f(dispersions(object)[sel]), col=finalcol, cex=cexOutlier,
pch=pchOutlier, lwd=lwdOutlier)
}
if (!is.null(mcols(object)$dispFit)) {
points(px, f(mcols(object)$dispFit[sel]), col=fitcol, cex=cex, pch=16)
}
if (legend) {
legend("bottomright",c("ASVs","fitted","final"),pch=16,
col=c(genecol,fitcol,finalcol),bg="white")
}
}
#' @title get_dasva
#'
#' @description Creates the DASVA object. Fits a Gamma-Poisson generalized linear model, with dispersion estimates for negative-binomial-distributed data; fitType is "parametric", "local", or "mean".
#'
#' @param fitType Type of fit for the dispersion estimates: "parametric", "local", or "mean".
#'
#' @return DASVA object
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{dasva <- get_dasva(fitType="parametric")
#' dasva <- get_dasva(fitType="local")
#' dasva <- get_dasva(fitType="mean")}
#' @export
get_dasva <- function (fitType="")
{
sampleSheet="SampleSheet_comparison.txt"
samplesInfo <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
target_file <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
colnames(target_file) <- c("label","files","condition") ; head(target_file) ; dim(target_file)
#threshold=1 # minimum of ASVs value across samples
sampleFiles <- grep("input_",list.files(targeted_analysis_dir),value=TRUE)
sampleCondition <- samplesInfo$V3
sampleTable <- data.frame(sampleName = sampleFiles, fileName = sampleFiles)
sampleTable <- data.frame(sampleName = sampleFiles,
fileName = sampleFiles,
condition = sampleCondition)
dasva_raw <- dasva_raw_input(sampleTable = sampleTable,
directory = targeted_analysis_dir,
design= ~ condition)
#Trims too low represented ASVs
dasva_raw <- dasva_raw[ rowSums(counts(dasva_raw)) > threshold, ] ; dasva_raw
#dasva object
dasva <- suppressMessages(DESeq(dasva_raw, fitType=fitType))
#dasva <- suppressMessages(DESeq(dasva_raw, fitType="parametric"))
#options(warn=-1)
return(dasva)
}
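# Illustrative downstream sketch using the standard DESeq2 results extraction
# (the contrast levels are hypothetical; they must match the third column of
# your SampleSheet_comparison.txt):
#   dasva <- get_dasva(fitType = "parametric")
#   res   <- results(dasva, contrast = c("condition", "condition_A", "condition_B"))
#   summary(res)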
#' @title dasva_raw_input
#'
#' @description Used in heatmap_samples_hclust(), heatmap_samples_matrix(), PCA_data_dasva() and get_dasva() functions.
#'
#' @param sampleTable Sample table; depends on the calling function (heatmap_samples_hclust(), heatmap_samples_matrix(), PCA_data_dasva(), or get_dasva()).
#'
#' @param directory directory
#' @param design design
#' @param ignoreRank ignoreRank
#' @param ... ...
#' @return object
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{dasva_raw <- dasva_raw_input(sampleTable = sampleTable,
#' directory = targeted_analysis_dir,
#' design= ~ condition)}
#' @export
dasva_raw_input <- function (sampleTable, directory = ".", design, ignoreRank = FALSE,
...)
{
if (missing(design))
stop("design is missing")
l <- lapply(as.character(sampleTable[, 2]), function(fn) read.table(file.path(directory,
fn), fill = TRUE))
if (!all(sapply(l, function(a) all(a$V1 == l[[1]]$V1))))
stop("ASV IDs (first column) differ between files.")
tbl <- sapply(l, function(a) a[, ncol(a)])
colnames(tbl) <- sampleTable[, 1]
rownames(tbl) <- l[[1]]$V1
rownames(sampleTable) <- sampleTable[, 1]
oldSpecialNames <- c("no_feature", "ambiguous", "too_low_aQual",
"not_aligned", "alignment_not_unique")
specialRows <- (substr(rownames(tbl), 1, 1) == "_") | rownames(tbl) %in%
oldSpecialNames
tbl <- tbl[!specialRows, , drop = FALSE]
object <- DESeqDataSetFromMatrix(countData = tbl, colData = sampleTable[,
-(1:2), drop = FALSE], design = design, ignoreRank, ...)
return(object)
}
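# The count files read above follow the HTSeq-count layout (this function is
# adapted from DESeqDataSetFromHTSeqCount, hence the special-row filtering):
# two tab-separated columns, ASV ID then count, no header. Hypothetical
# content of one input file:
#   ASV_0001    13
#   ASV_0002    4
#   ASV_0003    257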
#' @title get_input_files
#'
#' @description Creates the sub-directory "01_Targeted_analysis" if it does not already exist, writes one count file per condition into it, and then loads the taxonomy file. Please, run setwd("01_Targeted_analysis") after this function.
#'
#' @param nothing It's important not to write anything between the brackets, all inputs will be adapted automatically.
#'
#' @return taxo
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{taxo <- get_input_files()}
#' # please, run setwd("01_Targeted_analysis") after this function.
#' @export
get_input_files <- function (nothing)
{
taxo <- read.table("taxonomy.tsv", sep="\t", header=T)
#taxo$Taxon <- gsub(";", "__", taxo$Taxon)
ASV <- read.table("ASV.tsv", sep="\t", header=T)
##dat_samples <- merge(ASV, taxo, by.x="ASV_ID", by.y="Feature.ID")
##dat_samples$ASV_ID <- NULL
##dat_samples$Confidence <- NULL
  # This extra step deals with the estimateSizeFactorsForMatrix error: when every ASV contains at least one zero, the log geometric means cannot be computed
# see https://help.galaxyproject.org/t/error-with-deseq2-every-gene-contains-at-least-one-zero/564
ASV_02 <- ASV
ASV_02$ASV_ID <- NULL
ASV_02 <- ASV_02 + 1
ASV_02$ASV_ID <- ASV$ASV_ID
# https://stackoverflow.com/questions/5620885/how-does-one-reorder-columns-in-a-data-frame
##arrange df vars by position
  ## vars must be a named vector, e.g. c("var.name"=1)
arrange.vars <- function(data, vars){
##stop if not a data.frame (but should work for matrices as well)
stopifnot(is.data.frame(data))
##sort out inputs
data.nms <- names(data)
var.nr <- length(data.nms)
var.nms <- names(vars)
var.pos <- vars
##sanity checks
stopifnot( !any(duplicated(var.nms)),
!any(duplicated(var.pos)) )
stopifnot( is.character(var.nms),
is.numeric(var.pos) )
stopifnot( all(var.nms %in% data.nms) )
stopifnot( all(var.pos > 0),
all(var.pos <= var.nr) )
##prepare output
out.vec <- character(var.nr)
out.vec[var.pos] <- var.nms
out.vec[-var.pos] <- data.nms[ !(data.nms %in% var.nms) ]
stopifnot( length(out.vec)==var.nr )
##re-arrange vars by position
data <- data[ , out.vec]
return(data)
}
ASV_02 <- arrange.vars(ASV_02, c("ASV_ID"=1))
dat <- ASV_02
oldwd <- getwd()
on.exit(setwd(oldwd))
  # Create the sub-directory "01_Targeted_analysis" if it does not already exist
if (!dir.exists("01_Targeted_analysis")) {dir.create("01_Targeted_analysis")}
setwd("01_Targeted_analysis")
file.copy(from = file.path(original_dir, paste("SampleSheet_comparison.txt", sep="")), to ="SampleSheet_comparison.txt",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.remove(file.path(original_dir, paste("SampleSheet_comparison.txt", sep="")))
targeted_analysis_dir= getwd() #path to current directory containing sample sheet and count files from step 04
  # Create one file per condition
# https://stackoverflow.com/questions/42532940/how-to-create-multiple-text-files-for-each-column-in-a-dataframe-and-keep-the-fi
for (i in names(dat)) {
raw_file <- paste("input_", i, ".txt", sep = "")
write.table(cbind(dat[1], dat[i]), raw_file, sep = "\t", quote = F, row.names = F, col.names = F, append = F)
}
system("rm input_ASV_ID.txt")
unlink("input_ASV_ID.txt")
sampleSheet="SampleSheet_comparison.txt"
samplesInfo <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
target_file <- read.table(sampleSheet, header=F) ; head(samplesInfo) ; dim(samplesInfo)
colnames(target_file) <- c("label","files","condition") ; head(target_file) ; dim(target_file)
return(taxo)
}
#' @title plotPCA.san
#'
#' @description Custom plotPCA function to plot PC1 and PC3
#'
#' @param object An object use for the PCA
#' @param intgroup intgroup
#' @param ntop ntop
#' @param returnData returnData
#' @return A PCA
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{plotPCA.san(object)}
#' @export
plotPCA.san <- function (object, intgroup = "condition", ntop = 500, returnData = FALSE)
{
rv <- rowVars(assay(object))
select <- order(rv, decreasing = TRUE)[seq_len(min(ntop,
length(rv)))]
pca <- prcomp(t(assay(object)[select, ]))
percentVar <- pca$sdev^2/sum(pca$sdev^2)
if (!all(intgroup %in% names(colData(object)))) {
stop("the argument 'intgroup' should specify columns of colData(dds)")
}
intgroup.df <- as.data.frame(colData(object)[, intgroup, drop = FALSE])
group <- if (length(intgroup) > 1) {
factor(apply(intgroup.df, 1, paste, collapse = " : "))
}
else {
colData(object)[[intgroup]]
}
d <- data.frame(PC1 = pca$x[, 1], PC3 = pca$x[, 3], group = group,
                  intgroup.df, name = colData(object)[,1])  # use the function argument, not the global rld
if (returnData) {
attr(d, "percentVar") <- percentVar[1:3]
return(d)
}
ggplot(data = d, aes_string(x = "PC1", y = "PC3", color = "group", label = "name")) + geom_point(size = 3) + xlab(paste0("PC1: ", round(percentVar[1] * 100), "% variance")) + ylab(paste0("PC3: ", round(percentVar[3] * 100), "% variance")) + coord_fixed() + geom_text_repel(size=3)
}
#' @title plotMA.dasva
#'
#' @description Custom MA plots for the Differential ASV abundance (DASVA) analysis, defining a new function that plots all ASVs and not only those with log2FoldChange > 2.
#'
#' @param object Object from the Differential ASV abundance (DASVA) analysis
#' @param ... ...
#' @param alpha alpha
#' @param main main
#' @param xlab xlab
#' @param ylim ylim
#' @param MLE MLE
#' @param verbose verbose
#' @return A MA plot
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{plotMA.dasva(rXXX, main="XXX", ylim=c(-20,20))}
#' @export
plotMA.dasva <- function(object, alpha, main="", xlab="mean of normalized counts", ylim, MLE=FALSE, verbose=TRUE, ...) {
if (missing(alpha)) {
alpha <- if (is.null(metadata(object)$alpha)) {
0.1
} else {
metadata(object)$alpha
}
}
df <- if (MLE) {
# test if MLE is there
if (is.null(object$lfcMLE)) {
stop("lfcMLE column is not present: you should first run results() with addMLE=TRUE")
}
data.frame(mean = object$baseMean,
lfc = object$lfcMLE,
isDE = ifelse(is.na(object$padj), FALSE, object$padj < alpha))
} else {
data.frame(mean = object$baseMean,
lfc = object$log2FoldChange,
isDE = ifelse(is.na(object$padj), FALSE, object$padj < alpha))
#isDE = ifelse(object$padj < alpha & abs(object$log2FoldChange) >= 2, TRUE, FALSE))
}
if(verbose){print(dim(df[df$isDE==TRUE,]))}
if (missing(ylim)) {
plotMA(df, main=main, xlab=xlab, ...)
} else {
plotMA(df, main=main, xlab=xlab, ylim=ylim, ...)
}
}
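# Illustrative usage sketch (res is a DESeq2 results object from the DASVA
# analysis; the title and limits are placeholders):
#   plotMA.dasva(res, main = "condition_A vs condition_B", ylim = c(-20, 20))
# With verbose = TRUE the dimensions of the significant subset (padj < alpha)
# are printed before plotting.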
#' @title clusteringGOs
#'
#' @description clusteringGOs from the RBGOA analysis pipeline
#'
#' @param gen2go Annotation file name, from the RBGOA analysis pipeline
#'
#' @param div div
#' @param cutHeight cutHeight
#' @return a clustering GO
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{clusteringGOs()}
#' @export
clusteringGOs=function(gen2go,div,cutHeight) {
inname=paste("dissim0_",div,"_",gen2go,sep="")
diss=read.table(inname,sep="\t",header=T,check.names=F)
row.names(diss)=names(diss)
hc=hclust(as.dist(diss),method="complete")
cc=cutree(hc,h=cutHeight)
outname=paste("cl_",inname,sep="")
write.csv(cc,file=outname,quote=F)
}
#' @title mwuTest
#'
#' @description Mann-Whitney U Test from RBGOA
#'
#' @param gotable from gomwuStats from RBGOA
#' @param Alternative from gomwuStats from RBGOA
#'
#' @return mwuTest
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{mwuTest()}
#' @export
mwuTest=function(gotable,Alternative) {
gos=gotable
terms=levels(gos$term)
gos$seq=as.character(gos$seq)
nrg=gos[!duplicated(gos$seq),5]
names(nrg)=gos[!duplicated(gos$seq),4]
# nrg=nrg+rnorm(nrg,sd=0.01) # to be able to do exact wilcox test
rnk=rank(nrg)
names(rnk)=names(nrg)
pvals=c();drs=c();nams=c();levs=c();nseqs=c()
for (t in terms){
got=gos[gos$term==t,]
got=got[!duplicated(got$seq),]
ngot=gos[gos$term!=t,]
ngot=ngot[!duplicated(ngot$seq),]
ngot=ngot[!(ngot$seq %in% got$seq),]
sgo.yes=got$seq
n1=length(sgo.yes)
sgo.no=ngot$seq
n2=length(sgo.no)
wi=wilcox.test(nrg[sgo.yes],nrg[sgo.no],alternative=Alternative) # removed correct=FALSE
r1=sum(rnk[sgo.yes])/n1
r0=sum(rnk[sgo.no])/n2
dr=r1-r0
drs=append(drs,round(dr,0))
levs=append(levs,got$lev[1])
nams=append(nams,as.character(got$name[1]))
pvals=append(pvals,wi$p.value)
nseqs=append(nseqs,n1)
}
res=data.frame(cbind("delta.rank"=drs,"pval"=pvals,"level"=levs,nseqs))
res=cbind(res,"term"=as.character(terms),"name"=nams)
res$pval=as.numeric(as.character(res$pval))
res$delta.rank=as.numeric(as.character(res$delta.rank))
res$level=as.numeric(as.character(res$level))
res$nseqs=as.numeric(as.character(res$nseqs))
return(res)
}
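# Minimal sketch of the statistic computed above (made-up values): delta.rank
# is the mean rank of the measure inside a taxon minus the mean rank outside
# it, so positive values mean the taxon's ASVs tend to score higher.
#   nrg <- c(a = 1.2, b = -0.3, c = 2.5, d = 0.1)  # hypothetical signed logP values
#   rnk <- rank(nrg)
#   yes <- c("a", "c"); no <- setdiff(names(nrg), yes)
#   mean(rnk[yes]) - mean(rnk[no])                 # the delta rank
#   wilcox.test(nrg[yes], nrg[no])                 # the MWU test itself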
#' @title fisherTest
#'
#' @description Fisher test from RBGOA
#'
#' @param gotable from gomwuStats from RBGOA
#'
#' @return fisherTest
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{fisherTest()}
#' @export
fisherTest=function(gotable) {
gos=gotable
terms=levels(gos$term)
gos$seq=as.character(gos$seq)
pft=c();nam=c();lev=c();nseqs=c()
for (t in terms) {
got=gos[gos$term==t,]
got=got[!duplicated(got$seq),]
ngot=gos[gos$term!=t,]
ngot=ngot[!duplicated(ngot$seq),]
ngot=ngot[!(ngot$seq %in% got$seq),]
go.sig=sum(got$value)
go.ns=length(got[,1])-go.sig
ngo.sig=sum(ngot$value)
ngo.ns=length(ngot[,1])-ngo.sig
sig=c(go.sig,ngo.sig) # number of significant genes belonging and not belonging to the tested GO category
ns=c(go.ns,ngo.ns) # number of not-significant genes belonging and not belonging to the tested GO category
mm=matrix(c(sig,ns),nrow=2,dimnames=list(ns=c("go","notgo"),sig=c("go","notgo")))
ff=fisher.test(mm,alternative="greater")
pft=append(pft,ff$p.value)
nam=append(nam,as.character(got$name[1]))
lev=append(lev,got$lev[1])
nseqs=append(nseqs,length(got[,1]))
}
res=data.frame(cbind("delta.rank"=rep(0),"pval"=pft,"level"=lev,nseqs,"term"=terms,"name"=nam))
res[,1]=as.numeric(as.character(res[,1]))
res[,2]=as.numeric(as.character(res[,2]))
res[,3]=as.numeric(as.character(res[,3]))
res$nseqs=as.numeric(as.character(res$nseqs))
return(res)
}
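# Minimal sketch of the 2x2 contingency table built above (made-up counts),
# mirroring the matrix layout used in the loop:
#   sig <- c(go = 12, notgo = 30)   # significant ASVs in / out of the taxon
#   ns  <- c(go = 8,  notgo = 150)  # non-significant ASVs in / out of the taxon
#   mm  <- matrix(c(sig, ns), nrow = 2,
#                 dimnames = list(ns = c("go", "notgo"), sig = c("go", "notgo")))
#   fisher.test(mm, alternative = "greater")$p.value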
#' @title taxon_mwuStats
#'
#' @description mwuStats from RBGOA adapted for taxonomic analysis
#'
#' @param input input
#' @param goDatabase goDatabase
#' @param goAnnotations goAnnotations
#' @param goDivision goDivision
#' @param Alternative Alternative
#' @param adjust.multcomp adjust.multcomp
#' @param clusterCutHeight clusterCutHeight
#' @param largest largest
#' @param smallest smallest
#' @param perlPath perlPath
#' @param verbose verbose
#'
#' @return Statistical analysis for taxonomic rank
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{taxon_mwuStats(input, ...)}
#' @export
taxon_mwuStats=function(input,goDatabase,goAnnotations, goDivision, Alternative="t", adjust.multcomp="BH", clusterCutHeight=0.25,largest=0.1,smallest=5,perlPath="perl", verbose=TRUE){
# file.copy(from = file.path(original_dir, paste("Working_scripts/mwu_a_NCBITaxon.pl", sep="")), to ="mwu_a_NCBITaxon.pl",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = system.file("extdata", "mwu_a_NCBITaxon.pl", package="Anaconda"), to ="mwu_a_NCBITaxon.pl",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
#file.copy(from = file.path(original_dir, paste("Working_scripts/mwu_b_NCBITaxon.pl", sep="")), to ="mwu_b_NCBITaxon.pl",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = system.file("extdata", "mwu_b_NCBITaxon.pl", package="Anaconda"), to ="mwu_b_NCBITaxon.pl",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
extraOptions=paste("largest=",largest," smallest=",smallest,"cutHeight=",clusterCutHeight,sep="")
system(paste(perlPath,"./mwu_a_NCBITaxon.pl",goDatabase,goAnnotations,input,goDivision,extraOptions))
clusteringGOs(goAnnotations,goDivision,clusterCutHeight)
system(paste(perlPath,"./mwu_b_NCBITaxon.pl",goAnnotations,input,goDivision))
inname=paste(goDivision,"_",input,sep="")
rsq=read.table(inname,sep="\t",header=T)
rsq$term=as.factor(rsq$term)
mwut.t=TRUE
if (length(levels(as.factor(rsq$value)))==2) {
if(verbose){cat("Binary classification detected; will perform Fisher's test\n");}
mwut.t=F
rr=fisherTest(rsq)
} else {
if(verbose){cat("Continuous measure of interest: will perform MWU test\n"); }
rr=mwuTest(rsq,Alternative)
}
if (adjust.multcomp=="shuffle"){
if(verbose){cat("shuffling values to calculate FDR, 5 reps\n")}
reps=5
spv=c()
for (i in 1:reps) {
if(verbose){print(paste("replicate",i))}
rsqq=rsq
rsqq$value=sample(rsq$value)
if (mwut.t==TRUE) { rs=mwuTest(rsqq,Alternative) } else { rs=fisherTest(rsqq) }
spv=append(spv,rs$pval)
}
fdr=c()
for (p in rr$pval){
fdr=append(fdr,(sum(spv<=p)/reps)/sum(rr$pval<=p))
}
fdr[fdr>1]=1
} else {
fdr=p.adjust(rr$pval,method=adjust.multcomp)
}
if(verbose){cat(paste(sum(fdr<0.1)," NCBITaxon terms at 10% FDR\n"))}
rr$p.adj=fdr
fname=paste("MWU_",inname,sep="")
write.table(rr,fname,row.names=F)
}
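# Illustrative call (file names are hypothetical except the annotation table,
# whose name matches the database_*_creation() functions above; goDivision
# must stay "TR" for taxonomic rank):
#   taxon_mwuStats(input         = "input_comparison.csv",   # hypothetical name
#                  goDatabase    = "taxonomy_database.obo",  # hypothetical name
#                  goAnnotations = "database_fungi_package_all.tab",
#                  goDivision    = "TR",
#                  Alternative   = "t")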
#' @title taxon_mwuPlot
#'
#' @description taxon mwuPlot for taxonomic analysis
#'
#' @param inFile inFile - results object from the DASVA analysis
#' @param goAnnotations parallel to goAnnotations from gomwuStats from RBGOA. Here, "database_bacteria_package_all.tab" if Bacteria, "database_fungi_package_all.tab" if Fungi
#' @param goDivision parallel to goAnnotations from gomwuStats from RBGOA. Here, "TR" = taxonomic Rank, don't change this
#' @param level1 level1
#' @param level2 level2
#' @param level3 level3
#' @param absValue absValue
#' @param adjusted adjusted
#' @param txtsize txtsize
#' @param font.family font.family
#' @param treeHeight treeHeight
#' @param colors colors
#' @param verbose verbose
#'
#' @return The taxon mwuPlot and the goods data frame, also written to "Table_02_taxon_mwuPlot.txt"
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{taxon_mwuPlot(input,...)}
#' @export
taxon_mwuPlot=function(inFile,goAnnotations,goDivision,level1=0.1,level2=0.05,level3=0.01,absValue=-log(0.05,10),adjusted=TRUE,txtsize=1,font.family="sans",treeHeight=0.5,colors=NULL, verbose=TRUE) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
input=inFile
in.mwu=paste("MWU",goDivision,input,sep="_")
in.dissim=paste("dissim",goDivision,goAnnotations,sep="_")
cutoff=-log(level1,10)
pv=read.table(in.mwu,header=T)
row.names(pv)=pv$term
in.raw=paste(goDivision,input,sep="_")
rsq=read.table(in.raw,sep="\t",header=T)
rsq$term=as.factor(rsq$term)
if (adjusted==TRUE) { pvals=pv$p.adj } else { pvals=pv$pval }
heat=data.frame(cbind("pval"=pvals))
row.names(heat)=pv$term
heat$pval=-log(heat$pval+1e-15,10)
heat$direction=0
heat$direction[pv$delta.rank>0]=1
if (cutoff>0) {
goods=subset(heat,pval>=cutoff)
} else {
goods.names=unique(rsq$term[abs(rsq$value)>=absValue])
goods=heat[row.names(heat) %in% goods.names,]
}
if (is.null(colors) | length(colors)<4 ) {
colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral")
if (sum(goods$direction)==nrow(goods) | sum(goods$direction)==0) {
colors=c("black","black","grey50","grey50")
}
}
goods.names=row.names(goods)
# reading and subsetting dissimilarity matrix
diss=read.table(in.dissim,sep="\t",header=T,check.names=F)
row.names(diss)=names(diss)
diss.goods=diss[goods.names,goods.names]
# how many genes out of what we started with we account for with our best categories?
good.len=c();good.genes=c()
for (g in goods.names) {
sel=rsq[rsq$term==g,]
pass=abs(sel$value)>=absValue
sel=sel[pass,]
good.genes=append(good.genes,as.character(sel$seq))
good.len=append(good.len,nrow(sel))
}
ngenes=length(unique(good.genes))
################### HERE TO DELETE GENES NUMBERS
#hist(rsq$value)
totSum=length(unique(rsq$seq[abs(rsq$value)>=absValue]))
# row.names(goods)=paste(good.len,"/",pv[pv$term %in% goods.names,]$nseqs," ",pv[pv$term %in% goods.names,]$name,sep="")
row.names(goods)=paste(pv[pv$term %in% goods.names,]$name,sep="") # modifier #############################################################################################
#row.names(heat)=paste(good.len,"/",pv$nseqs," ",pv$name,sep="")
row.names(heat)=paste(pv$name,sep="") # modifier ####################################
# row.names(diss.goods)=paste(good.len,"/",pv[pv$term %in% goods.names,]$nseqs," ",pv[pv$term %in% goods.names,]$name,sep="")
row.names(diss.goods)=paste(pv[pv$term %in% goods.names,]$name,sep="")
# clustering terms better than cutoff
GO.categories=as.dist(diss.goods)
cl.goods=hclust(GO.categories,method="average")
labs=cl.goods$labels[cl.goods$order] # saving the labels to order the plot
goods=goods[labs,]
labs=sub(" activity","",labs)
old.par <- par( no.readonly = TRUE )
plots=layout(matrix(c(1,2,3),1,3,byrow=T),c(treeHeight,3,1),TRUE)
par(mar = c(2,2,0.85,0))
plot(as.phylo(cl.goods),show.tip.label=FALSE,cex=0.0000001)
step=100
left=1
top=step*(2+length(labs))
par(mar = c(0,0,0.3,0))
plot(c(1:top)~c(1:top),type="n",axes=F,xlab="",ylab="")
ii=1
goods$color=1
goods$color[goods$direction==1 & goods$pval>cutoff]=colors[4]
goods$color[goods$direction==0 & goods$pval>cutoff]=colors[3]
goods$color[goods$direction==1 & goods$pval>(-log(level2,10))]=colors[2]
goods$color[goods$direction==0 & goods$pval>(-log(level2,10))]=colors[1]
goods$color[goods$direction==1 & goods$pval>(-log(level3,10))]=colors[2]
goods$color[goods$direction==0 & goods$pval>(-log(level3,10))]=colors[1]
for (i in length(labs):1) {
ypos=top-step*ii
ii=ii+1
if (goods$pval[i]> -log(level3,10)) {
text(left,ypos,labs[i],font=2,cex=1*txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
} else {
if (goods$pval[i]>-log(level2,10)) {
text(left,ypos,labs[i],font=1,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
} else {
# if (goods$pval[i]>cutoff) {
# text(left,ypos,labs[i],font=3,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
# } else {
text(left,ypos,labs[i],font=3,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
#}
}
}
}
par(mar = c(3,1,1,0))
plot(c(1:top)~c(1:top),type="n",axes=F,xlab="",ylab="")
text(left,top-step*2,paste("p < ",level3,sep=""),font=2,cex=1* txtsize,adj=c(0,0),family=font.family)
text(left,top-step*3,paste("p < ",level2,sep=""),font=1,cex=0.8* txtsize,adj=c(0,0),family=font.family)
text(left,top-step*4,paste("p < ",10^(-cutoff),sep=""),font=3,col="grey50",cex=0.8* txtsize,adj=c(0,0),family=font.family)
if(verbose){cat(paste("NCBITaxon terms dispayed: ",length(goods.names)),"\n")}
if(verbose){cat(paste("\"Good ASVs\" accounted for: ", ngenes," out of ",totSum, " ( ",round(100*ngenes/totSum,0), "% )","\n",sep=""))}
par(old.par)
goods$pval=10^(-1*goods$pval)
write.table(goods, "Table_02_taxon_mwuPlot.txt")
return(goods)
}
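# Illustrative call (the input file name is hypothetical; level1/level2/level3
# map to the three significance tiers drawn with different font styles above):
#   goods <- taxon_mwuPlot(inFile        = "input_comparison.csv",
#                          goAnnotations = "database_fungi_package_all.tab",
#                          goDivision    = "TR",
#                          level1 = 0.1, level2 = 0.05, level3 = 0.01)
# goods is returned and also written to "Table_02_taxon_mwuPlot.txt".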
#' @title taxon_mwu_list
#'
#' @description taxon Mann-Whitney U list for taxonomic analysis
#'
#' @param inFile inFile - results object from the DASVA analysis
#' @param goAnnotations parallel to goAnnotations from gomwuStats from RBGOA. Here, "database_bacteria_package_all.tab" if Bacteria, "database_fungi_package_all.tab" if Fungi
#' @param goDivision parallel to goAnnotations from gomwuStats from RBGOA. Here, "TR" = taxonomic Rank, don't change this
#' @param level1 level1
#' @param level2 level2
#' @param level3 level3
#' @param absValue absValue
#' @param adjusted adjusted
#' @param txtsize txtsize
#' @param font.family font.family
#' @param treeHeight treeHeight
#' @param colors colors
#'
#' @return List for the statistical analysis for taxonomic rank
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{taxon_list <- taxon_mwu_list(input, ...)}
#' @export
taxon_mwu_list=function(inFile,goAnnotations,goDivision,level1=0.1,level2=0.05,level3=0.01,absValue=-log(0.05,10),adjusted=TRUE,txtsize=1,font.family="sans",treeHeight=0.5,colors=NULL) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
input=inFile
in.mwu=paste("MWU",goDivision,input,sep="_")
in.dissim=paste("dissim",goDivision,goAnnotations,sep="_")
cutoff=-log(level1,10)
pv=read.table(in.mwu,header=T)
row.names(pv)=pv$term
in.raw=paste(goDivision,input,sep="_")
rsq=read.table(in.raw,sep="\t",header=T)
rsq$term=as.factor(rsq$term)
if (adjusted==TRUE) { pvals=pv$p.adj } else { pvals=pv$pval }
heat=data.frame(cbind("pval"=pvals))
row.names(heat)=pv$term
heat$pval=-log(heat$pval+1e-15,10)
heat$direction=0
heat$direction[pv$delta.rank>0]=1
if (cutoff>0) {
goods=subset(heat,pval>=cutoff)
} else {
goods.names=unique(rsq$term[abs(rsq$value)>=absValue])
goods=heat[row.names(heat) %in% goods.names,]
}
if (is.null(colors) | length(colors)<4 ) {
colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral")
if (sum(goods$direction)==nrow(goods) | sum(goods$direction)==0) {
colors=c("black","black","grey50","grey50")
}
}
goods.names=row.names(goods)
# reading and subsetting dissimilarity matrix
diss=read.table(in.dissim,sep="\t",header=T,check.names=F)
row.names(diss)=names(diss)
diss.goods=diss[goods.names,goods.names]
# how many genes out of what we started with we account for with our best categories?
good.len=c();good.genes=c()
for (g in goods.names) {
sel=rsq[rsq$term==g,]
pass=abs(sel$value)>=absValue
sel=sel[pass,]
good.genes=append(good.genes,as.character(sel$seq))
good.len=append(good.len,nrow(sel))
}
ngenes=length(unique(good.genes))
################### HERE TO DELETE GENES NUMBERS
#hist(rsq$value)
totSum=length(unique(rsq$seq[abs(rsq$value)>=absValue]))
# row.names(goods)=paste(good.len,"/",pv[pv$term %in% goods.names,]$nseqs," ",pv[pv$term %in% goods.names,]$name,sep="")
  row.names(goods)=paste(pv[pv$term %in% goods.names,]$name,sep="") # modified: gene counts removed from the labels
#row.names(heat)=paste(good.len,"/",pv$nseqs," ",pv$name,sep="")
row.names(heat)=paste(pv$name,sep="") # modifier ####################################
# row.names(diss.goods)=paste(good.len,"/",pv[pv$term %in% goods.names,]$nseqs," ",pv[pv$term %in% goods.names,]$name,sep="")
row.names(diss.goods)=paste(pv[pv$term %in% goods.names,]$name,sep="")
# clustering terms better than cutoff
GO.categories=as.dist(diss.goods)
cl.goods=hclust(GO.categories,method="average")
labs=cl.goods$labels[cl.goods$order] # saving the labels to order the plot
goods=goods[labs,]
labs=sub(" activity","",labs)
old.par <- par( no.readonly = TRUE )
# cat(paste("NCBITaxon terms dispayed: ",length(goods.names)),"\n")
# cat(paste("\"Good ASVs\" accounted for: ", ngenes," out of ",totSum, " ( ",round(100*ngenes/totSum,0), "% )","\n",sep=""))
# par(old.par)
# goods$pval=10^(-1*goods$pval)
#return(goods)
write.table(data.frame(labs), "Table_03_taxon_mwu_list.txt")
return(data.frame(labs))
}
#' @title taxon_mwuPlot_guilds
#'
#' @description taxon Mann-Whitney U Plot with Fungi Guilds added
#'
#' @param inFile inFile - results object from the DASVA analysis
#' @param goAnnotations parallel to goAnnotations from gomwuStats from RBGOA. Here, "database_bacteria_package_all.tab" if Bacteria, "database_fungi_package_all.tab" if Fungi
#' @param goDivision parallel to goAnnotations from gomwuStats from RBGOA. Here, "TR" = taxonomic Rank, don't change this
#' @param level1 level1
#' @param level2 level2
#' @param level3 level3
#' @param absValue absValue
#' @param adjusted adjusted
#' @param txtsize txtsize
#' @param font.family font.family
#' @param treeHeight treeHeight
#' @param colors colors
#' @param verbose verbose
#'
#' @return List for the statistical analysis for taxonomic rank
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{taxon_mwuPlot_guilds(input, ...)}
#' @export
taxon_mwuPlot_guilds=function(inFile,goAnnotations,goDivision,level1=0.1,level2=0.05,level3=0.01,absValue=-log(0.05,10),adjusted=TRUE,txtsize=1,font.family="sans",treeHeight=0.5,colors=NULL, verbose=TRUE) {
oldpar <- par(no.readonly = TRUE)
on.exit(par(oldpar))
input=inFile
in.mwu=paste("MWU",goDivision,input,sep="_")
in.dissim=paste("dissim",goDivision,goAnnotations,sep="_")
cutoff=-log(level1,10)
pv=read.table(in.mwu,header=T)
row.names(pv)=pv$term
in.raw=paste(goDivision,input,sep="_")
rsq=read.table(in.raw,sep="\t",header=T)
rsq$term=as.factor(rsq$term)
if (adjusted==TRUE) { pvals=pv$p.adj } else { pvals=pv$pval }
heat=data.frame(cbind("pval"=pvals))
row.names(heat)=pv$term
heat$pval=-log(heat$pval+1e-15,10)
heat$direction=0
heat$direction[pv$delta.rank>0]=1
if (cutoff>0) {
goods=subset(heat,pval>=cutoff)
} else {
goods.names=unique(rsq$term[abs(rsq$value)>=absValue])
goods=heat[row.names(heat) %in% goods.names,]
}
if (is.null(colors) | length(colors)<4 ) {
colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral")
if (sum(goods$direction)==nrow(goods) | sum(goods$direction)==0) {
colors=c("black","black","grey50","grey50")
}
}
goods.names=row.names(goods)
# reading and subsetting dissimilarity matrix
diss=read.table(in.dissim,sep="\t",header=T,check.names=F)
row.names(diss)=names(diss)
diss.goods=diss[goods.names,goods.names]
# how many genes out of what we started with we account for with our best categories?
good.len=c();good.genes=c()
for (g in goods.names) {
sel=rsq[rsq$term==g,]
pass=abs(sel$value)>=absValue
sel=sel[pass,]
good.genes=append(good.genes,as.character(sel$seq))
good.len=append(good.len,nrow(sel))
}
ngenes=length(unique(good.genes))
################### HERE TO DELETE GENES NUMBERS
#hist(rsq$value)
totSum=length(unique(rsq$seq[abs(rsq$value)>=absValue]))
# row.names(goods)=paste(good.len,"/",pv[pv$term %in% goods.names,]$nseqs," ",pv[pv$term %in% goods.names,]$name,sep="")
row.names(goods)=paste(pv[pv$term %in% goods.names,]$name,sep="") # modifier #############################################################################################
#row.names(heat)=paste(good.len,"/",pv$nseqs," ",pv$name,sep="")
row.names(heat)=paste(pv$name,sep="") # modifier ####################################
# row.names(diss.goods)=paste(good.len,"/",pv[pv$term %in% goods.names,]$nseqs," ",pv[pv$term %in% goods.names,]$name,sep="")
row.names(diss.goods)=paste(pv[pv$term %in% goods.names,]$name,sep="")
# clustering terms better than cutoff
GO.categories=as.dist(diss.goods)
cl.goods=hclust(GO.categories,method="average")
labs=cl.goods$labels[cl.goods$order] # saving the labels to order the plot
goods=goods[labs,]
labs=sub(" activity","",labs)
old.par <- par( no.readonly = TRUE )
#plots=layout(matrix(c(1,1,1,2,
# 3,4,5,6,
# 3,4,5,6,
# 3,4,5,6),c(4,3.5,
# treeHeight,3,1,3), byrow=T))
plots=layout(matrix(c(1,1,1,1,2,2,2,2,2,2,
3,3,3,3,4,4,4,4,4,4,
3,3,3,3,4,4,4,4,4,4,
3,3,3,3,4,4,4,4,4,4,
3,3,3,3,4,4,4,4,4,4,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8,
5,6,7,7,8,8,8,8,8,8), nrow=14, byrow=T))
#plots=layout(matrix(c(1,2,3),1,3,byrow=T),c(treeHeight,3,1),TRUE)
##### plots=layout(matrix(c(1, 2, 3, 3,
##### 4, 5, 6, 6,
##### 4, 5, 6, 6), nrow=3, byrow=TRUE))
#####
#layout.show(n=6)
step=100
left=1
top=step*(2+length(labs))
#par(mar = c(1,1,1,1))
#plot(c(1:top)~c(1:top),type="n",axes=F,xlab="",ylab="")
#text(left,top-step*2,paste("p < ",level3,sep=""),font=2,cex=1* txtsize,adj=c(0,0),family=font.family)
#text(left,top-step*3,paste("p < ",level2,sep=""),font=1,cex=0.8* txtsize,adj=c(0,0),family=font.family)
#text(left,top-step*4,paste("p < ",10^(-cutoff),sep=""),font=3,col="grey50",cex=0.8* txtsize,adj=c(0,0),family=font.family)
par(mar = c(0,0,0.3,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
text(x = 0.39, y = 0.5, paste("Taxa enrichment"),
cex = 2.5, col = "black", family="serif", font=2, adj=0.5)
par(mar = c(0,0,0.3,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
text(x = 0.39, y = 0.5, paste("Fungal guilds"),
cex = 2.5, col = "black", family="serif", font=2, adj=0.5)
par(mar = c(0,0,0.3,0))
plot(NULL, xlim=c(0,1), ylim=c(0,1), ylab="", xlab="", axes=FALSE)
legend(0, 1, legend=c("Over-represented ASVs in the first condition",
"Under-represented ASVs in the first condition"),
col=c("firebrick1",
"dodgerblue2"), pch=19, cex=1, box.lty=0, y.intersp=0.55)
par(mar = c(0,0,0.3,0))
plot(NULL, xlim=c(0,1), ylim=c(0,1), ylab="", xlab="", axes=FALSE)
legend(0,1, legend=c("Pathotroph-Saprotroph-Symbiotroph",
"Pathotroph-Saprotroph",
"Saprotroph-Symbiotroph",
"Pathotroph-Symbiotroph",
"Saprotroph",
"Symbiotroph",
"Pathotroph",
"Unassigned",
"NA"),
col=c("black",
"steelblue2",
"plum",
"hotpink",
"forestgreen",
"goldenrod3",
"brown4",
"cornsilk4",
"azure4"), pch=19, cex=1, box.lty=0, y.intersp=0.55)
par(mar = c(3,1,1,0))
plot(c(1:top)~c(1:top),type="n",axes=F,xlab="",ylab="")
text(left,top-step*2,paste("p < ",level3,sep=""),font=2,cex=1* txtsize,adj=c(0,0),family=font.family)
text(left,top-step*3,paste("p < ",level2,sep=""),font=1,cex=0.8* txtsize,adj=c(0,0),family=font.family)
text(left,top-step*4,paste("p < ",10^(-cutoff),sep=""),font=3,col="grey50",cex=0.8* txtsize,adj=c(0,0),family=font.family)
par(mar = c(2,2,0.85,0))
plot(as.phylo(cl.goods),show.tip.label=F,cex=0.0000001)
#plot(as.dendrogram(cl.goods))
step=100
left=1
top=step*(2+length(labs))
par(mar = c(0,0,0.3,0))
plot(c(1:top)~c(1:top),type="n",axes=F,xlab="",ylab="")
ii=1
goods$color=1
goods$color[goods$direction==1 & goods$pval>cutoff]=colors[4]
goods$color[goods$direction==0 & goods$pval>cutoff]=colors[3]
goods$color[goods$direction==1 & goods$pval>(-log(level2,10))]=colors[2]
goods$color[goods$direction==0 & goods$pval>(-log(level2,10))]=colors[1]
goods$color[goods$direction==1 & goods$pval>(-log(level3,10))]=colors[2]
goods$color[goods$direction==0 & goods$pval>(-log(level3,10))]=colors[1]
for (i in length(labs):1) {
ypos=top-step*ii
ii=ii+1
if (goods$pval[i]> -log(level3,10)) {
text(left,ypos,labs[i],font=2,cex=1*txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
} else {
if (goods$pval[i]>-log(level2,10)) {
text(left,ypos,labs[i],font=1,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
} else {
# if (goods$pval[i]>cutoff) {
# text(left,ypos,labs[i],font=3,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
# } else {
text(left,ypos,labs[i],font=3,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
#}
}
}
}
###labs_02 <- merge(data.frame(labs), link_guilds, by.x="labs", by.y="V1")
# From qed answer in https://stackoverflow.com/questions/17878048/merge-two-data-frames-while-keeping-the-original-row-order
merge_sameord = function(x, y, ...) {
UseMethod('merge_sameord')
}
merge_sameord.data.frame = function(x, y, ...) {
rstr = paste(sample(c(0:9, letters, LETTERS), 12, replace=TRUE), collapse='')
x[, rstr] = 1:nrow(x)
res = merge(x, y, all.x=TRUE, sort=FALSE, ...)
res = res[order(res[, rstr]), ]
res[, rstr] = NULL
res
}
labs_02 <- merge_sameord(data.frame(labs), link_guilds, by.x="labs", by.y="V1")
# labs_02 <- data.table::fintersect(setDT(labs$labs), setDT(link_guilds))
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Pathotroph-Saprotroph-Symbiotroph","black",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Pathotroph-Saprotroph","steelblue2",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Saprotroph-Symbiotroph","plum",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Pathotroph-Symbiotroph","hotpink",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Saprotroph","forestgreen",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Symbiotroph","goldenrod3",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Pathotroph","brown4",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("Unassigned","cornsilk4",x)})
labs_02$V3 <- sapply(labs_02$V3,function(x) {x <- gsub("NA","azure4",x)})
  ### HERE: CHANGE SAPROTROPH TO GREEN, ETC.
#par(mar = c(2,2,0.85,0))
par(mar = c(0,0,0.3,0))
#plot(NULL, xlim=c(0,1), ylim=c(0,1), ylab="y label", xlab="x lablel")
plot(c(1:top)~c(1:top),type="n",axes=F,xlab="",ylab="")
ii=1
goods$color=1
goods$color[goods$direction==1 & goods$pval>cutoff]=colors[4]
goods$color[goods$direction==0 & goods$pval>cutoff]=colors[3]
goods$color[goods$direction==1 & goods$pval>(-log(level2,10))]=colors[2]
goods$color[goods$direction==0 & goods$pval>(-log(level2,10))]=colors[1]
goods$color[goods$direction==1 & goods$pval>(-log(level3,10))]=colors[2]
goods$color[goods$direction==0 & goods$pval>(-log(level3,10))]=colors[1]
for (i in length(labs):1) {
ypos=top-step*ii
ii=ii+1
if (goods$pval[i]> -log(level3,10)) {
# text(left,ypos,paste(labs[i], labs_02$V2[i]),font=2,cex=1*txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
#text(left,ypos,expression(labs[i]*phantom(labs_02$V2[i])),col=goods$color[i],font=2,cex=1*txtsize,adj=c(0,0),family=font.family)
#text(left,ypos,expression(phantom(labs[i])*labs_02$V2[i]),col="black",font=2,cex=1*txtsize,adj=c(0,0),family=font.family)
### FOR PUTTING NCBI TAXON and GUILDS Together
### text(left,ypos,bquote(bold(.(labs_02$labs[i]))*' Guild '~phantom(.( labs_02$V2[i]))), col=goods$color[i],cex=1*txtsize,adj=c(0,0),family=font.family)
### text(left,ypos,bquote(phantom(.(labs_02$labs[i]))*' Guild '~.( labs_02$V2[i])), col="black",cex=1*txtsize,adj=c(0,0),family=font.family)
text(left,ypos,paste(labs_02$V2[i]),font=2,cex=1*txtsize,col=labs_02$V3[i],adj=c(0,0),family=font.family)
# text(left,ypos,labs_02$V2[i],font=2,cex=1*txtsize,col=goods$color[i],adj=c(0,0),family=font.family) + text(left,ypos,labs[i],font=2,cex=1*txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
} else {
if (goods$pval[i]>-log(level2,10)) {
# text(left,ypos,labs_02$V2[i],font=1,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
### FOR PUTTING NCBI TAXON and GUILDS Together
### text(left,ypos,bquote(.(labs_02$labs[i]) ~ " " phantom(.( labs_02$V2[i]))),col=goods$color[i],font=1,cex=0.8*txtsize,adj=c(0,0),family=font.family)
### text(left,ypos,bquote(phantom(.(labs_02$labs[i])) ~ " " .( labs_02$V2[i])),col="black",font=1,cex=0.8*txtsize,adj=c(0,0),family=font.family)
text(left,ypos,labs_02$V2[i],font=2,cex=1* txtsize,col=labs_02$V3[i],adj=c(0,0),family=font.family)
} else {
# if (goods$pval[i]>cutoff) {
# text(left,ypos,labs[i],font=3,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
# } else {
# text(left,ypos,labs_02$V2[i],font=3,cex=0.8* txtsize,col=goods$color[i],adj=c(0,0),family=font.family)
### FOR PUTTING NCBI TAXON and GUILDS Together
### text(left,ypos,bquote(italic(.(labs_02$labs[i]) ~ " " ~ phantom(.( labs_02$V2[i])))),col=goods$color[i],cex=0.8*txtsize,adj=c(0,0),family=font.family)
### text(left,ypos,bquote(phantom(.(labs_02$labs[i])) ~ " " ~ .( labs_02$V2[i])),col="black",cex=0.8*txtsize,adj=c(0,0),family=font.family)
text(left,ypos,labs_02$V2[i],font=2,cex=1* txtsize,col=labs_02$V3[i],adj=c(0,0),family=font.family)
#}
}
}
}
if(verbose){cat(paste("NCBITaxon terms displayed: ",length(goods.names)),"\n")}
if(verbose){cat(paste("\"Good ASVs\" accounted for: ", ngenes," out of ",totSum, " ( ",round(100*ngenes/totSum,0), "% )","\n",sep=""))}
par(old.par)
goods$pval=10^(-1*goods$pval)
write.table(labs_02, "Table_04_taxon_mwuPlot_guilds.txt")
return(labs_02)
}
#' @title get_taxon_list_drawer
#'
#' @description get taxonomic list drawer
#'
#' @param taxon_list object from taxon_mwu_list() function
#'
#' @return taxon_list_drawer Object and "taxon_list_drawer_input.txt" file
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{taxon_list_drawer <- get_taxon_list_drawer(taxon_list)}
#' @export
get_taxon_list_drawer <- function(taxon_list) {
taxon_list$labs<-gsub(" ", "_", taxon_list$labs)
names(taxon_list) <- NULL
write.table(taxon_list, "taxon_list.txt", row.names=FALSE, quote = FALSE)
# file.copy(from = file.path(original_dir, paste("Working_scripts/search_taxonomic_drawer.sh", sep="")), to ="search_taxonomic_drawer.sh",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = system.file("extdata", "search_taxonomic_drawer.sh", package="Anaconda"), to ="search_taxonomic_drawer.sh",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
# file.copy(from = file.path(kingdom, paste("taxonomy.tsv", sep="")), to ="taxonomy.tsv",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = file.path(kingdom, paste("taxonomy.tsv", sep="")), to ="taxonomy.tsv",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
system('bash search_taxonomic_drawer.sh')
# If Taxon has NAs, these correspond to "holes" in the taxonomy.tsv file used in QIIME2 (limited by current knowledge and current worldwide databases).
taxon_list_drawer <- read.table("taxon_list_drawer.txt")
taxon_list_drawer <- data.frame(rep(1:length(taxon_list_drawer$V1), 1), taxon_list_drawer)
#colnames(taxon_list_drawer) <- c("ID", "taxonomy")
names(taxon_list_drawer) <- NULL
write.table(taxon_list_drawer, "taxon_list_drawer_input.txt", sep="\t", row.names=FALSE, quote = TRUE)
system("sed -i '' $'1s/^/ID\ttaxonomy\\\n/' taxon_list_drawer_input.txt")
taxon_list_drawer <- read.table("taxon_list_drawer_input.txt", sep="\t", header=T)
return(taxon_list_drawer)
}
#' @title get_funguilds
#'
#' @description get Fungi Guilds from taxon_list_drawer Object
#'
#' @param taxon_list_drawer object from get_taxon_list_drawer() function
#'
#' @return funguilds Object
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{funguilds <- get_funguilds(taxon_list_drawer)}
#' @export
get_funguilds <- function(taxon_list_drawer) {
#file.copy(from = file.path(original_dir, paste("Working_scripts/Guilds_v1.1.py", sep="")), to ="Guilds_v1.1.py",
# overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
file.copy(from = system.file("extdata", "Guilds_v1.1.py", package="Anaconda"), to ="Guilds_v1.1.py",
overwrite = TRUE, copy.mode = TRUE, copy.date = FALSE)
taxon_list_drawer <- read.table("taxon_list_drawer_input.txt", sep="\t", header=T)
# to deal with https://stackoverflow.com/questions/17309288/importerror-no-module-named-requests
# if
#FunGuild v1.1 Beta
#Traceback (most recent call last):
# File "Guilds_v1.1.py", line 119, in <module>
# import requests
#ImportError: No module named requests
system('python -m pip install --user requests')
# https://stackoverflow.com/questions/41638558/how-to-call-python-script-from-r-with-arguments
system('python Guilds_v1.1.py -otu taxon_list_drawer_input.txt -m -u')
## CLEAN taxon_list_drawer_input.guilds.txt
dat <- read.table("taxon_list_drawer_input.guilds_matched.txt", sep="\t", header=T)
# https://stackoverflow.com/questions/26289681/r-regex-find-last-occurrence-of-delimiter
funguilds <- data.frame(sub(".*__", "", dat$taxonomy), dat$Guild, dat$Trophic.Mode, stringsAsFactors = FALSE)
# funguilds <- data.frame(sub(".*[__]", "", dat$taxonomy,), dat$Guild, dat$Trophic.Mode,stringsAsFactors = FALSE)
colnames(funguilds) <- c("Taxon", "Guild", "Trophic_Mode")
unlink("Guilds_v1.1.py")
return(funguilds)
}
#' @title get_link_guilds
#'
#' @description get link guilds from taxon_list and funguilds Objects
#'
#' @param taxon_list object from taxon_mwu_list() function
#' @param funguilds object from get_funguilds() function
#'
#' @return link_guilds Object
#' @import ggrepel pheatmap lookup plyr ape DESeq2 ggplot2 stats utils data.table RColorBrewer rafalib
#' @importFrom graphics layout legend par points text
#' @importFrom methods is
#' @examples
#' \dontrun{link_guilds <- get_link_guilds(taxon_list, funguilds)}
#' @export
get_link_guilds <- function(taxon_list, funguilds) {
# MATCH taxon_list with dat_02
funguilds$Taxon_02 <- data.frame(sub("_", " ", funguilds$Taxon))
funguilds_02 <- data.frame(funguilds, funguilds$Taxon_02)
colnames(funguilds_02) <- c("Taxon", "Guild", "Trophic_Mode", "Taxon_02", "Taxon_03")
funguilds_03 <- data.frame(funguilds_02$Taxon_03, funguilds_02$Guild, funguilds_02$Trophic_Mode)
colnames(funguilds_03) <- c("Taxon", "Guild", "Trophic_Mode")
link <- merge(taxon_list, funguilds_03, by.x="labs", by.y="Taxon", all=TRUE)
link$Guild[is.na(link$Guild)] <- "Unassigned"
link$Trophic_Mode[is.na(link$Trophic_Mode)] <- "Unassigned"
colnames(link) <- c("V1", "V2", "V3")
link_guilds <- link
return(link_guilds)
}
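# Typical pipeline for these helpers (sketch; object names follow the examples above):
#   taxon_list        <- taxon_mwu_list(...)               # see taxon_mwu_list()
#   taxon_list_drawer <- get_taxon_list_drawer(taxon_list)
#   funguilds         <- get_funguilds(taxon_list_drawer)
#   link_guilds       <- get_link_guilds(taxon_list, funguilds)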
| /scratch/gouwar.j/cran-all/cranData/Anaconda/R/Anaconda.functions.R |
utils::globalVariables(c("Taxon", "assay", "colData", "dasva", "heatmap_condition_df", "kingdom", "link_guilds", "mcols", "metadata", "original_dir", "pval", "rld", "rowVars", "targeted_analysis_dir", "taxo", "threshold"))
| /scratch/gouwar.j/cran-all/cranData/Anaconda/R/globals.R |
#' @title Correlation Clustering
#' @description
#' This function performs hierarchical clustering on a correlation matrix, providing insights into the relationships between variables.
#' It generates a dendrogram visualizing the hierarchical clustering of variables based on their correlation patterns.
#'
#' @param data Input data frame.
#' @param type The type of correlation to be computed. It can be "pearson", "kendall", or "spearman".
#' @param method The agglomeration method for hierarchical clustering. It can be "complete", "single", "average", "ward.D", "ward.D2", "mcquitty", "median", or "centroid".
#' @param hclust_method Optional override for the agglomeration method; if NULL (default), `method` is used.
#'
#' @return
#' A dendrogram visualizing the hierarchical clustering of variables based on the correlation matrix.
#'
#' @examples
#' data(mtcars)
#' corr_cluster(data = mtcars, type = 'pearson', method = 'complete')
#'
#' @importFrom Hmisc rcorr
#' @import stats
#' @export
corr_cluster <- function(data, type = 'pearson', method = 'complete', hclust_method = NULL) {
  # compute pairwise correlations and convert them into a distance: 1 - |r|
  corr <- Hmisc::rcorr(as.matrix(data), type = type)$r
  dist_matrix <- as.dist(1 - abs(corr))
  # `hclust_method`, if supplied, overrides `method` for the clustering step
  hc <- hclust(dist_matrix, method = if (is.null(hclust_method)) method else hclust_method)
  plot(hc, hang = -1, main = "Feature Distance")
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/corr_cluster.R |
#' @title Correlation Matrix
#' @description
#' Column 1: Row names representing Variable 1 in the correlation test.
#'
#' Column 2: Column names representing Variable 2 in the correlation test.
#'
#' Column 3: The correlation coefficients quantifying the strength and direction of the relationship.
#'
#' Column 4: The p-values associated with the correlations, indicating the statistical significance
#' of the observed relationships. Lower p-values suggest stronger evidence against the null hypothesis.
#'
#' The table provides valuable insights into the relationships between variables, helping to identify
#' statistically significant correlations.
#'
#' @param data Input dataset.
#' @param type Pearson or Spearman correlation, default is Pearson.
#' @param corr_plot Generate a correlation matrix plot, default is false.
#' @param sig.level Significant level. Default is 0.01.
#' @param highlight Highlight p-value(s) that is less than sig.level, default is FALSE
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#'
#' @return A data frame which contains row names, column names, correlation coefficients, and p-values.
#' @return A plot of the correlation if corrplot is set to be true.
#'
#' @examples
#' data(mtcars)
#' corr_matrix(mtcars, type = 'pearson')
#' @importFrom DT datatable formatStyle styleInterval
#' @importFrom htmltools tagList
#' @importFrom Hmisc rcorr
#' @importFrom plotly plot_ly
#' @importFrom plotly layout
#' @importFrom plotly colorbar
#'
#' @export
corr_matrix <- function(data, type = 'pearson', corr_plot = FALSE, sig.level = 0.01,highlight=FALSE,html=FALSE) {
if (html) result <- htmltools::tagList() else result <- list()
cormat <- Hmisc::rcorr(as.matrix(data), type = type)$r
pmat <- Hmisc::rcorr(as.matrix(data), type = type)$P
pmat[is.na(pmat)] <- 0
ut <- lower.tri(cormat)
data <- data.frame(
row = rownames(cormat)[row(cormat)[ut]],
column = rownames(cormat)[col(cormat)[ut]],
cor = cormat[ut],
p = pmat[ut]
)
table1 <- DT::datatable(
data,
extensions = "Buttons",
caption = "Correlation Table",
options = list(
dom = 'Blfrtip',
buttons = c('copy', 'csv', 'excel', 'pdf'),
paging = TRUE,
searching = FALSE,
ordering = TRUE,
scrollX = TRUE
),
style = 'default',
class = 'table-striped table-bordered'
)
if (highlight) {
table1 <- table1 %>%
formatStyle(
columns = c("p"),
valueColumns = c("p"),
backgroundColor = styleInterval(0.05, c('yellow','transparent'))
)
}
  cormat[upper.tri(cormat)] <- NA
  pmat[upper.tri(pmat)] <- NA
  # keep only lower-triangle correlations whose p-value is below sig.level
  cormat[lower.tri(cormat)] <- ifelse(pmat[lower.tri(cormat)] < sig.level, cormat[lower.tri(cormat)], NA)
  plot <- plot_ly(z = cormat, x = rownames(cormat), y = colnames(cormat), type = "heatmap", colorscale = "RdBu", reversescale = TRUE) %>%
layout(title = "Correlation Heatmap",
xaxis = list(title = "Variables", showgrid = FALSE),
yaxis = list(title = "Variables", showgrid = FALSE))%>%
colorbar(zmin = -1, zmax = 1)
  result$table <- table1
  result$plot <- plot
return(if (corr_plot) result else table1)
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/corr_matrix.R |
#' @title Descriptive Statistics
#' @description desc_stat() function calculates various key descriptive statistics for each
#' variables in the provided data set. The function computes the count, number of unique values,
#' duplicate count, number of missing values, null rate, data type, minimum value, 25th percentile,
#' mean, median, 75th percentile, maximum value, standard deviation, kurtosis, skewness, and jarque_pvalue for each variable.
#' @param data input dataset
#' @param count An logical argument(default TRUE) that determines if count is included in the output
#' @param unique An logical argument(default TRUE) that determines if unique is included in the output
#' @param duplicate An logical argument(default TRUE) that determines if duplicate is included in the output
#' @param null An logical argument(default TRUE) that determines if null is included in the output
#' @param null_rate An logical argument(default TRUE) that determines if null_rate is included in the output
#' @param type An logical argument(default TRUE) that determines if type is included in the output
#' @param min An logical argument(default TRUE) that determines if min is included in the output
#' @param p25 An logical argument(default TRUE) that determines if p25 is included in the output
#' @param mean An logical argument(default TRUE) that determines if mean is included in the output
#' @param median An logical argument(default TRUE) that determines if median is included in the output
#' @param p75 An logical argument(default TRUE) that determines if p75 is included in the output
#' @param max An logical argument(default TRUE) that determines if max is included in the output
#' @param sd An logical argument(default TRUE) that determines if sd is included in the output
#' @param kurtosis An logical argument(default FALSE) that determines if kurtosis is included in the output
#' @param skewness An logical argument(default FALSE) that determines if skewness is included in the output
#' @param shapiro An logical argument(default FALSE) that determines if shapiro p-value is included in the output
#' @param kolmogorov An logical argument(default FALSE) that determines if kolmogorov p-value is included in the output
#' @param anderson An logical argument(default FALSE) that determines if anderson p-value is included in the output
#' @param lilliefors An logical argument(default FALSE) that determines if lilliefors p-value is included in the output
#' @param jarque An logical argument(default FALSE) that determines if jarque p-value is included in the output
#' @importFrom htmltools tagList
#' @importFrom DT datatable
#' @import stats
#' @return A data frame which summarizes the characteristics of a data set
#' @export
#'
#' @examples
#' data(mtcars)
#' desc_stat(mtcars)
desc_stat <- function(data, count = TRUE, unique = TRUE, duplicate = TRUE, null = TRUE,
null_rate = TRUE, type = TRUE, min = TRUE, p25 = TRUE, mean = TRUE,
median = TRUE, p75 = TRUE, max = TRUE, sd = TRUE, skewness = FALSE,
kurtosis = FALSE, shapiro = FALSE, kolmogorov = FALSE, anderson = FALSE,
lilliefors = FALSE, jarque = FALSE) {
if (length(data) == 0) stop("Input data is empty.")
Dname <- deparse(substitute(data))
num_stats <- sum(count, unique, duplicate, null, null_rate, type, min, p25,
mean, median, p75, max, sd, skewness, kurtosis, shapiro, kolmogorov,
anderson, lilliefors, jarque)
desc <- matrix(0, ncol =ncol(data), nrow = num_stats)
colnames(desc) <- names(data)
row_counter <- 1
if (count) {
desc[row_counter, ] <- sapply(data, function(x) sum(!is.na(x)))
row_counter <- row_counter + 1
}
if (unique) {
desc[row_counter, ] <- sapply(data, function(x) length(unique(x)))
row_counter <- row_counter + 1
}
if (duplicate) {
desc[row_counter, ] <- sum(duplicated(data))
row_counter <- row_counter + 1
}
if (null) {
desc[row_counter, ] <- sapply(data, function(x) sum(is.na(x)))
row_counter <- row_counter + 1
}
if (null_rate) {
desc[row_counter, ] <- sapply(data, function(x) round(sum(is.na(x)) / length(x),4))
row_counter <- row_counter + 1
}
if (type) {
desc[row_counter, ] <- sapply(data, function(x) class(x))
row_counter <- row_counter + 1
}
if (min) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), min(x, na.rm = TRUE), NA))
row_counter <- row_counter + 1
}
if (p25) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), quantile(x, 0.25, na.rm = TRUE), NA))
row_counter <- row_counter + 1
}
if (mean) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), round(mean(x, na.rm = TRUE), 4), NA))
row_counter <- row_counter + 1
}
if (median) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), median(x, na.rm = TRUE), NA))
row_counter <- row_counter + 1
}
if (p75) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), quantile(x, 0.75, na.rm = TRUE), NA))
row_counter <- row_counter + 1
}
if (max) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), max(x, na.rm = TRUE), NA))
row_counter <- row_counter + 1
}
if (sd) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), round(sd(x,na.rm = TRUE), 4), NA))
row_counter <- row_counter + 1
}
if (kurtosis) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), round(kurtosis(x), 4), NA))
row_counter <- row_counter + 1
}
if (skewness) {
desc[row_counter, ] <- sapply(data, function(x) ifelse(is.numeric(x), round(skewness(x), 4), NA))
row_counter <- row_counter + 1
}
  # For the normality tests, p-values are computed for numeric columns only and
  # kept aligned with their column positions (NA for non-numeric columns).
  if (shapiro) {
    is_numeric <- sapply(data, is.numeric)
    vals <- rep(NA_real_, length(is_numeric))
    vals[is_numeric] <- round(sapply(data[, is_numeric, drop = FALSE], function(x) shapiro.test(x)$p.value), 4)
    desc[row_counter, ] <- vals
    row_counter <- row_counter + 1
  }
  if (kolmogorov) {
    is_numeric <- sapply(data, is.numeric)
    vals <- rep(NA_real_, length(is_numeric))
    # ks.test needs a reference distribution: compare against a normal with the sample mean and sd
    vals[is_numeric] <- round(sapply(data[, is_numeric, drop = FALSE], function(x) ks.test(x, "pnorm", mean(x, na.rm = TRUE), sd(x, na.rm = TRUE))$p.value), 4)
    desc[row_counter, ] <- vals
    row_counter <- row_counter + 1
  }
  if (anderson) {
    is_numeric <- sapply(data, is.numeric)
    vals <- rep(NA_real_, length(is_numeric))
    vals[is_numeric] <- round(sapply(data[, is_numeric, drop = FALSE], ad.test), 4)
    desc[row_counter, ] <- vals
    row_counter <- row_counter + 1
  }
  if (lilliefors) {
    is_numeric <- sapply(data, is.numeric)
    vals <- rep(NA_real_, length(is_numeric))
    vals[is_numeric] <- round(sapply(data[, is_numeric, drop = FALSE], lillie.test), 4)
    desc[row_counter, ] <- vals
    row_counter <- row_counter + 1
  }
  if (jarque) {
    is_numeric <- sapply(data, is.numeric)
    vals <- rep(NA_real_, length(is_numeric))
    vals[is_numeric] <- round(sapply(data[, is_numeric, drop = FALSE], jarque.test), 4)
    desc[row_counter, ] <- vals
    row_counter <- row_counter + 1
  }
rownames(desc) <- c(
if (count) "Count",
if (unique) "Unique",
if (duplicate) "Duplicate",
if (null) "Null",
if (null_rate) "Null Rate",
if (type) "Type",
if (min) "Min",
if (p25) "P25",
if (mean) "Mean",
if (median) "Median",
if (p75) "P75",
if (max) "Max",
if (sd) "SD",
if (kurtosis) "Kurtosis",
if (skewness) "Skewness",
if (shapiro) "Shapiro p-value",
if (kolmogorov) "Kolmogorov p-value",
if (anderson) "Anderson p-value",
if (lilliefors) "Lilliefors p-value",
if (jarque) "Jarque p-value"
)
desc_df <- as.data.frame(t(desc))
table1 <- DT::datatable(
desc_df,
extensions = "Buttons",
caption = paste(Dname, "Dataset Descriptive Statistics Summary With",
ncol(desc_df), "Features and",
nrow(desc_df), "Statistics"),
options = list(
dom = 'Blfrtip',
buttons = c('copy', 'csv', 'excel', 'pdf'),
paging = TRUE,
searching = FALSE,
ordering = TRUE,
scrollX = TRUE
),
style = 'default',
class = 'table-striped table-bordered'
)
return (table1)
}
skewness <- function(x) {
  x <- x[!is.na(x)]            # drop NAs so the sums below are well-defined
  n <- length(x)
  mean_val <- mean(x)
  std_dev <- sqrt(sum((x - mean_val)^2) / n)
  Z <- (x - mean_val) / std_dev
  skewness <- sum(Z^3) / n
  return(skewness)
}
kurtosis <- function(x) {
  x <- x[!is.na(x)]            # drop NAs so the sums below are well-defined
  n <- length(x)
  mean_val <- mean(x)
  std_dev <- sqrt(sum((x - mean_val)^2) / n)
  Z <- (x - mean_val) / std_dev
  kurtosis <- sum(Z^4) / n
  return(kurtosis)
}
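# Jarque-Bera statistic: JB = n/6 * (S^2 + (K - 3)^2 / 4); under normality it is
# asymptotically chi-squared with 2 degrees of freedom.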
jarque.test <- function(x) {
n <- length(x)
skew <- skewness(x)
kurt <- kurtosis(x)
test_stat <- n/6 * (skew^2 + (kurt - 3)^2 / 4)
p_value <- 1 - pchisq(test_stat, df = 2)
return(p_value)
}
ad.test <- function(x) {
x <- sort(x[complete.cases(x)])
n <- length(x)
  if (n < 5) warning("asymptotic p-values are reliable only for n greater than 5; interpret small-sample results with caution")
logp1 <- pnorm((x - mean(x)) / sd(x), log.p = TRUE)
logp2 <- pnorm(-(x - mean(x)) / sd(x), log.p = TRUE)
h <- (2 * seq_len(n) - 1) * (logp1 + rev(logp2))
A <- -n - mean(h)
AA <- (1 + 0.75/n + 2.25/n^2) * A
p_value <- if (AA < 0.2) {
1 - exp(-13.436 + 101.14 * AA - 223.73 * AA^2)
} else if (AA < 0.34) {
1 - exp(-8.318 + 42.796 * AA - 59.938 * AA^2)
} else if (AA < 0.6) {
exp(0.9177 - 4.279 * AA - 1.38 * AA^2)
} else if (AA < 10) {
exp(1.2937 - 5.709 * AA + 0.0186 * AA^2)
} else {
3.7e-24
}
return(p_value)
}
lillie.test <- function(x) {
x <- sort(x[complete.cases(x)])
n <- length(x)
  if (n < 5) warning("asymptotic p-values are reliable only for n greater than 5; interpret small-sample results with caution")
p <- pnorm((x - mean(x))/sd(x))
Dplus <- max(seq(1:n)/n - p)
Dminus <- max(p - (seq(1:n) - 1)/n)
K <- max(Dplus, Dminus)
if (n <= 100) {
Kd <- K
nd <- n
} else {
Kd <- K * ((n/100)^0.49)
nd <- 100
}
p_value <- exp(-7.01256 * Kd^2 * (nd + 2.78019) + 2.99587 *
Kd * sqrt(nd + 2.78019) - 0.122119 + 0.974598/sqrt(nd) +
1.67997/nd)
if (p_value > 0.1) {
KK <- (sqrt(n) - 0.01 + 0.85/sqrt(n)) * K
if (KK <= 0.302) {
p_value <- 1
} else if (KK <= 0.5) {
p_value <- 2.76773 - 19.828315 * KK + 80.709644 *
KK^2 - 138.55152 * KK^3 + 81.218052 * KK^4
} else if (KK <= 0.9) {
p_value <- -4.901232 + 40.662806 * KK - 97.490286 *
KK^2 + 94.029866 * KK^3 - 32.355711 * KK^4
} else if (KK <= 1.31) {
p_value <- 6.198765 - 19.558097 * KK + 23.186922 *
KK^2 - 12.234627 * KK^3 + 2.423045 * KK^4
} else {
p_value <- 0
}
}
return(p_value)
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/desc_stat.R |
#' @title Histogram Plot for Numerical Variables
#' @description
#' This function generates histogram plots for all numerical variables in the input data frame.
#' It offers a vivid and effective visual summary of the distribution of each numerical variable,
#' helping in a quick understanding of their central tendency, spread, and shape.
#'
#' @param data The input data frame containing numerical variables.
#' @param fill The fill color for the histogram bars (default: "skyblue").
#' @param color The border color for the histogram bars (default: "black").
#' @param alpha The alpha (transparency) value for the histogram bars (default: 0.7).
#' @param subplot A logical argument (default: FALSE) indicating whether to create subplots for each variable.
#' @param nrow Number of rows for subplots (used when subplot is TRUE, default: 2).
#' @param margin Margin for subplots (used when subplot is TRUE, default: 0.1).
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return A list of histogram plot.
#'
#' @examples
#' hist_plot(data = mtcars, fill = "skyblue", color = "black", alpha = 0.7, subplot = FALSE)
#' @importFrom htmltools tagList
#' @import ggplot2
#' @import stats
#' @import magrittr
#' @importFrom plotly ggplotly
#' @importFrom plotly style
#' @importFrom plotly layout
#' @export
hist_plot <- function(data, fill = "skyblue", color = "black", alpha = 0.7, subplot = FALSE, nrow = 2, margin = 0.1,html=FALSE) {
numerical <- names(Filter(is.numeric, data))
if (length(numerical) == 0) stop("There is no numerical variable in the dataset")
gg_list <- lapply(numerical, function(var) {
    binwidth <- density(data[[var]], bw = "SJ", na.rm = TRUE)$bw  # Sheather-Jones bandwidth as binwidth
plot <- ggplot2::ggplot(data, aes(x = !!sym(var))) +
geom_histogram(binwidth = binwidth, fill = fill, color = color, alpha = alpha) +
labs(title = paste(var, "Distribution"), y = "Frequency") +
theme_minimal()
ggplotly(plot, tooltip = "all", dynamicTicks = TRUE) %>% style(hoverinfo = "text")
})
if (html) gg_list <- do.call(htmltools::tagList, gg_list)
if (subplot) {
fig <- plotly::subplot(gg_list, nrows = nrow, titleX = TRUE, titleY = FALSE, margin = margin) %>%
style(hoverinfo = "text") %>%
      layout(title = list(text = "Distribution Histogram"))
} else {
fig <- gg_list
}
return(fig)
}
#' @title Numerical Variables Density Plots
#' @description
#' This function generates density plots for all numerical variables in the input data frame.
#' It offers a vivid and effective visual summary of the distribution of each numerical variable,
#' helping in a quick understanding of their central tendency, spread, and shape.
#'
#' @param data The input data frame containing numerical variables.
#' @param fill The fill color of the density plot (default: "skyblue").
#' @param color The line color of the density plot (default: "black").
#' @param alpha The transparency of the density plot (default: 0.7).
#' @param subplot A logical argument (default: FALSE) indicating whether to create subplots.
#' @param nrow Number of rows for subplots (if subplot is TRUE, default: 2).
#' @param margin Margin for subplots (if subplot is TRUE, default: 0.1).
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return A list of density plots.
#'
#' @examples
#' data(mtcars)
#' dens_plot(mtcars)
#'
#' @import ggplot2
#' @importFrom htmltools tagList
#' @import stats
#' @importFrom plotly ggplotly
#' @importFrom plotly style
#' @importFrom plotly layout
#' @export
dens_plot <- function(data, fill = "skyblue", color = "black", alpha = 0.7, subplot = FALSE, nrow = 2, margin = 0.1,html=FALSE) {
numerical <- names(Filter(is.numeric, data))
if (length(numerical) == 0) stop("There is no numerical variable in the dataset")
gg_list <- lapply(numerical, function(var){
    plot <- ggplot(data, aes(x = !!sym(var))) +
geom_density(fill = fill, color = color, alpha = alpha) +
labs(title = paste(var, "Density Plot"), y = "Density") +
theme_minimal()
ggplotly(plot, tooltip = "all", dynamicTicks = TRUE) %>%
plotly::style(hoverinfo = "text")
})
if (html) gg_list <- do.call(htmltools::tagList, gg_list)
if (subplot) {
fig <- plotly::subplot(gg_list, nrows = nrow, titleX = TRUE, titleY = FALSE, margin = margin) %>%
plotly::style(hoverinfo = "text") %>%
plotly::layout(title = list(text = "Density Plots"))
} else {
fig <- gg_list
}
return(fig)
}
#' @title QQ Plots for Numerical Variables
#' @description
#' This function generates QQ plots for all numerical variables in the input data frame.
#' QQ plots are valuable for assessing the distributional similarity between observed data
#' and a theoretical normal distribution. It acts as a guide, revealing deviations from the
#' expected norm, outliers, and the contours of distribution tails.
#'
#' @param data The input data frame containing numerical variables.
#' @param color The color of the QQ plot line (default: "skyblue").
#' @param subplot A logical argument (default: FALSE) indicating whether to create subplots.
#' @param nrow Number of rows for subplots (if subplot is TRUE, default: 2).
#' @param margin Margin for subplots (if subplot is TRUE, default: 0.1).
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return A list of QQ plots.
#'
#' @examples
#' data(mtcars)
#' qq_plot(mtcars)
#'
#' @import ggplot2
#' @import htmltools
#' @import stats
#' @importFrom plotly ggplotly
#' @importFrom plotly style
#' @importFrom plotly layout
#' @export
qq_plot <- function(data, color = "skyblue", subplot = FALSE, nrow = 2, margin = 0.1,html=FALSE) {
numerical <- names(Filter(is.numeric, data))
if (length(numerical) == 0) stop("There is no numerical variable in the dataset")
gg_list <- lapply(numerical, function(var) {
plot <- ggplot(data, aes(sample = !!sym(var))) +
geom_qq() +
geom_qq_line(color = color) +
labs(title = paste(var, "QQ Plot"), subtitle = "Normal QQ Plot") +
theme_minimal()
ggplotly(plot, tooltip = "all", dynamicTicks = TRUE) %>%
plotly::style(hoverinfo = "text") %>%
plotly::layout(title = list(text = paste(var, "QQ Plot")))
})
if (html) gg_list <- do.call(htmltools::tagList, gg_list)
if (subplot) {
fig <- plotly::subplot(gg_list, nrows = nrow, titleX = TRUE, titleY = FALSE, margin = margin) %>%
plotly::style(hoverinfo = "text") %>%
plotly::layout(title = list(text = ""))
} else {
fig <- gg_list
}
return(fig)
}
#' @title Bar Plots for Categorical Variables
#' @description
#' This function generates bar plots for all categorical variables in the input data frame.
#' Bar plots offer a visual representation of the distribution of categorical variables,
#' making it easy to understand the frequency of each category. They are particularly
#' useful for exploring patterns, identifying dominant categories, and comparing the relative
#' frequencies of different levels within each variable.
#'
#' @param data The input data frame containing categorical variables.
#' @param fill Fill color for the bars (default: "skyblue").
#' @param color Border color of the bars (default: "black").
#' @param width Width of the bars (default: 0.7).
#' @param subplot A logical argument (default: FALSE) indicating whether to create subplots.
#' @param nrow Number of rows for subplots (if subplot is TRUE, default: 2).
#' @param margin Margin for subplots (if subplot is TRUE, default: 0.1).
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return A list of bar plots.
#'
#' @examples
#' data(iris)
#' bar_plot(iris)
#'
#' @import ggplot2
#' @import htmltools
#' @import stats
#' @importFrom plotly ggplotly
#' @importFrom plotly style
#' @importFrom plotly layout
#' @export
bar_plot <- function(data, fill = "skyblue", color = "black", width = 0.7, subplot = FALSE, nrow = 2, margin = 0.1,html=FALSE) {
categories = frequencies = NULL
categorical <- names(Filter(function(x) is.factor(x) || is.character(x), data))
if (length(categorical) == 0) stop("There is no categorical variable in the dataset")
gg_list <- lapply(categorical, function(cat) {
table_data <- table(data[[cat]])
bar_df <- data.frame(categories = names(table_data), frequencies = as.numeric(table_data))
plot <- ggplot2::ggplot(bar_df, aes(x = frequencies, y = reorder(categories, frequencies))) +
geom_bar(stat = "identity", color = color, width = width, fill = fill) +
labs(title = paste(cat, "Bar Plot"), x = "Frequency", y = cat) +
theme_minimal()
ggplotly(plot) %>%
plotly::style(hoverinfo = "text") %>%
plotly::layout(title = list(text = paste(cat, "Bar Plot")))
})
if (html) gg_list <- do.call(htmltools::tagList, gg_list)
if (subplot) {
fig <- plotly::subplot(gg_list, nrows = nrow, titleX = TRUE, titleY = FALSE, margin = margin) %>%
plotly::style(hoverinfo = "text") %>%
plotly::layout(title = list(text = ""))
return(fig)
} else {
return(gg_list)
}
}
#' @title Pie Plots for Categorical Variables
#' @description
#' This function generates pie charts for categorical variables in the input data frame using plotly.
#' Pie plots offer a visual representation of the distribution of categorical variables,
#' making it easy to understand the frequency of each category. They are particularly
#' useful for exploring patterns, identifying dominant categories, and comparing the relative
#' frequencies of different levels within each variable.
#'
#' @param data The input data frame containing categorical variables.
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return A list of pie charts.
#'
#' @examples
#' data(iris)
#' pie_plot(iris)
#' @import htmltools
#' @import stats
#' @importFrom plotly plot_ly
#' @importFrom plotly layout
#' @export
pie_plot <- function(data,html=FALSE) {
categorical <- names(Filter(function(x) is.factor(x) || is.character(x), data))
if (length(categorical) == 0) stop("There is no categorical variable in the dataset")
gg_list <- lapply(categorical, function(cat) {
table_data <- table(data[[cat]])
pie_df <- data.frame(categories = names(table_data), frequencies = as.numeric(table_data))
plot_ly(pie_df, labels = ~categories, values = ~frequencies, type = 'pie',
marker = list(line = list(color = '#FFFFFF', width = 1)),
textinfo = 'label+percent') %>%
layout(title = paste(cat, "Pie Chart"))
})
if (html) gg_list <- do.call(htmltools::tagList,gg_list)
return(gg_list)
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/dist_plot.R |
#' @title Missing Value Imputation
#'
#' @description
#' This function performs missing value imputation in the input data using various methods.
#' The available imputation methods are:
#'
#' - "mean": Imputes missing values with the mean of the variable.
#' - "median": Imputes missing values with the median of the variable.
#' - "mode": Imputes missing values with the mode of the variable (for categorical data).
#' - "locf": Imputes missing values using the Last Observation Carried Forward method.
#' - "knn": Imputes missing values using the k-Nearest Neighbors algorithm (specify k).
#'
#'
#' @param data Input data.
#' @param method Method of handling missing values: "mean," "median," "mode," "locf," or "knn."
#' @param k Value of the number of neighbors to be checked (only for knn method). Default is NULL.
#'
#' @return a data frame with imputed missing values
#' @export
#' @importFrom caret preProcess
#' @import RANN
#' @examples
#' data(airquality)
#' impute_missing(airquality, method='mean')
#'
impute_missing <- function(data, method = "mean", k = NULL) {
imputed_data <- data
if (is.null(k)) {
imputed_data <- lapply(imputed_data, function(col) {
if (method == "mean") {
col[is.na(col)] <- mean(col, na.rm = TRUE)
} else if (method == "median") {
col[is.na(col)] <- median(col, na.rm = TRUE)
} else if (method == "mode") {
col[is.na(col)] <- impute_mode(col)
} else if (method == "locf") {
col <- impute_locf(col)
}
return(col)
})
} else if (method == "knn" && !is.null(k)) {
imputed_data <- impute_knn(imputed_data, k)
} else {
stop("Invalid imputation method. Supported methods are: mean, median, mode, locf, knn")
}
return(as.data.frame(imputed_data))
}
impute_locf <- function(x) {
imputed_values <- x
lo <- NA
for (i in 1:length(x)) {
if (is.na(x[i])) {
if (!is.na(lo)) {
imputed_values[i] <- lo
}
} else {
lo <- x[i]
}
}
return(imputed_values)
}
impute_mode <- function(x) {
  tbl <- table(x)
  # most frequent value; ties are broken by first occurrence, and the original
  # type (numeric or character) is preserved so categorical columns work too
  mode_value <- names(tbl)[which.max(tbl)]
  if (is.numeric(x)) mode_value <- as.numeric(mode_value)
  return(rep(mode_value, length.out = sum(is.na(x))))
}
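# Note: caret's knnImpute standardizes the data before imputing, so the loop in
# impute_knn() maps imputed values back to the original scale using the stored
# per-column means and standard deviations.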
impute_knn <- function(data, k) {
imputed_values <- caret::preProcess(data, method = 'knnImpute', k = k)
imputed_data <- predict(imputed_values, data)
procNames <- data.frame(col = names(imputed_values$mean), mean = imputed_values$mean, sd = imputed_values$std)
for (i in procNames$col) {
imputed_data[i] <- imputed_data[i] * imputed_values$std[i] + imputed_values$mean[i]
}
return(imputed_data)
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/missing_impute.R |
#' @title Missing Values Plot
#' @description
#' This function generates plots to visualize missing values in a data frame. It includes two types of plots:
#' - A percentage plot: Displays the percentage of missing values for each variable, allowing quick identification
#' of variables with high missingness.
#' - A row plot: Illustrates the distribution of missing values across rows, providing insights into patterns of missingness.
#'
#' @param df The input data frame.
#' @param percentage A logical argument (default: TRUE) to generate a percentage plot.
#' @param row A logical argument (default: TRUE) to generate a row plot.
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return A list of plots, including a percentage plot and/or a row plot.
#'
#' @examples
#' \donttest{
#' data("airquality")
#' missing_values_plot(df = airquality, percentage = TRUE, row = TRUE)
#' }
#' @importFrom htmltools tagList
#' @import ggplot2
#' @importFrom plotly ggplotly
#' @importFrom grDevices colors
#' @export
missing_values_plot <- function(df, percentage = TRUE, row = TRUE, html=FALSE) {
variable = missing_percentage = key = id = isna = NULL
if (all(!is.na(df))) {
return("Data contains no missing values.")
}
if (percentage) {
missing_df <- data.frame(
variable = names(df),
missing_percentage = colMeans(is.na(df)) * 100
)
percentage_plot <- ggplot(missing_df, aes(x = reorder(variable, -missing_percentage), y = missing_percentage)) +
geom_bar(stat = "identity", alpha = 0.8, fill = "tomato") +
labs(title = "Percentage of Missing Values in Each Variable", x = "Variable", y = "Missing Percentage") +
coord_flip() +
theme_minimal()
}
if (row) {
long_data <- data.frame(
      id = rep(seq_len(nrow(df)), times = ncol(df)),
key = rep(colnames(df), each = nrow(df)),
val = unlist(df),
isna = ifelse(is.na(unlist(df)), "Missing", "Present")
)
row_plot <- ggplot(long_data, aes(x = as.factor(key), y = id, fill = isna)) +
geom_raster(alpha = 0.8) +
scale_fill_manual(name = "", values = c('tomato', 'steelblue')) +
labs(x = "Variable", y = "Row Number", title = "Missing values in rows") +
coord_flip() +
theme_minimal()
}
if (percentage && row) {
plot_list <- list(ggplotly(percentage_plot), ggplotly(row_plot))
result <-plot_list
} else if (percentage) {
result <- ggplotly(percentage_plot)
} else if (row) {
result <- ggplotly(row_plot)
}
if (html) result <- htmltools::tagList(result)
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/missing_plot.R |
#' @title Principal Component Analysis (PCA)
#'
#' @description
#' This function performs Principal Component Analysis (PCA) on the input data, providing
#' a detailed analysis of variance, eigenvalues, and eigenvectors. It offers options to generate
#' a scree plot for visualizing variance explained by each principal component and a biplot to
#' understand the relationship between variables and observations in reduced dimensions.
#'
#'
#' @param data Numeric matrix or data frame containing the variables for PCA.
#' @param variance_threshold Proportion of total variance to retain (default: 0.90).
#' @param center Logical, indicating whether to center the data (default: TRUE).
#' @param scale Logical, indicating whether to scale the data (default: FALSE).
#' @param scree_plot Logical, whether to generate a scree plot (default: FALSE).
#' @param biplot Logical, whether to generate a biplot (default: FALSE).
#' @param choices Numeric vector of length 2, indicating the principal components to plot in the biplot.
#' @param groups Optional grouping variable for coloring points in the biplot.
#' @param length_scale Scaling factor for adjusting the length of vectors in the biplot (default: 1).
#' @param scree_legend Logical, indicating whether to show legend in scree plot (default: True).
#' @param scree_legend_pos A vector c(x, y) to adjust the position of the legend.
#' @param html Whether the output should be in HTML format,used when knitting into HTML. Default is FALSE.
#' @return
#' A list containing:
#' - summary_table: A matrix summarizing eigenvalues and cumulative variance explained.
#' - scree_plot: A scree plot if scree_plot is TRUE.
#' - biplot: A biplot if biplot is TRUE.
#'
#' @examples
#' data(mtcars)
#' pca_result <- pca(mtcars, scree_plot = TRUE, biplot = TRUE)
#' pca_result$summary_table
#' pca_result$scree_plot
#' pca_result$biplot
#'
#' @import ggplot2
#' @importFrom htmltools tagList
#' @importFrom DT datatable
#' @importFrom plotly ggplotly
#' @importFrom plotly add_text
#' @importFrom plotly add_trace
#' @export
pca <- function(data,
variance_threshold = 0.90,
center = TRUE,
scale = FALSE,
scree_plot = FALSE,
biplot = FALSE,
choices = 1:2,
groups = NULL,
length_scale = 1,
scree_legend = TRUE,
scree_legend_pos = c(0.7,0.5),
html = FALSE) {
x = y = cum_y = cum_label = xvar = yvar = varname = angle = hjust = NULL
pca_result <- prcomp(data, center = center, scale. = scale)
eigenvalue <- (pca_result$sdev)^2
eigenvector <- pca_result$rotation
score <- pca_result$x
proportion_variance_explained <- (eigenvalue) / sum(eigenvalue)
num_components <- min(which(cumsum(proportion_variance_explained) >= variance_threshold))
selected_eigenvalue <- eigenvalue[1:num_components]
selected_eigenvector <- eigenvector[, 1:num_components]
selected_score <- score[, 1:num_components]
if (html) result <- htmltools::tagList() else result <- list()
summary_table <- matrix(
c(selected_eigenvalue,
selected_eigenvalue / sum(eigenvalue) * 100,
cumsum(selected_eigenvalue / sum(eigenvalue)) * 100),
nrow = length(selected_eigenvalue),
byrow = FALSE
)
rownames(summary_table) <- paste("PC", 1:nrow(summary_table))
colnames(summary_table) <- c("Eigenvalue", "Variance Explained(%)", "Cumulative Variance Explained(%)")
table1<-datatable(
summary_table,
extensions = "Buttons",
caption = "Summary Table",
options = list(
dom = 'Blfrtip',
buttons = c('copy', 'csv', 'excel', 'pdf'),
paging = FALSE,
searching = FALSE,
ordering = FALSE
),
style = 'default',
class = 'table-striped table-bordered'
)
result$summary_table <- table1
if (scree_plot) {
data <- data.frame(
x = seq_along(eigenvalue),
y = eigenvalue / sum(eigenvalue) * 100,
cum_y = cumsum(eigenvalue / sum(eigenvalue) * 100),
label = round(eigenvalue / sum(eigenvalue) * 100, 2),
cum_label = round(cumsum(eigenvalue / sum(eigenvalue) * 100), 2)
)
scree_plot <- plot_ly(data, x = ~x) %>%
add_trace(
type = "bar",
y = ~y,
name = "Individual Explained Variance",
marker = list(color = "steelblue", line = list(color = "black", width = 1)),
showlegend = scree_legend
) %>%
add_trace(
type = "scatter",
mode = "markers",
y = ~y,
marker = list(size = 6, color = "black", symbol = "circle"),
name = "",
showlegend = scree_legend
) %>%
add_trace(
type = "scatter",
mode = "lines",
y = ~cum_y,
name = "Cumulative Explained Variance",
line = list(color = "tomato"),
showlegend = scree_legend
) %>%
add_trace(
type = "scatter",
mode = "markers",
y = ~cum_y,
marker = list(size = 6, color = "black", symbol = "square"),
name = "",
showlegend = scree_legend
) %>%
layout(
title = "Scree Plot",
xaxis = list(title = "PC"),
yaxis = list(title = "Variance Explained (%)"),
legend = list(
x = scree_legend_pos[[1]],
y = scree_legend_pos[[2]],
xanchor = "center",
yanchor = "top"
)
)
result$scree_plot <- scree_plot
}
if (biplot) {
if (length(choices) != 2) {
      stop("Pick two principal components")
}
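    # Biplot scaling (ggbiplot-style): scores are first standardized by
    # sdev * sqrt(n - 1) and later rescaled, while loadings (eigenvectors) are
    # multiplied by the component sdevs, so arrow directions reflect each
    # variable's contribution to the chosen PCs.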
scaling.factor <- sqrt(nrow(score) - 1)
u <- sweep(score, 2, 1 / (sqrt(eigenvalue) * scaling.factor), FUN = '*')
choices <- pmin(choices, ncol(u))
df.u <- as.data.frame(sweep(u[, choices], 2, sqrt(eigenvalue[choices]), FUN = '*'))
v <- sweep(eigenvector, 2, sqrt(eigenvalue), FUN = '*')
df.v <- as.data.frame(v[, choices])
names(df.u) <- c('xvar', 'yvar')
names(df.v) <- names(df.u)
df.u <- df.u * scaling.factor * length_scale
if (!is.null(groups)) {
df.u$groups <- groups
}
df.v$varname <- rownames(v)
df.v$angle <- with(df.v, (180/pi) * atan(yvar / xvar))
df.v$hjust <- with(df.v, (1 - 1.2 * sign(xvar)) / 2)
g <- ggplot(data = df.u, aes(x = xvar, y = yvar)) +
xlab(paste('PC', choices[1], ' (', sprintf('%0.1f%% explained var.)', 100 * eigenvalue[choices[1]]/sum(eigenvalue)), ')')) +
ylab(paste('PC', choices[2], ' (', sprintf('%0.1f%% explained var.)', 100 * eigenvalue[choices[2]]/sum(eigenvalue)), ')')) +
coord_equal()
g <- g +
geom_segment(data = df.v,
aes(x = 0, y = 0, xend = xvar, yend = yvar),
arrow = arrow(length = unit(1/2, 'picas')),
color = 'red')
if (!is.null(df.u$groups)) {
g <- g + geom_point(aes(color = groups), alpha = 1)
} else {
g <- g + geom_point(alpha = 1)
}
g <- g +
geom_text(data = df.v,
aes(label = varname, x = xvar, y = yvar,
angle = angle, hjust = hjust),
color = 'darkred', size = 3)+
theme_minimal()+
geom_hline(yintercept = 0, linetype = "solid", color = "black", size = 0.2) +
geom_vline(xintercept = 0, linetype = "solid", color = "black", size = 0.2)
result$biplot <-ggplotly(g)%>%
plotly::layout(title ="Biplot")
}
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/R/pca.R |
## ----echo=FALSE---------------------------------------------------------------
library(knitr)
library(AnalysisLin)
## -----------------------------------------------------------------------------
df <- data.frame(
Descriptive_Statistics = c("desc_stat()","","","","",""),
Data_Visualization = c("hist_plot()","dens_plot()", "bar_plot()","pie_plot()","qq_plot()","missing_value_plot()"),
Correlation_Analysis = c("corr_matrix()", "corr_cluster()","","","",""),
Feature_Engineering = c("missing_impute()", "pca()","","","","")
)
kable(df)
## -----------------------------------------------------------------------------
data("iris")
data("mtcars")
data("Titanic")
data("airquality")
## ----eval=FALSE---------------------------------------------------------------
# desc_stat(mtcars)
## ----eval=FALSE---------------------------------------------------------------
# desc_stat(iris)
## ----eval=FALSE---------------------------------------------------------------
# desc_stat(airquality)
## ----eval=FALSE---------------------------------------------------------------
# desc_stat(mtcars,max = F, min=F, sd=F,kurtosis = T,skewness = T,shapiro = T,anderson = T,lilliefors = T, jarque = T)
## ----eval=FALSE---------------------------------------------------------------
# hist_plot(iris,subplot=F)
## ----eval=FALSE---------------------------------------------------------------
# dens_plot(iris,subplot=T,nrow=2)
## ----eval=FALSE---------------------------------------------------------------
# qq_plot(iris,subplot = T)
## ----eval=FALSE---------------------------------------------------------------
# bar_plot(iris)
## ----eval=FALSE---------------------------------------------------------------
# pie_plot(iris)
## ----eval=FALSE---------------------------------------------------------------
# corr_matrix(mtcars)
## ----eval=FALSE---------------------------------------------------------------
# corr_matrix(mtcars,corr_plot=T)
## ----eval=FALSE---------------------------------------------------------------
# corr_matrix(mtcars,type='pearson')
# corr_matrix(mtcars,type='spearman')
## ----eval=FALSE---------------------------------------------------------------
# corr_cluster(mtcars,type='pearson')
## ----eval=FALSE---------------------------------------------------------------
# corr_cluster(mtcars, type='spearman')
## ----eval=FALSE---------------------------------------------------------------
# missing_values_plot(airquality)
## ----results='hide'-----------------------------------------------------------
impute_missing(airquality,method='mean')
## ----results='hide'-----------------------------------------------------------
impute_missing(airquality,method='mode')
impute_missing(airquality,method='median')
impute_missing(airquality,method='locf')
impute_missing(airquality,method='knn',k=5)
## ----eval=FALSE---------------------------------------------------------------
# pca(mtcars,variance_threshold = 0.9,scale=T)
## ----eval=FALSE---------------------------------------------------------------
# pca(mtcars,variance_threshold = 0.9,scale=TRUE,scree_plot=TRUE,biplot=TRUE)
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/inst/doc/AnalysisLin-vignette.R |
---
title: "AnalysisLin-vignette"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{AnalysisLin-vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
- [**Introduction**](#introduction)
- [**Descriptive Statistic**](#descriptive-statistic)
- [**Data Visualization**](#data-visualization)
- [Numeric Plot](#numeric-plot)
- [Categorical Plot](#categorical-plot)
- [**Correlation Analysis**](#correlation-analysis)
- [Correlation Matrix](#correlation-matrix)
- [Correlation Clustering](#correlation-clustering)
- [**Feature Engineering**](#feature-engineering)
  - [Missing Value Plot](#missing-value-plot)
  - [Missing Value Imputation](#missing-value-imputation)
  - [Principal Component Analysis](#principal-component-analysis)
# Introduction
Hi everyone, this page introduces AnalysisLin, my personal package for exploratory data analysis (EDA). It includes several useful functions designed to assist with EDA, based on what I learned throughout my academic years; I use them for EDA myself.
The table below summarizes the functions covered on this page.
```{r,echo=FALSE}
library(knitr)
library(AnalysisLin)
```
```{r}
df <- data.frame(
Descriptive_Statistics = c("desc_stat()","","","","",""),
Data_Visualization = c("hist_plot()","dens_plot()", "bar_plot()","pie_plot()","qq_plot()","missing_value_plot()"),
Correlation_Analysis = c("corr_matrix()", "corr_cluster()","","","",""),
Feature_Engineering = c("missing_impute()", "pca()","","","","")
)
kable(df)
```
Some famous and very useful pre-installed datasets, such as iris, mtcars, and airquality, will be used to demonstrate what each function in the package does. If you have not installed the package, please do the following:
install.packages("AnalysisLin")
```{r}
data("iris")
data("mtcars")
data("Titanic")
data("airquality")
```
Exploratory Data Analysis, in simple words, is the process of getting to know your data.
## Descriptive Statistic
The first function in the package is desc_stat(). This function computes numerous useful statistical metrics so that you gain a profound understanding of your data:
- **Count**: Number of values in a variable.
- **Unique**: Number of values that are unique in a variable.
- **Duplicate**: Number of rows that are duplicate in a dataset.
- **Null**: Number of values that are missing in a variable.
- **Null Rate**: Percentage of values that are missing in a variable.
- **Type**: Type of variable (e.g., numeric, character, factor).
- **Min**: Smallest value.
- **P25**: 25th percentile (first quartile).
- **Mean**: Mean value.
- **Median**: Median value.
- **P75**: 75th percentile (third quartile).
- **Max**: Largest value.
- **SD**: Standard deviation.
- **Kurtosis**: A measure of the tailedness of a distribution.
- **Skewness**: A measure of the asymmetry of a distribution.
- **Shapiro-Wilk Test**: Checks if a sample follows a normal distribution by comparing its statistics to expected values under the assumption of normality.
- **Kolmogorov-Smirnov Test**: Checks if a sample follows a normal distribution by comparing its cumulative distribution function to the expected normal distribution.
- **Anderson-Darling Test**: Assesses normality by emphasizing tail behavior, determining if a sample conforms to a specified distribution.
- **Lilliefors Test**: A variant of Kolmogorov-Smirnov tailored for small sample sizes, testing whether data are normally distributed.
- **Jarque-Bera Test P-value**: Checks whether the data's skewness and kurtosis match those of a normal distribution.
```{r,eval=FALSE}
desc_stat(mtcars)
```
```{r,eval=FALSE}
desc_stat(iris)
```
```{r,eval=FALSE}
desc_stat(airquality)
```
These metrics provide valuable insights into the dataset at a deep level. If you don't want any of these metrics to be computed, set them to `FALSE`; the unwanted metrics won't appear in the output.
Furthermore, desc_stat() can also compute kurtosis, skewness, and the Shapiro-Wilk, Anderson-Darling, Lilliefors, and Jarque-Bera tests:
```{r,eval=FALSE}
desc_stat(mtcars,max = F, min=F, sd=F,kurtosis = T,skewness = T,shapiro = T,anderson = T,lilliefors = T, jarque = T)
```
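As a rough rule of thumb, a p-value below 0.05 in any of the normality-test columns suggests the variable departs from a normal distribution. The same check can be reproduced with base R:
```{r,eval=FALSE}
shapiro.test(mtcars$mpg)$p.value  # < 0.05 would suggest non-normality
```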
## Data Visualization
### Numeric Plot
To visualize histograms for all numerical variables:
```{r,eval=FALSE}
hist_plot(iris,subplot=F)
```
To visualize density plots for all numerical variables in two rows of subplots:
```{r,eval=FALSE}
dens_plot(iris,subplot=T,nrow=2)
```
A Quantile-Quantile (QQ) plot is a graphical tool used to assess whether a dataset follows a normal distribution. It compares the quantiles of the observed data to the quantiles of the expected distribution.
If you want to check normality for numerical variables by drawing QQ plots:
```{r,eval=FALSE}
qq_plot(iris,subplot = T)
```
### Categorical Plot
To visualize bar charts for all categorical variables:
```{r,eval=FALSE}
bar_plot(iris)
```
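When a dataset has several categorical columns, the bar plots can also be combined into a single figure; `subplot` and `nrow` work the same way as in the numeric plots:
```{r,eval=FALSE}
bar_plot(iris, subplot = TRUE, nrow = 1)
```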
If you want a pie chart:
```{r,eval=FALSE}
pie_plot(iris)
```
## Correlation Analysis
### Correlation Matrix
To visualize the correlation table for all variables:
```{r,eval=FALSE}
corr_matrix(mtcars)
```
If you want to visualize a correlation map along with the correlation table:
```{r,eval=FALSE}
corr_matrix(mtcars,corr_plot=T)
```
You may also choose the type of correlation: Pearson or Spearman.
```{r,eval=FALSE}
corr_matrix(mtcars,type='pearson')
corr_matrix(mtcars,type='spearman')
```
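You can also control the significance threshold used to mask the heatmap, and highlight small p-values in the table:
```{r,eval=FALSE}
corr_matrix(mtcars, corr_plot = TRUE, sig.level = 0.01, highlight = TRUE)
```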
### Correlation Clustering
Correlation clustering partitions features into groups based on their similarity (correlation):
```{r,eval=FALSE}
corr_cluster(mtcars,type='pearson')
```
```{r,eval=FALSE}
corr_cluster(mtcars, type='spearman')
```
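Under the hood, corr_cluster() converts correlations into distances with 1 - |r| and passes them to hclust(); roughly the equivalent base-R steps are:
```{r,eval=FALSE}
d <- as.dist(1 - abs(cor(mtcars, method = "pearson")))
plot(hclust(d, method = "complete"), hang = -1)
```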
## Feature Engineering
### Missing Value Plot
To visualize the percentage of missing values in each variable:
```{r,eval=FALSE}
missing_values_plot(airquality)
```
### Missing Value Imputation
Imputing missing values is a way to get more information out of a dataset with missing values. However, one needs to choose the imputation method carefully in order to achieve the best accuracy.
- **mean**: use the mean value to replace missing values.
```{r,results='hide'}
impute_missing(airquality,method='mean')
```
- **mode**: use the most frequent value to replace missing values.
- **median**: use the median value to replace missing values.
- **locf**: use the last observed value to replace missing values (last observation carried forward).
- **knn**: use k-nearest neighbors to replace missing values; k needs to be chosen.
```{r,results='hide'}
impute_missing(airquality,method='mode')
impute_missing(airquality,method='median')
impute_missing(airquality,method='locf')
impute_missing(airquality,method='knn',k=5)
```
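Whichever method you choose, it is worth confirming afterwards that no missing values remain:
```{r,eval=FALSE}
imputed <- impute_missing(airquality, method = 'mean')
colSums(is.na(imputed))  # should be all zeros
```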
### Principal Component Analysis
Principal Component Analysis can help you reduce the number of variables in a dataset.
To perform and visualize PCA on some selected variables:
```{r,eval=FALSE}
pca(mtcars,variance_threshold = 0.9,scale=T)
```
To visualize the scree plot and biplot:
```{r,eval=FALSE}
pca(mtcars,variance_threshold = 0.9,scale=TRUE,scree_plot=TRUE,biplot=TRUE)
```
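pca() builds on stats::prcomp(); if you only need the raw decomposition, the base function yields the same components:
```{r,eval=FALSE}
summary(prcomp(mtcars, center = TRUE, scale. = TRUE))
```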
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/inst/doc/AnalysisLin-vignette.Rmd |
---
title: "AnalysisLin-vignette"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{AnalysisLin-vignette}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
- [**Introduction**](#introduction)
- [**Descriptive Statistic**](#descriptive-statistic)
- [**Data Visualization**](#data-visualization)
- [Numeric Plot](#numeric-plot)
- [Categorical Plot](#categorical-plot)
- [**Correlation Analysis**](#correlation-analysis)
- [Correlation Matrix](#correlation-matrix)
- [Correlation Clustering](#correlation-clustering)
- [**Feature Engineering**](#feature-engineering)
- [Missing Value Imputation](#missing-value-imputation)
- [Principle Component Analysis](#principle-component-analysis)
# Introduction
Hi everyone, this page is to introduce the package AnalysisLin, which is my personal package for exploratory data analysis. It includes several useful functions designed to assist with exploratory data analysis (EDA). These functions are based on my learnings throughout my academic years, and I personally use them for EDA.
Table below summarize the functions that would be going over in this page
```{r,echo=FALSE}
library(knitr)
library(AnalysisLin)
```
```{r}
df <- data.frame(
Descriptive_Statistics = c("desc_stat()","","","","",""),
Data_Visualization = c("hist_plot()","dens_plot()", "bar_plot()","pie_plot()","qq_plot()","missing_value_plot()"),
Correlation_Analysis = c("corr_matrix()", "corr_cluster()","","","",""),
Feature_Engineering = c("missing_impute()", "pca()","","","","")
)
kable(df)
```
Some famous and very useful pre-installed datasets, such as iris, mtcars, and airquality, would be used to demonstrate what does each function in the package do. If you have not installed the package, please do the following:
install.packages("AnalysisLin")
```{r}
data("iris")
data("mtcars")
data("Titanic")
data("airquality")
```
Exploratory Data Analysis, in simple words, is the process to get to know your data.
## Descriptive Statistic
First function in package is desc_stat. This function computes numerous useful statistical metrics so that you gain a profound understanding of your data
- **Count**: Number of values in a variable.
- **Unique**: Number of values that are unique in a variable.
- **Duplicate**: Number of rows that are duplicate in a dataset.
- **Null**: Number of values that are missing in a variable.
- **Null Rate**: Percentage of values that are missing in a variable.
- **Type**: Type of variable (e.g., numeric, character, factor).
- **Min**: Smallest value.
- **P25**: Median of the first half.
- **Mean**: Mean value.
- **Median**: Median value.
- **P75**: Median of the second half.
- **Max**: Largest value.
- **SD**: Standard deviation.
- **Kurtosis**: A measure of the tailedness of a distribution.
- **Skewness**: A measure of the asymmetry of a distribution.
- **Shapiro-Wilk Test**: Checks if a sample follows a normal distribution by comparing its statistics to expected values under the assumption of normality.
- **Kolmogorov-Smirnov Test**:Checks if a sample follows a normal distribution by comparing its cumulative distribution function to the expected normal distribution.
- **Anderson-Darling Test**:Assesses normality by emphasizing tail behavior, determining if a sample conforms to a specified distribution.
- **Lilliefors Test**:A variant of Kolmogorov-Smirnov, is tailored for small sample sizes, testing whether data is normally distributed.
- **Jarque-Bera Test P-value**: Checks whether data have the skewness and kurtosis matching a normal distribution.
```{r,eval=FALSE}
desc_stat(mtcars)
```
```{r,eval=FALSE}
desc_stat(iris)
```
```{r,eval=FALSE}
desc_stat(airquality)
```
These metrics provide valuable insights into the dataset at a deeper level. If you don't want some of these metrics to be computed, you can set them to `FALSE`; the unwanted metrics won't appear in the output.
Furthermore, `desc_stat()` can also compute the kurtosis, skewness, Shapiro-Wilk test, Anderson-Darling test, Lilliefors test, and Jarque-Bera test:
```{r,eval=FALSE}
desc_stat(mtcars, max = FALSE, min = FALSE, sd = FALSE, kurtosis = TRUE, skewness = TRUE, shapiro = TRUE, anderson = TRUE, lilliefors = TRUE, jarque = TRUE)
```
## Data Visualization
### Numeric Plot
To visualize histograms for all numerical variables:
```{r,eval=FALSE}
hist_plot(iris, subplot = FALSE)
```
To visualize density plots for all numerical variables in two rows of subplots:
```{r,eval=FALSE}
dens_plot(iris, subplot = TRUE, nrow = 2)
```
A Quantile-Quantile (QQ) plot is a graphical tool used to assess whether a dataset follows a normal distribution. It compares the quantiles of the observed data to the quantiles of the expected distribution. To check normality for the numerical variables, draw QQ plots:
```{r,eval=FALSE}
qq_plot(iris, subplot = TRUE)
```
### Categorical Plot
To visualize bar charts for all categorical variables:
```{r,eval=FALSE}
bar_plot(iris)
```
If you want a pie chart:
```{r,eval=FALSE}
pie_plot(iris)
```
## Correlation Analysis
### Correlation Matrix
To compute the correlation table for all variables:
```{r,eval=FALSE}
corr_matrix(mtcars)
```
If you want to visualize a correlation map along with the correlation table:
```{r,eval=FALSE}
corr_matrix(mtcars, corr_plot = TRUE)
```
You may also choose the type of correlation: Pearson or Spearman.
```{r,eval=FALSE}
corr_matrix(mtcars,type='pearson')
corr_matrix(mtcars,type='spearman')
```
### Correlation Clustering
Correlation clustering partitions variables into groups based on their similarity (correlation):
```{r,eval=FALSE}
corr_cluster(mtcars,type='pearson')
```
```{r,eval=FALSE}
corr_cluster(mtcars, type='spearman')
```
## Feature Engineering
### Missing Value Plot
To visualize the percentage of missing values in each variable:
```{r,eval=FALSE}
missing_values_plot(airquality)
```
### Missing Value Imputation
Imputing missing values is a way to extract more information from a dataset with missing values. However, the imputation method needs to be chosen carefully to achieve the best accuracy.
- **mean**: use the mean value to replace missing values.
```{r,results='hide'}
impute_missing(airquality,method='mean')
```
- **mode**: use the most frequent value to replace missing values.
- **median**: use the median value to replace missing values.
- **locf**: use the last observation carried forward to replace missing values.
- **knn**: use k-nearest neighbors to replace missing values; k needs to be chosen.
```{r,results='hide'}
impute_missing(airquality,method='mode')
impute_missing(airquality,method='median')
impute_missing(airquality,method='locf')
impute_missing(airquality,method='knn',k=5)
```
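Assuming `impute_missing()` returns the imputed data frame, you can compare a variable's distribution before and after imputation to judge which method suits your data; a minimal sketch:
```{r,eval=FALSE}
imputed <- impute_missing(airquality, method = 'knn', k = 5)
summary(airquality$Ozone) # original variable, including NAs
summary(imputed$Ozone) # the same variable after k-nearest-neighbor imputation
```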
### Principal Component Analysis
Principal Component Analysis (PCA) can help you reduce the number of variables in a dataset.
To perform and visualize PCA on selected variables:
```{r,eval=FALSE}
pca(mtcars, variance_threshold = 0.9, scale = TRUE)
```
To visualize the scree plot and biplot:
```{r,eval=FALSE}
pca(mtcars,variance_threshold = 0.9,scale=TRUE,scree_plot=TRUE,biplot=TRUE)
```
| /scratch/gouwar.j/cran-all/cranData/AnalysisLin/vignettes/AnalysisLin-vignette.Rmd |
#' Maelstrom_Motif2TF
#'
#' create motif-factor links & export tables for printing motif score alongside its binding factor
#' @param seurat_object object
#' @param mot_mat motif_matrix, if not provided extracts one from the single cell object from the maelstrom assay
#' @param m2f_df motif to factor dataframe, if not provided extracts from the maelstrom directory
#' @param cluster_id ID used for finding clusters of cells
#' @param maelstrom_dir directory where the GimmeMotifs m2f table is stored
#' @param RNA_expression_assay Seurat assay containing factor expression info
#' @param RNA_expression_slot slot within assay used for calculating mean factor expression per cluster
#' @param expr_tresh minimum sum of gene counts over all cells in RNA_expression_assay to filter genes by
#' @param cor_tresh minimum absolute correlation value to filter the cor() output by
#' @param cor_method specify one of the cor() methods
#' @param curated_motifs use only curated motifs (TRUE), or all motifs in the database (FALSE)
#' @param combine_motifs means (take the mean of multiple motif scores), max_var (take the motif with the highest variance), or max_cor (take the motif with the best correlation to gene expression)
#' @param return_df return both the seurat object and two dataframes with maelstrom scores and expression values as a list
#' @return seurat object with two assays added: MotifTFcor for TFs with a positive correlation to the linked motif, and MotifTFanticor for TFs with a negative correlation to the linked motif
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' maelstrom_dir_path <- system.file("extdata","maelstrom",package = 'AnanseSeurat')
#' sce_small <- Maelstrom_Motif2TF(sce_small, maelstrom_dir = maelstrom_dir_path)
#' @export
#' @importFrom rlang .data
Maelstrom_Motif2TF <- function(seurat_object,
mot_mat = NULL,
m2f_df = NULL,
cluster_id = 'seurat_clusters',
maelstrom_dir = './maelstrom/',
combine_motifs = 'means',
RNA_expression_assay = "RNA",
RNA_expression_slot = "data",
expr_tresh = 10,
cor_tresh = 0.30,
curated_motifs = FALSE,
cor_method = "pearson",
return_df = FALSE) {
## Check if m2f_df object provided or path to Maelstrom output.
## Check if m2f_df object provided contains the right columns.
m2f_df_cols <- c("Motif", "Factor")
if (is.null(m2f_df)) {
if (is.null(maelstrom_dir)) {
stop(
"Provide path for maelstrom_dir or provide table m2f_df with Motif and Factor column."
)
} else {
m2f_file <-
paste0(maelstrom_dir,
"/nonredundant.motifs.motif2factors.txt")
m2f_df <-
utils::read.table(
m2f_file,
        header = TRUE,
sep = '\t',
check.names = FALSE
)
}
  } else if (!all(m2f_df_cols %in% colnames(m2f_df))) {
stop("Provide m2f_df with at least 2 columns with names Motif and Factor.")
}
  if (!(combine_motifs %in% c('means', 'max_cor', 'max_var'))) {
    stop(
      "use either 'means', 'max_cor' or 'max_var' as a selection method to select the most relevant motifs per TF"
    )
  }
if (curated_motifs) {
message('using only curated motifs from database')
m2f_df <- m2f_df[m2f_df$Curated == 'Y', ]
}
## Load needed objects
if (is.null(mot_mat)) {
message(
paste0(
'loading maelstrom values from maelstrom assay using the cluster identifier ',
cluster_id
)
)
mot_mat <- per_cluster_df(seurat_object,
assay = 'maelstrom',
cluster_id = cluster_id)
mot_mat <- as.matrix(mot_mat)
}
## Set up Seurat object
message("non-expressed genes are removed")
Seurat::DefaultAssay(seurat_object) <- RNA_expression_assay
genes_expressed <-
rownames(seurat_object)[rowSums(as.matrix(seurat_object[[RNA_expression_assay]]@counts)) >= expr_tresh]
seurat_object[[RNA_expression_assay]] <-
Seurat::CreateAssayObject(counts = seurat_object[[RNA_expression_assay]]@data[genes_expressed, ])
## Select motifs with binding TFs present in object
m2f_df <- m2f_df[m2f_df$Factor %in% rownames(seurat_object), ]
## Check if data is normalized
if (identical(seurat_object[[RNA_expression_assay]]@data, seurat_object[[RNA_expression_assay]]@counts) &
RNA_expression_slot == "data") {
message("Your data slot was not yet normalized.")
message(
paste0(
"Seurat NormalizeData with default settings will be run on all the genes in the ",
RNA_expression_assay,
" assay."
)
)
seurat_object <-
Seurat::NormalizeData(seurat_object, assay = RNA_expression_assay)
}
## Obtain df with mean expression
exp_mat <-
Seurat::AggregateExpression(
seurat_object,
assays = RNA_expression_assay,
slot = RNA_expression_slot,
features = m2f_df$Factor,
group.by = cluster_id
)[[1]]
## make sure that all genes in matrix have mean expression > 0
exp_mat <- exp_mat[!rowSums(as.matrix(exp_mat)) <= 0, ]
## Select the same exp_mat columns as in mot_mat columns (if the grouping var is the same)
exp_mat <- exp_mat[, colnames(mot_mat)]
## limit table to motifs and TFs present in dataset
mot_mat <- mot_mat[rownames(mot_mat) %in% m2f_df$Motif, ]
TF_mat <- exp_mat[rownames(exp_mat) %in% m2f_df$Factor, ]
m2f_df_match <-
m2f_df[m2f_df$Motif %in% rownames(mot_mat) &
m2f_df$Factor %in% rownames(TF_mat), ]
  ## Create unique motif-factor entries, losing curation information
m2f_df_match <-
unique(m2f_df_match[, !colnames(m2f_df_match) %in% c("Evidence", "Curated")])
## perform correlations between cluster expression and cluster motif enrichment
## calculate motif score variation over clusters
m2f_df_match$cor <- NA
m2f_df_match$var <- NA
  for (i in seq_len(nrow(m2f_df_match))) {
m2f_df_match$cor[i] <-
stats::cor(mot_mat[m2f_df_match$Motif[i], ], exp_mat[m2f_df_match$Factor[i], ], method = cor_method)
m2f_df_match$var[i] <-
stats::var(mot_mat[m2f_df_match$Motif[i], ])
}
## Only keep motif-TF combinations with an absolute R higher than treshold
message(paste0("Only keep motif-TF combinations with an R > ", cor_tresh))
m2f_df_match <-
m2f_df_match[base::abs(m2f_df_match$cor) > cor_tresh, ]
## Select highest absolute correlation of TF and motif
  m2f_df_unique <-
    as.data.frame(
      m2f_df_match %>% dplyr::group_by(.data$Motif) %>%
        dplyr::arrange(dplyr::desc(base::abs(.data$cor))) %>% dplyr::filter(dplyr::row_number() == 1)
    )
#Select only positive correlations or only negative correlations (repressors)
matrix_list <- list()
for (typeTF in c('MotifTFcor', 'MotifTFanticor')) {
m2f <- m2f_df_unique
if (typeTF == 'MotifTFanticor') {
message("Selecting anticorrelating TFs")
m2f <- m2f_df_unique[m2f_df_unique$cor < 0, ]
} else {
message("Selecting correlating TFs")
m2f <- m2f_df_unique[m2f_df_unique$cor > 0, ]
}
m2f$associated_motifs <- NA
for (tf in unique(m2f$Factor)) {
## Generate a string with all associated motifs and their correlation to the tf
motif_vector <- c()
for (motif in unique(m2f[m2f$Factor == tf, c("Motif")])) {
motif_cor <-
paste0(c(motif, unique(m2f[m2f$Factor == tf &
m2f$Motif == motif, "cor"])), collapse = ":")
motif_vector <-
paste0(c(motif_vector, motif_cor), collapse = "_")
}
m2f[m2f$Factor == tf, ]$associated_motifs <- motif_vector
}
## Order motifs according to m2f & replace with TF name
mot_plot <-
as.matrix(mot_mat[match(m2f$Motif, rownames(mot_mat)), ])
    ## Make a motif score per TF (selecting the most variable motif, the best-correlating motif, or the mean of all associated motifs).
if (combine_motifs == 'means') {
rownames(mot_plot) <- m2f$Factor
message("Take mean motif score of all binding motifs per TF")
## Take mean of motifs linked to the same TF
mot_plot <-
stats::aggregate(mot_plot, list(row.names(mot_plot)), mean)
mot_plot <-
as.data.frame(mot_plot, row.names = mot_plot[, 1])[, -1]
m2f <-
as.data.frame(m2f[!duplicated(m2f$Factor), c("Factor", "associated_motifs"), drop = FALSE])
}
if (combine_motifs == 'max_cor') {
message("Motif best (absolute) correlated to expression is selected per TF")
## Using m2f file for selecting highest correlating motif to factor:
m2f <- m2f[order(base::abs(m2f[, "cor"]), decreasing = T),]
m2f <-
m2f[!duplicated(m2f$Factor), c("Factor", "Motif", "cor"), drop = FALSE]
mot_plot <- mot_plot[match(m2f$Motif, rownames(mot_plot)), ]
#mot_plot <- as.data.frame(mot_plot)
rownames(mot_plot) <- m2f$Factor
}
if (combine_motifs == 'max_var') {
message("Most variable binding motif is selected per TF")
## Using m2f file for selecting highest variable motif to factor:
m2f <- m2f[order(base::abs(m2f[, "var"]), decreasing = T),]
m2f <- m2f[!duplicated(m2f$Factor), ]
mot_plot <- mot_plot[match(m2f$Motif, rownames(mot_plot)), ]
rownames(mot_plot) <- m2f$Factor
}
## order expression matrix and motif matrix the same way
exp_plot <-
as.matrix(exp_mat[match(rownames(mot_plot), rownames(exp_mat)), ])
exp_plot_scale <- t(scale(t(exp_plot)))
mot_plot_scale <- t(scale(t(mot_plot)))
matrix_list[[paste0("expression_", typeTF)]] <- exp_plot
matrix_list[[paste0("motif_score_", typeTF)]] <- mot_plot
matrix_list[[paste0("scaled_expression_", typeTF)]] <- exp_plot_scale
matrix_list[[paste0("scaled_motif_score_", typeTF)]] <- mot_plot_scale
matrix_list[[paste0("tf2motif_selected_",typeTF, "_", combine_motifs)]] <- m2f
## Create seurat assay with binding factor assay
new_assay <-
as.data.frame(matrix(
data = NA,
ncol = length(colnames(seurat_object)),
nrow = length(rownames(mot_plot))
))
colnames(new_assay) <- colnames(seurat_object)
rownames(new_assay) <- rownames(mot_plot)
for (cluster in colnames(mot_plot)) {
cluster_cells <-
        colnames(seurat_object[, seurat_object@meta.data[[cluster_id]] == cluster])
for (TF in rownames(new_assay)) {
new_assay[TF, cluster_cells] <- as.matrix(mot_plot[TF, cluster])
}
}
seurat_object[[typeTF]] <- Seurat::CreateAssayObject(new_assay)
## Adding meta.features with information about the motifs used in the matrix
m2f <-
as.data.frame(m2f[match(rownames(new_assay), m2f$Factor), ])
rownames(m2f) <- m2f$Factor
seurat_object[[typeTF]]@meta.features <-
m2f[, !colnames(m2f) == "Factor", drop = FALSE]
}
matrix_list[["seurat_object"]] <- seurat_object
if (return_df) {
return(list(seurat_object, matrix_list))
}
else{
return(seurat_object)
}
}
#' Factor_Motif_Plot
#'
#' plot both expression of a TF, and the motif accessibility of the associated motif. Finally, fetch the motif logo from the Maelstrom directory.
#' @param seurat_object seurat object
#' @param TF_list list of TFs to plot the expression and linked motif Z-score for
#' @param assay_RNA RNA_count_assay assay containing the RNA data
#' @param assay_maelstrom maelstrom assay used for z-score visualization, typically either MotifTFcor or MotifTFanticor
#' @param logo_dir directory containing motif logos generated by gimme maelstrom
#' @param col colours used for z-score visualization
#' @param dim_reduction dimensionality reduction method to use
#' @return patchwork plot containing an expression dimension reduction plot, a maelstrom motif score dimension reduction plot, and a PNG image of the motif
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' logos_dir_path <- system.file("extdata","maelstrom","logos",package = 'AnanseSeurat')
#' sce_small <- Factor_Motif_Plot(sce_small,
#' c('gene1', 'gene2'),
#' dim_reduction = 'pca',
#' logo_dir = logos_dir_path)
#' @export
Factor_Motif_Plot <- function(seurat_object,
TF_list,
assay_RNA = 'RNA',
assay_maelstrom = 'MotifTFanticor',
logo_dir = '~/maelstrom/logos',
col = c('darkred', 'white', 'darkgrey'),
dim_reduction = 'umap') {
Seurat::DefaultAssay(object = seurat_object) <- assay_RNA
plot_expression1 <-
Seurat::FeaturePlot(
seurat_object,
features = TF_list,
ncol = 1,
reduction = dim_reduction
)
Seurat::DefaultAssay(object = seurat_object) <- assay_maelstrom
plot_Maelstrom_raw <-
Seurat::FeaturePlot(
seurat_object,
ncol = 1,
features = TF_list,
      combine = FALSE
)
TF_motif_table <- seurat_object@assays[[assay_maelstrom]][[]]
#replace the TF name with the motif name for the maelstrom enrichment score
plot_Maelstrom <- lapply(plot_Maelstrom_raw, function(x) {
TF_name <- names(x$data)[4][[1]]
motif_name <- TF_motif_table[TF_name, ]$Motif
x <- x + ggplot2::labs(title = motif_name)
suppressMessages(x + ggplot2::scale_colour_gradient2(
low = col[1],
mid = col[2],
high = col[3],
midpoint = 0
))
})
  plot_Maelstrom <- patchwork::wrap_plots(plot_Maelstrom, ncol = 1)
plot_logo <- lapply(plot_Maelstrom_raw, function(x) {
TF_name <- names(x$data)[4][[1]]
motif_name <- TF_motif_table[TF_name, ]$Motif
motif_name <- gsub('\\.', '_', motif_name)
motif_path <- paste0(logo_dir, '/', motif_name, '.png')
logo_image <- png::readPNG(motif_path)
ggplot2::ggplot() + ggpubr::background_image(logo_image) #+ ggplot2::coord_fixed()
})
plot_logo <- patchwork::wrap_plots(plot_logo, ncol = 1)
return(plot_expression1 | plot_Maelstrom | plot_logo)
}
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/R/Motif2TF_functions.R |
#' DEGS_scANANSE
#'
#' Calculate the differential genes needed for ananse influence
#' @param seurat_object seurat object
#' @param min_cells minimum of cells a cluster needs to be exported
#' @param output_dir directory where the files are outputted
#' @param cluster_id ID used for finding clusters of cells
#' @param genome path to the genome folder used for the anansnake config file
#' @param RNA_count_assay assay containing the RNA data
#' @param additional_contrasts additional contrasts to add
#' between clusters within cluster_ID
#' @return None, outputs DEG files in the output directory
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_obj_tiny.Rds",package = 'AnanseSeurat'))
#' DEGS_scANANSE(sce_small, min_cells = 2, output_dir = tempdir())
#' @export
DEGS_scANANSE <- function(seurat_object,
output_dir,
min_cells = 50,
cluster_id = 'seurat_clusters',
genome = './scANANSE/data/hg38',
RNA_count_assay = "RNA",
additional_contrasts = 'None') {
if (missing(output_dir)) {
    stop('no output_dir specified')
}
dir.create(file.path(paste0(output_dir, '/deseq2/')), showWarnings = FALSE)
Seurat::Idents(seurat_object) <- cluster_id
cluster_names <- list()
i <- 1
for (cluster in levels(Seurat::Idents(seurat_object))) {
seurat_object_cluster <- subset(x = seurat_object, idents = cluster)
    #only use ANANSE on clusters with more than min_cells cells
n_cells <- dim(seurat_object_cluster)[2]
if (n_cells > min_cells) {
cluster_names[i] <- cluster
i <- i + 1 # Increase i to add clusters iteratively
}
}
  #generate the list of contrasts used to compute the differential genes
contrast_list <-
as.list(paste0('anansesnake_', cluster_names, '_average'))
if (typeof(additional_contrasts) == 'list') {
message('adding additional contrasts')
additional_contrasts <-
paste0('anansesnake_', additional_contrasts)
contrast_list <- c(contrast_list, additional_contrasts)
}
for (contr in contrast_list) {
message(paste0('calculating DEGS for contrast ', contr))
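    #contrast names follow the pattern 'anansesnake_<target>_<reference>'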
comparison1 <- stringr::str_split(contr, "_")[[1]][2]
comparison2 <- stringr::str_split(contr, "_")[[1]][3]
DEG_file <-
paste0(
output_dir,
'/deseq2/',
basename(genome),
'-anansesnake_',
comparison1,
'_',
comparison2,
'.diffexp.tsv'
)
if (file.exists(DEG_file)) {
      message('DEG file already exists, skipping regeneration of the DEG file')
      message('If new files are needed, remove the existing DEG files')
} else {
if (comparison2 == 'average') {
DEGS <- Seurat::FindMarkers(
seurat_object,
ident.1 = comparison1,
assay = RNA_count_assay,
logfc.threshold = 0.1,
min.pct = 0.05,
return.thresh = 0.1
)
} else{
DEGS <- Seurat::FindMarkers(
seurat_object,
ident.1 = comparison1,
ident.2 = comparison2,
assay = RNA_count_assay,
logfc.threshold = 0.1,
min.pct = 0.05,
return.thresh = 0.1
)
}
DEGS <- DEGS[c('avg_log2FC', 'p_val_adj')]
colnames(DEGS) <- c('log2FoldChange', 'padj')
utils::write.table(DEGS, DEG_file, sep = '\t', quote = FALSE)
}
}
}
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/R/calculateDEGS_functions.R |
#' export_CPM_scANANSE
#'
#' This function exports CPM values from a seurat object
#' @param seurat_object the seurat object used to export the CPM values from
#' @param min_cells minimum of cells a cluster needs to be exported
#' @param output_dir directory where the files are outputted
#' @param RNA_count_assay assay of the seurat object containing the RNA count data
#' @param cluster_id ID used for finding clusters of cells
#' @return None, outputs CPM and counts files in the output directory
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' export_CPM_scANANSE(sce_small, min_cells = 2, output_dir = tempdir())
#' @export
export_CPM_scANANSE <- function(seurat_object,
output_dir,
min_cells = 50,
RNA_count_assay = "RNA",
cluster_id = 'seurat_clusters') {
if (missing(output_dir)) {
stop('no output_dir specified')
}
dir.create(file.path(output_dir), showWarnings = FALSE)
Seurat::Idents(seurat_object) <- cluster_id
message('calculating CPM')
seurat_object <- Seurat::NormalizeData(
seurat_object,
assay = RNA_count_assay,
normalization.method = 'RC',
scale.factor = 1e6
)
rna_count_lists <- list()
FPKM_count_lists <- list()
cluster_names <- list()
i <- 1
for (cluster in levels(Seurat::Idents(seurat_object))) {
seurat_object_cluster <- subset(x = seurat_object, idents = cluster)
    #only use ANANSE on clusters with more than min_cells cells
n_cells <- dim(seurat_object_cluster)[2]
if (n_cells > min_cells) {
message(paste0('gather data from ', cluster, ' with ', n_cells, ' cells'))
cluster_names[i] <- cluster
#lets grab the scRNAseq counts
mat_counts <-
seurat_object_cluster@assays[[RNA_count_assay]]@counts
mat_counts <- as.matrix(rowSums(as.matrix(mat_counts)))
colnames(mat_counts) <- cluster
rna_count_lists[i] <- as.data.frame(mat_counts)
      #lets also grab the normalized intensities, which we will
      #use as an FPKM-like matrix; since the reads are UMI normalized
      #we do not expect a gene-length bias
mat_norm <-
seurat_object_cluster@assays[[RNA_count_assay]]@data
mat_norm <- as.matrix(rowSums(as.matrix(mat_norm)))
colnames(mat_norm) <- cluster
FPKM_count_lists[i] <- as.data.frame(mat_norm)
i <- i + 1
} else{
      warning(paste0(
        cluster,
        ' has fewer than ',
        min_cells,
        ' cells, not including this cluster'
      ))
}
}
#RNA count matrix
count_matrix <- do.call(rbind, rna_count_lists)
count_matrix <- as.data.frame(count_matrix)
count_matrix <- t(count_matrix)
colnames(count_matrix) <- cluster_names
rownames(count_matrix) <- rownames(mat_counts)
count_matrix <- as.data.frame(count_matrix)
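  #the extra 'average' pseudo-sample serves as the reference network in anansnake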
count_matrix$average <- round(rowMeans(count_matrix))
#FPKM matrix
FPKM_matrix <- do.call(rbind, FPKM_count_lists)
FPKM_matrix <- as.data.frame(FPKM_matrix)
FPKM_matrix <- t(FPKM_matrix)
colnames(FPKM_matrix) <- cluster_names
rownames(FPKM_matrix) <- rownames(mat_norm)
FPKM_matrix <- as.data.frame(FPKM_matrix)
FPKM_matrix$average <- rowMeans(FPKM_matrix)
count_file <- paste(output_dir, "RNA_Counts.tsv", sep = '/')
CPM_file <- paste(output_dir, "TPM.tsv", sep = '/')
utils::write.table(as.matrix(count_matrix), count_file, sep = '\t', quote = FALSE)
utils::write.table(as.matrix(FPKM_matrix), CPM_file, sep = '\t', quote = FALSE)
return('done exporting cluster data files')
}
#' export_ATAC_scANANSE
#'
#' This function exports ATAC values from a seurat object
#' @param seurat_object object
#' @param min_cells minimum of cells a cluster needs to be exported
#' @param output_dir directory where the files are outputted
#' @param ATAC_peak_assay assay of the seurat object containing the peaks and peakcounts
#' @param cluster_id ID used for finding clusters of cells
#' @return None, outputs ATAC peak count file in the output directory
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' export_ATAC_scANANSE(sce_small, min_cells = 2, output_dir = tempdir())
#' @export
export_ATAC_scANANSE <- function(seurat_object,
output_dir,
min_cells = 50,
ATAC_peak_assay = "peaks",
cluster_id = 'seurat_clusters') {
if (missing(output_dir)) {
stop('no output_dir specified')
}
dir.create(file.path(output_dir), showWarnings = FALSE)
Seurat::Idents(seurat_object) <- cluster_id
peak_count_lists <- list()
cluster_names <- list()
i <- 1
for (cluster in levels(Seurat::Idents(seurat_object))) {
seurat_object_cluster <- subset(x = seurat_object, idents = cluster)
    #only use ANANSE on clusters with more than min_cells cells
n_cells <- dim(seurat_object_cluster)[2]
if (n_cells > min_cells) {
message(paste0('gather data from ', cluster, ' with ', n_cells, ' cells'))
cluster_names[i] <- cluster
#lets grab the ATAC signal
mat <- seurat_object_cluster@assays[[ATAC_peak_assay]]@counts
mat <- as.matrix(rowSums(as.matrix(mat)))
colnames(mat) <- cluster
peak_count_lists[i] <- as.data.frame(mat)
i <- i + 1
} else{
      warning(paste0(
        cluster,
        ' has fewer than ',
        min_cells,
        ' cells, not including this cluster'
      ))
}
}
#ATAC peak matrix
activity_matrix <- do.call(rbind, peak_count_lists)
activity_matrix <- as.data.frame(activity_matrix)
activity_matrix <- t(activity_matrix)
colnames(activity_matrix) <- cluster_names
  #reformat the peak names from chr-start-end to chr:start-end
peaks <- rownames(mat)
peaks <- stringr::str_split_fixed(peaks, '-', 3)
peaknames <- paste0(peaks[, 1], ":", peaks[, 2], '-', peaks[, 3])
rownames(activity_matrix) <- peaknames
activity_matrix <- as.data.frame(activity_matrix)
activity_matrix$average <- round(rowMeans(activity_matrix))
Peak_file <- paste(output_dir, "Peak_Counts.tsv", sep = '/')
utils::write.table(as.matrix(activity_matrix),
Peak_file,
sep = '\t',
quote = FALSE)
}
#' config_scANANSE
#'
#' This function generates a sample file and a config file for running anansnake, based on the seurat object
#' @param seurat_object seurat object
#' @param min_cells minimum of cells a cluster needs to be exported
#' @param output_dir directory where the files are outputted
#' @param genome genomepy name or location of the genome fastq file
#' @param cluster_id ID used for finding clusters of cells
#' @param additional_contrasts additional contrasts to add between clusters within cluster_ID
#' @return None, outputs snakemake config file in the output directory
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' config_scANANSE(sce_small, min_cells = 2, output_dir = tempdir())
#' @export
config_scANANSE <- function(seurat_object,
output_dir,
min_cells = 50,
cluster_id = 'seurat_clusters',
genome = './scANANSE/data/hg38',
additional_contrasts = c()) {
if (missing(output_dir)) {
stop('no output_dir specified')
}
dir.create(file.path(output_dir), showWarnings = FALSE)
Seurat::Idents(seurat_object) <- cluster_id
Peak_file <- paste(output_dir, "Peak_Counts.tsv", sep = '/')
count_file <- paste(output_dir, "RNA_Counts.tsv", sep = '/')
CPM_file <- paste(output_dir, "TPM.tsv", sep = '/')
cluster_names <- list()
i <- 1
for (cluster in levels(Seurat::Idents(seurat_object))) {
seurat_object_cluster <- subset(x = seurat_object, idents = cluster)
    #only use ANANSE on clusters with more than min_cells cells
n_cells <- dim(seurat_object_cluster)[2]
if (n_cells > min_cells) {
cluster_names[i] <- cluster
i <- i + 1
}
}
#lets generate the snakemake sample file
sample_names <- c(cluster_names, 'average')
sample_file_df <- as.data.frame(sample_names)
sample_file_df <- as.data.frame(t(sample_file_df))
colnames(sample_file_df) <- 'sample'
sample_file_df$assembly <- basename(genome)
sample_file_df$anansesnake <- sample_file_df$sample
sample_file_location <- paste0(output_dir, "/samplefile.tsv")
utils::write.table(
sample_file_df,
sample_file_location,
sep = '\t',
row.names = FALSE,
quote = FALSE
)
#lets generate the snakemake config file
contrast_list <-
paste0('anansesnake_', cluster_names, '_average')
if (length(additional_contrasts)>0){
message('adding additional contrasts')
additional_contrasts <-
paste0('anansesnake_', additional_contrasts)
contrast_list <- c(contrast_list, additional_contrasts)
}
file <- paste0(output_dir, "/config.yaml")
lines <- c(
paste0("result_dir: ", output_dir, '\n'),
paste0("rna_samples: ", sample_file_location, '\n'),
paste0("rna_tpms: ", CPM_file, '\n'),
paste0("rna_counts: ", count_file, '\n'),
paste0("atac_samples: ", sample_file_location, '\n'),
paste0("atac_counts: ", Peak_file, '\n'),
paste0("genome: ", genome, "\n"),
"database: gimme.vertebrate.v5.0 \n",
"jaccard: 0.1 \n",
"edges: 500_000 \n",
"padj: 0.05 \n",
'plot_type: "png" \n',
"get_orthologs: false \n",
"contrasts: \n"
)
string <- paste(lines, collapse = '')
cat(string,
file = paste0(output_dir, "/config.yaml"),
append = FALSE)
for (contr in contrast_list) {
cat(
paste0(' - "', contr, '"'),
"\n",
file = paste0(output_dir, "/config.yaml"),
append = TRUE
)
}
}
#' export_ATAC_maelstrom
#'
#' normalize and export the peak table of a seurat object based on clusters
#' @param seurat_object object
#' @param min_cells minimum of cells a cluster needs to be exported
#' @param output_dir directory where the files are outputted
#' @param ATAC_peak_assay assay of the seurat object containing the peaks and peakcounts
#' @param cluster_id ID used for finding clusters of cells
#' @param select_top_rows only output the top variable rows (TRUE), or all rows (FALSE)
#' @param n_top_rows amount of variable rows to export
#' @return None, outputs maelstrom peak counts table in the output directory
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' export_ATAC_maelstrom(sce_small, min_cells = 2, output_dir = tempdir())
#' @export
export_ATAC_maelstrom <- function(seurat_object,
output_dir,
min_cells = 50,
ATAC_peak_assay = "peaks",
cluster_id = 'seurat_clusters',
select_top_rows = TRUE,
n_top_rows = 100000) {
if (missing(output_dir)) {
stop('no output_dir specified')
}
dir.create(file.path(output_dir), showWarnings = FALSE)
Seurat::Idents(seurat_object) <- cluster_id
peak_count_lists <- list()
cluster_names <- list()
i <- 1
for (cluster in levels(Seurat::Idents(seurat_object))) {
seurat_object_cluster <- subset(x = seurat_object, idents = cluster)
    #only use ANANSE on clusters with more than min_cells cells
n_cells <- dim(seurat_object_cluster)[2]
if (n_cells > min_cells) {
message(paste0('gather data from ', cluster, ' with ', n_cells, ' cells'))
cluster_names[i] <- cluster
#lets grab the ATAC signal
mat <- seurat_object_cluster@assays[[ATAC_peak_assay]]@counts
mat <- as.matrix(rowSums(as.matrix(mat)))
colnames(mat) <- cluster
peak_count_lists[i] <- as.data.frame(mat)
i <- i + 1
} else{
      message(paste0(cluster, ' has fewer than ', min_cells, ' cells, skipping'))
}
}
#ATAC peak matrix
activity_matrix <- do.call(rbind, peak_count_lists)
activity_matrix <- as.data.frame(activity_matrix)
activity_matrix <- t(activity_matrix)
colnames(activity_matrix) <- cluster_names
  #reformat the peak names from chr-start-end to chr:start-end
peaks <- rownames(mat)
peaks <- stringr::str_split_fixed(peaks, '-', 3)
peaknames <- paste0(peaks[, 1], ":", peaks[, 2], '-', peaks[, 3])
rownames(activity_matrix) <- peaknames
activity_matrix <- as.data.frame(activity_matrix)
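  #log2-transform the summed counts and z-score each cluster column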
activity_matrix <- log2(activity_matrix + 1)
activity_matrix <- scale(activity_matrix)
activity_matrix <- as.data.frame(activity_matrix)
if (select_top_rows) {
if (nrow(activity_matrix) > n_top_rows) {
message(paste0(
'large dataframe detected, selecting top variable rows n = ',
n_top_rows
))
      message('if the entire dataframe is required, add select_top_rows = FALSE as a parameter')
      message('or change the amount of rows via the n_top_rows parameter')
row_variance <- apply(activity_matrix, 1, stats::var)
activity_matrix$RowVar <- row_variance
activity_matrix <-
activity_matrix[order(activity_matrix$RowVar, decreasing = TRUE),]
activity_matrix <- utils::head(activity_matrix, n_top_rows)
activity_matrix$RowVar <- NULL
}
}
Peak_file <- paste(output_dir, "Peaks_scaled.tsv", sep = '/')
utils::write.table(activity_matrix,
Peak_file,
sep = '\t',
quote = FALSE)
} | /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/R/export_functions.R |
#' import_seurat_scANANSE
#'
#' import the influence scores from an anansnake output directory into a seurat object
#' @param seurat_object seurat object
#' @param cluster_id ID used for finding clusters of cells
#' @param anansnake_inf_dir influence directory generated by anansnake
#' @param return_df return both the seurat object and a dataframe with influence scores as a list
#' @return seurat object with the influence scores added as an assay
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' infdir <- system.file("extdata","influence",package = 'AnanseSeurat')
#' sce_small <- import_seurat_scANANSE(sce_small, anansnake_inf_dir = infdir)
#' @export
import_seurat_scANANSE <- function(seurat_object,
cluster_id = 'seurat_clusters',
anansnake_inf_dir = 'None',
return_df = FALSE) {
files <-
list.files(
path = anansnake_inf_dir,
pattern = "*.tsv",
full.names = TRUE,
recursive = FALSE
)
GRN_files <-
list.files(
path = anansnake_inf_dir,
pattern = "_diffnetwork.tsv",
full.names = TRUE,
recursive = FALSE
)
  if (length(GRN_files) == 0) {
    stop(
      paste0(
        'no _diffnetwork.tsv files found in influence dir ',
        anansnake_inf_dir,
        '; double check the location provided in anansnake_inf_dir'
      )
    )
  }
files <- files[!files %in% GRN_files]
influence_scores_avg <- list()
influence_targets <- list()
i <- 1
for (file in files) {
cell_target <- stringr::str_split(basename(file), "_")[[1]][2]
cell_source <- stringr::str_split(basename(file), "_")[[1]][3]
cell_source <- stringr::str_replace(cell_source, '.tsv', '')
in_df <- utils::read.table(file, header = TRUE)
in_df <- in_df[c('factor', 'influence_score')]
colnames(in_df) <- c('factor', cell_target)
if (cell_source == 'average') {
influence_scores_avg[[i]] <- as.data.frame(in_df)
influence_targets[[i]] <- cell_target
i <- i + 1
}
}
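  #join the per-cluster influence tables into one TF-by-cluster dataframe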
avg_df <-
    influence_scores_avg %>% purrr::reduce(dplyr::full_join, by = "factor", copy = TRUE)
avg_df[is.na(avg_df)] <- 0
rownames(avg_df) <- avg_df$factor
avg_df$factor <- NULL
#add the TF intensities to the seurat object
TF_df <- t(avg_df)
cell_signal_list <- list()
i <- 1
#get cell IDs from the pbmcs
for (cell_type in rownames(TF_df)) {
cells_in_seurat <- seurat_object[[cluster_id]] == cell_type
if (TRUE %in% cells_in_seurat) {
relevant_IDS <-
seurat_object[, seurat_object[[cluster_id]] == cell_type][[cluster_id]]
TF_signal <- TF_df[rownames(TF_df) == cell_type, ]
TF_signal <- as.data.frame(TF_signal)
TF_signal <- t(TF_signal)
TF_signal <-
TF_signal[rep(seq_len(nrow(TF_signal)), each = nrow(relevant_IDS)),]
rownames(TF_signal) <- rownames(as.data.frame(relevant_IDS))
cell_signal_list[[i]] <-
TF_signal
} else{
warning(paste0(
paste0('no cells of id ', cell_type),
' found in seurat object'
))
}
i <- i + 1
}
TF_array <- do.call("rbind", cell_signal_list)
  #add cell IDs that do not have ANANSE signal and give them an NA value
cell_barcodes_all <-
rownames(as.data.frame(Seurat::Idents(object = seurat_object)))
cell_barcodes_missing <-
cell_barcodes_all[!cell_barcodes_all %in% rownames(TF_array)]
cell_barcodes_missing <- as.data.frame(cell_barcodes_missing)
  if (nrow(cell_barcodes_missing) > 0) {
message('adding cells with no influence scores with NA')
for (TF in colnames(TF_signal)) {
cell_barcodes_missing[[TF]] <- NA
}
rownames(cell_barcodes_missing) <-
cell_barcodes_missing$cell_barcodes_missing
cell_barcodes_missing$cell_barcodes_missing <- NULL
TF_output <- t(rbind(TF_array, cell_barcodes_missing))
}
else{
TF_output <- t(TF_array)
}
seurat_object[['influence']] <-
Seurat::CreateAssayObject(TF_output)
if (return_df) {
return(list(seurat_object, avg_df))
}
else{
return(seurat_object)
}
}
#' import_seurat_Maelstrom
#'
#' load Maelstrom enriched motifs
#' @param seurat_object object
#' @param cluster_id ID used for finding clusters of cells
#' @param maelstrom_file maelstrom final.out.txt file
#' @param return_df return both the seurat object and a dataframe with maelstrom scores as a list
#' @return seurat object with the maelstrom motif scores added as an assay
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' maelstromfile_path <- system.file("extdata","maelstrom","final.out.txt",package = 'AnanseSeurat')
#' sce_small <- import_seurat_maelstrom(sce_small, maelstrom_file = maelstromfile_path)
#' @export
import_seurat_maelstrom <- function(seurat_object,
cluster_id = 'seurat_clusters',
maelstrom_file = '~/final.out.txt',
return_df = FALSE) {
maelstrom_df <-
utils::read.table(
maelstrom_file,
header = TRUE,
row.names = 1,
sep = '\t',
check.names = FALSE
)
rownames(maelstrom_df) <- gsub('_', '-', rownames(maelstrom_df))
maelstrom_Zscore <-
maelstrom_df[grep("z-score ", colnames(maelstrom_df))]
maelstrom_corr <-
maelstrom_df[grep("corr ", colnames(maelstrom_df))]
#add the motif intensities to the seurat object
motif_df <- t(maelstrom_Zscore)
cell_signal_list <- list()
rownames(motif_df) <-
mapply(
gsub,
pattern = "z-score ",
x = rownames(motif_df),
replacement = ''
)
i <- 1
#get cell IDs from the single cell object
for (cell_type in rownames(motif_df)) {
#lets check if any cells with this annotation are present in the seurat object
cells_in_seurat <- seurat_object[[cluster_id]] == cell_type
if (TRUE %in% cells_in_seurat) {
relevant_IDS <-
seurat_object[, seurat_object[[cluster_id]] == cell_type][[cluster_id]]
TF_signal <- motif_df[rownames(motif_df) == cell_type, ]
TF_signal <- as.data.frame(TF_signal)
TF_signal <- t(TF_signal)
TF_signal <-
TF_signal[rep(seq_len(nrow(TF_signal)), each = nrow(relevant_IDS)),]
rownames(TF_signal) <- rownames(as.data.frame(relevant_IDS))
cell_signal_list[[i]] <-
TF_signal
} else{
warning(paste0(
paste0('no cells of id ', cell_type),
' found in seurat object'
))
}
i <- i + 1
}
TF_array <- do.call("rbind", cell_signal_list)
  #add cell IDs that do not have Maelstrom signal and give them an NA value
cell_barcodes_all <-
rownames(as.data.frame(Seurat::Idents(object = seurat_object)))
cell_barcodes_missing <-
cell_barcodes_all[!cell_barcodes_all %in% rownames(TF_array)]
if (length(cell_barcodes_missing) > 0) {
cell_barcodes_missing <- as.data.frame(cell_barcodes_missing)
for (TF in colnames(TF_signal)) {
cell_barcodes_missing[[TF]] <- NA
}
rownames(cell_barcodes_missing) <-
cell_barcodes_missing$cell_barcodes_missing
cell_barcodes_missing$cell_barcodes_missing <- NULL
TF_array <- rbind(TF_array, cell_barcodes_missing)
}
seurat_object[['maelstrom']] <-
Seurat::CreateAssayObject(t(TF_array))
motif_df <- t(motif_df)
if (return_df) {
return(list(seurat_object, motif_df))
}
else{
return(seurat_object)
}
}
#' per_cluster_df
#'
#' generate a table of the average assay score per cluster
#' @param seurat_object seurat object
#' @param assay assay containing influence or motif scores generated from cluster pseudobulk
#' @param cluster_id ID used for finding clusters of cells
#' @return dataframe with the mean assay score per cluster
#' @examples
#' sce_small <- readRDS(system.file("extdata","sce_small.Rds",package = 'AnanseSeurat'))
#' df <- per_cluster_df(sce_small)
#' @export
per_cluster_df <- function(seurat_object,
assay = 'influence',
cluster_id = 'seurat_clusters') {
Seurat::Idents(seurat_object) <- cluster_id
#make a dataframe with the values per cluster:
clusters <- unique(seurat_object[[cluster_id]])
#check if assay exists
if (is.null(seurat_object@assays[[assay]])) {
stop(paste0('assay ', assay, ' not found in the seurat object '))
}
cluster_data <-
as.data.frame(rownames(seurat_object@assays[[assay]]@data))
rownames(cluster_data) <- cluster_data[[1]]
for (cluster in unique(seurat_object[[cluster_id]])[[cluster_id]]) {
seurat_object_subset <- subset(x = seurat_object, idents = cluster)
#get cluster data
cluster_matrix <-
as.data.frame(seurat_object_subset@assays[[assay]]@data)
if (length(unique(as.list(cluster_matrix))) != 1) {
warning(
paste0(
'not all cells of the cluster ',
cluster,
' have the same value in the assay ',
assay
)
)
}
cluster_data[cluster] <- rowMeans(cluster_matrix)
}
cluster_data[[1]] <- NULL
  cluster_data <-
    cluster_data[, colSums(is.na(cluster_data)) < nrow(cluster_data)] #remove columns that are entirely NA
return(cluster_data)
}
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/R/import_functions.R |
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/R/utils-pipe.R |
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE,
comment = "#>")
knitr::opts_chunk$set(fig.width = 7, fig.height = 5)
## ----setup, eval = FALSE------------------------------------------------------
# # if (!requireNamespace("remotes", quietly = TRUE)) {
# # install.packages("remotes")
# # }
# # Sys.unsetenv("GITHUB_PAT")
# # remotes::install_github("JGASmits/AnanseSeurat")
#
# library(AnanseSeurat)
# library(Seurat)
# library(Signac)
## ----load_scObject, eval = FALSE----------------------------------------------
# rds_file <- 'preprocessed_PBMC.Rds'
# pbmc <- readRDS(rds_file)
# DimPlot(pbmc,
# label = TRUE,
# repel = TRUE,
# reduction = "umap") + NoLegend()
## ----export_CPMs, eval = FALSE------------------------------------------------
# export_CPM_scANANSE(
# pbmc,
#   min_cells = 25,
# output_dir = paste0(tempdir(),'/analysis'),
# cluster_id = 'predicted.id',
# RNA_count_assay = 'RNA'
# )
## ----eval = FALSE-------------------------------------------------------------
# export_ATAC_scANANSE(
# pbmc,
#   min_cells = 25,
# output_dir = paste0(tempdir(),'/analysis'),
# cluster_id = 'predicted.id',
# ATAC_peak_assay = 'peaks'
# )
## ----eval = FALSE-------------------------------------------------------------
# contrasts <- list('B-naive_B-memory',
# 'B-memory_B-naive',
# 'B-naive_CD16-Mono',
# 'CD16-Mono_B-naive')
#
# config_scANANSE(
# pbmc,
#   min_cells = 25,
# output_dir = paste0(tempdir(),'/analysis'),
# cluster_id = 'predicted.id',
# genome = './data/hg38',
# additional_contrasts = contrasts
# )
## ----eval = FALSE-------------------------------------------------------------
# DEGS_scANANSE(
# pbmc,
#   min_cells = 25,
#   output_dir = paste0(tempdir(),'/analysis'),
# cluster_id = 'predicted.id',
# additional_contrasts = contrasts
# )
## ----eval = FALSE-------------------------------------------------------------
# pbmc <- import_seurat_scANANSE(pbmc,
# cluster_id = 'predicted.id',
#                                anansnake_inf_dir = paste0(tempdir(), '/analysis/influence/'))
## ----eval = FALSE-------------------------------------------------------------
# TF_influence <- per_cluster_df(pbmc,
# assay = 'influence',
# cluster_id = 'predicted.id')
#
# head(TF_influence)
## ----eval = FALSE-------------------------------------------------------------
# highlight_TF1 <- c('STAT4', 'MEF2C')
#
# DefaultAssay(object = pbmc) <- "RNA"
# plot_expression1 <-
# FeaturePlot(pbmc, features = highlight_TF1, ncol = 1)
# DefaultAssay(object = pbmc) <- "influence"
# plot_ANANSE1 <-
# FeaturePlot(
# pbmc,
# ncol = 1,
# features = highlight_TF1,
# cols = c("darkgrey", "#fc8d59")
# )
# print(plot_expression1 | plot_ANANSE1)
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/inst/doc/introduction.R |
---
title: "introduction"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(collapse = TRUE,
comment = "#>")
knitr::opts_chunk$set(fig.width = 7, fig.height = 5)
```
#### **Load your single cell object(s)**
AnanseSeurat requires a preprocessed single cell object. Before exporting data, make sure the single cell object has been filtered for good-quality cells and has undergone sufficient pre-processing. For guides on pre-processing scRNA-seq and scATAC-seq data there are some great vignettes and guides available. We will also set the working directory.
```{r setup, eval = FALSE}
# if (!requireNamespace("remotes", quietly = TRUE)) {
# install.packages("remotes")
# }
# Sys.unsetenv("GITHUB_PAT")
# remotes::install_github("JGASmits/AnanseSeurat")
library(AnanseSeurat)
library(Seurat)
library(Signac)
```
In this example we will use a preprocessed 10x PBMC multiome dataset (PBMC from a Healthy Donor (v1, 150x150) Single Cell Multiome ATAC + Gene Expression Dataset by Cell Ranger ARC 2.0.0). This data was pre-processed following the standard Signac workflow. The pre-processed single cell object is available at Zenodo: <https://zenodo.org/record/7446267/>
```{r load_scObject, eval = FALSE}
rds_file <- 'preprocessed_PBMC.Rds'
pbmc <- readRDS(rds_file)
DimPlot(pbmc,
label = TRUE,
repel = TRUE,
reduction = "umap") + NoLegend()
```
This single cell object contains multimodal data, both RNA and ATAC signal from each cell. However, in the case of a separate scRNA-seq object and a separate scATAC-seq object, AnanseSeurat can still prepare the files for running single cell ANANSE. With two separate objects as input, it is important that both objects share their cluster names, as checked in the sketch below.
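A minimal sketch, where `sc_rna` and `sc_atac` are hypothetical names for your own objects and `predicted.id` holds the cluster labels:
```{r, eval = FALSE}
# Both objects must use identical cluster labels in the same metadata column.
rna_clusters <- unique(sc_rna$predicted.id)
atac_clusters <- unique(sc_atac$predicted.id)
# Labels present in one object but missing from the other:
setdiff(rna_clusters, atac_clusters)
setdiff(atac_clusters, rna_clusters)
```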
#### **Output files using AnanseSeurat**
We will start with outputting the CPM file; for this we select:
1. the minimum number of cells a cluster needs to have to be included via min_cells
2. the output directory
3. the cluster ID containing the cluster names
4. the name of the assay containing the RNA data.
```{r export_CPMs, eval = FALSE}
export_CPM_scANANSE(
pbmc,
  min_cells = 25,
output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
RNA_count_assay = 'RNA'
)
```
Next we will output the ATAC peak matrix; for this we select:
1. the minimum number of cells a cluster needs to have to be included via min_cells
2. the output directory
3. the cluster ID containing the cluster names
4. the name of the assay containing the ATAC peak data.
```{r, eval = FALSE}
export_ATAC_scANANSE(
pbmc,
  min_cells = 25,
output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
ATAC_peak_assay = 'peaks'
)
```
Next we will generate the config and sample file needed for anansnake, and specify pairwise comparisons between clusters of interest. By default, scANANSE compares each cluster to a network based on the average values of all clusters. Additional comparisons can be specified; here, B-naive cells are also compared directly to B-memory and CD16-Mono cells (in both directions).
```{r, eval = FALSE}
contrasts <- list('B-naive_B-memory',
'B-memory_B-naive',
'B-naive_CD16-Mono',
'CD16-Mono_B-naive')
config_scANANSE(
pbmc,
  min_cells = 25,
output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
genome = './data/hg38',
additional_contrasts = contrasts
)
```
Finally we will calculate the marker genes for each cluster and between the clusters of the specified comparisons:
```{r, eval = FALSE}
DEGS_scANANSE(
pbmc,
  min_cells = 25,
  output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
additional_contrasts = contrasts
)
```
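Before moving on to anansnake, a minimal sanity check can confirm that the exported files are in place (file names as generated by the functions above):
```{r, eval = FALSE}
# The output directory should now contain TPM.tsv, RNA_Counts.tsv,
# Peak_Counts.tsv, samplefile.tsv, config.yaml and a deseq2/ folder
# with the differential expression tables.
list.files(paste0(tempdir(), '/analysis'), recursive = TRUE)
```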
#### **Run Anansnake on the generated files**
After this, all your files are ready to run anansnake for gene regulatory network analysis. For info on installing and running anansnake see: <https://github.com/vanheeringen-lab/anansnake>
#### **Import influence scores back into your single cell object**
After running anansnake the results can be incorporated back into your single cell object.
```{r, eval = FALSE}
pbmc <- import_seurat_scANANSE(pbmc,
cluster_id = 'predicted.id',
                               anansnake_inf_dir = paste0(tempdir(), '/analysis/influence/'))
```
The top TFs contributing to your clusters can be visualized as a table:
```{r, eval = FALSE}
TF_influence <- per_cluster_df(pbmc,
assay = 'influence',
cluster_id = 'predicted.id')
head(TF_influence)
```
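Since the rows of this table are TFs and its columns are clusters, the top regulators per cluster can be ranked directly; a minimal sketch in base R:
```{r, eval = FALSE}
# For every cluster (column), take the five TFs with the highest influence score.
apply(TF_influence, 2, function(x) names(sort(x, decreasing = TRUE))[1:5])
```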
We can also visualize the influence score of TFs in the single cell object:
```{r, eval = FALSE}
highlight_TF1 <- c('STAT4', 'MEF2C')
DefaultAssay(object = pbmc) <- "RNA"
plot_expression1 <-
FeaturePlot(pbmc, features = highlight_TF1, ncol = 1)
DefaultAssay(object = pbmc) <- "influence"
plot_ANANSE1 <-
FeaturePlot(
pbmc,
ncol = 1,
features = highlight_TF1,
cols = c("darkgrey", "#fc8d59")
)
print(plot_expression1 | plot_ANANSE1)
```
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/inst/doc/introduction.Rmd |
---
title: "single cell cluster ananse analysis"
output:
html_document:
---
```{r setup}
# if (!requireNamespace("remotes", quietly = TRUE)) {
# install.packages("remotes")
# }
# Sys.unsetenv("GITHUB_PAT")
# remotes::install_github("mojaveazure/seurat-disk", upgrade = "never")
# remotes::install_github("JGASmits/AnanseSeurat", upgrade = "never")
library(Signac)
library(Seurat)
library(EnsDb.Hsapiens.v86)
library(BSgenome.Hsapiens.UCSC.hg38)
library(circlize)
library(stringr)
library(ComplexHeatmap)
library(ggplot2)
library(SeuratDisk)
library(AnanseSeurat)
set.seed(1234)
knitr::opts_knit$set(root.dir = normalizePath(
"input your working dir containing the scANANSE folder"))
```
```{r}
# load the RNA and ATAC data
counts <-
Read10X_h5(
'./scANANSE/data/pbmc_granulocyte_sorted_10k_filtered_feature_bc_matrix.h5')
fragpath <-
"./scANANSE/data/pbmc_granulocyte_sorted_10k_atac_fragments.tsv.gz"
# get gene annotations for hg38
annotation <- GetGRangesFromEnsDb(ensdb = EnsDb.Hsapiens.v86)
seqlevelsStyle(annotation) <- "UCSC"
# create a Seurat object containing the RNA adata
pbmc <- CreateSeuratObject(counts = counts$`Gene Expression`,
assay = "RNA")
# create ATAC assay and add it to the object
pbmc[["ATAC"]] <- CreateChromatinAssay(
counts = counts$Peaks,
sep = c(":", "-"),
fragments = fragpath,
annotation = annotation
)
DefaultAssay(pbmc) <- "ATAC"
pbmc <- NucleosomeSignal(pbmc)
pbmc <- TSSEnrichment(pbmc)
# filter out low quality cells
pbmc <- subset(
x = pbmc,
subset = nCount_ATAC < 100000 &
nCount_RNA < 25000 &
nCount_ATAC > 1000 &
nCount_RNA > 1000 &
nucleosome_signal < 2 &
TSS.enrichment > 1
)
# call peaks using MACS2
peaks <- CallPeaks(pbmc)
# remove peaks on nonstandard chromosomes and in genomic blacklist regions
peaks <- keepStandardChromosomes(peaks, pruning.mode = "coarse")
peaks <-
subsetByOverlaps(x = peaks, ranges = blacklist_hg38_unified, invert = TRUE)
# quantify counts in each peak
macs2_counts <- FeatureMatrix(
fragments = Fragments(pbmc),
features = peaks,
cells = colnames(pbmc)
)
# create a new assay using the MACS2 peak set and add it to the Seurat object
pbmc[["peaks"]] <- CreateChromatinAssay(counts = macs2_counts,
fragments = fragpath,
annotation = annotation)
DefaultAssay(pbmc) <- "RNA"
pbmc <- SCTransform(pbmc)
pbmc <- RunPCA(pbmc)
DefaultAssay(pbmc) <- "peaks"
pbmc <- FindTopFeatures(pbmc, min.cutoff = 5)
pbmc <- RunTFIDF(pbmc)
pbmc <- RunSVD(pbmc)
```
```{r}
# load PBMC reference
reference <-
LoadH5Seurat("./scANANSE/data/pbmc_multimodal.h5seurat")
DefaultAssay(pbmc) <- "SCT"
# transfer cell type labels from reference to query
transfer_anchors <- FindTransferAnchors(
reference = reference,
query = pbmc,
normalization.method = "SCT",
reference.reduction = "spca",
recompute.residuals = FALSE,
dims = 1:50
)
predictions <- TransferData(
anchorset = transfer_anchors,
refdata = reference$celltype.l2,
weight.reduction = pbmc[['pca']],
dims = 1:50
)
pbmc <- AddMetaData(object = pbmc,
metadata = predictions)
# set the cell identities to the cell type predictions
Idents(pbmc) <- "predicted.id"
# set a reasonable order for cell types to be displayed when plotting
levels(pbmc) <-
c(
"CD4 Naive",
"CD4 TCM",
"CD4 CTL",
"CD4 TEM",
"CD4 Proliferating",
"CD8 Naive",
"dnT",
"CD8 TEM",
"CD8 TCM",
"CD8 Proliferating",
"MAIT",
"NK",
"NK_CD56bright",
"NK Proliferating",
"gdT",
"Treg",
"B naive",
"B intermediate",
"B memory",
"Plasmablast",
"CD14 Mono",
"CD16 Mono",
"cDC1",
"cDC2",
"pDC",
"HSPC",
"Eryth",
"ASDC",
"ILC",
"Platelet"
)
```
```{r}
# build a joint neighbor graph using both assays
pbmc <- FindMultiModalNeighbors(
object = pbmc,
reduction.list = list("pca", "lsi"),
dims.list = list(1:50, 2:40),
modality.weight.name = "RNA.weight",
verbose = TRUE
)
# build a joint UMAP visualization
pbmc <- RunUMAP(
object = pbmc,
nn.name = "weighted.nn",
assay = "RNA",
verbose = TRUE
)
DimPlot(pbmc,
label = TRUE,
repel = TRUE,
reduction = "umap") + NoLegend()
```
```{r}
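# anansnake encodes contrasts as 'anansesnake_<cluster1>_<cluster2>', so the
# cluster labels themselves must not contain spaces or underscores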
Idents(pbmc) <- str_replace(Idents(pbmc), ' ', '-')
Idents(pbmc) <- str_replace(Idents(pbmc), '_', '-')
pbmc$predicted.id <- str_replace(pbmc$predicted.id, ' ', '-')
pbmc$predicted.id <- str_replace(pbmc$predicted.id, '_', '-')
```
```{r}
rds_file <- './scANANSE/data/preprocessed_PBMC.Rds'
if (file.exists(rds_file)) {
pbmc <- readRDS(rds_file)
} else{
saveRDS(pbmc, file = rds_file)
}
pdf(
'./scANANSE/umap.pdf',
width = 7,
height = 3.5,
paper = 'special'
)
DimPlot(pbmc,
label = TRUE,
repel = TRUE,
reduction = "umap") + NoLegend()
dev.off()
table(pbmc@meta.data$predicted.id)
```
```{r}
#Load pre-processed seurat object RDS file
rds_file <- './scANANSE/data/preprocessed_PBMC.Rds'
pbmc <- readRDS(rds_file)
export_CPM_scANANSE(
pbmc,
  min_cells = 25,
output_dir = './scANANSE/analysis',
cluster_id = 'predicted.id',
RNA_count_assay = 'RNA'
)
export_ATAC_scANANSE(
pbmc,
  min_cells = 25,
output_dir = './scANANSE/analysis',
cluster_id = 'predicted.id',
ATAC_peak_assay = 'peaks'
)
#specify additional contrasts:
contrasts <- list('B-naive_B-memory',
'B-memory_B-naive',
'B-naive_CD16-Mono',
'CD16-Mono_B-naive')
config_scANANSE(
pbmc,
  min_cells = 25,
output_dir = './scANANSE/analysis',
cluster_id = 'predicted.id',
genome = './scANANSE/data/hg38',
additional_contrasts = contrasts
)
DEGS_scANANSE(
pbmc,
  min_cells = 25,
output_dir = './scANANSE/analysis',
cluster_id = 'predicted.id',
additional_contrasts = contrasts
)
```
## Run anansnake before continuing
Let's load the results from anansnake:
```{r}
rds_file <- './scANANSE/data/preprocessed_PBMC.Rds'
pbmc <- readRDS(rds_file)
cluster_id <- 'predicted.id'
pbmc <- import_seurat_scANANSE(pbmc,
cluster_id = 'predicted.id',
anansnake_inf_dir =
"./scANANSE/analysis/influence/")
TF_influence <- per_cluster_df(pbmc,
cluster_id = 'predicted.id',
assay = 'influence')
```
Get the top 5 TFs per cluster:
```{r}
TF_influence$TF <- rownames(TF_influence)
TF_long <- reshape2::melt(TF_influence, id.vars = 'TF')
TF_influence$TF <- NULL
colnames(TF_long) <- c('TF', 'cluster', 'influence')
TF_long <- TF_long[order(TF_long$influence, decreasing = TRUE),]
#get the top n TFs per cluster
topTF <- Reduce(rbind,
by(TF_long,
TF_long["cluster"],
head,
n = 5))# Top N highest TFs by cluster
top_TFs <- unique(topTF$TF)
TF_table <- topTF %>%
dplyr::group_by(cluster) %>%
dplyr::mutate('TopTFs' = paste0(TF, collapse = " "))
unique(TF_table[, c('cluster', 'TopTFs')])
```
```{r}
col_fun <- colorRamp2(c(0, 1), c("white", "orange"))
mat <- TF_influence[rownames(TF_influence) %in% top_TFs, ]
pdf(
'./scANANSE/analysis/ANANSE_Heatmap.pdf',
width = 16,
height = 8,
paper = 'special'
)
Heatmap(mat, col = col_fun)
dev.off()
```
Generate a Heatmap of the top TFs
```{r}
set.seed(123)
DefaultAssay(object = pbmc) <- "influence"
mat <- TF_influence[rownames(TF_influence) %in% top_TFs, ]
col_fun <- colorRamp2(c(0, 1), c("white", "orange"))
costum_sample_order <- c(
'CD14-Mono',
'CD16-Mono',
'cDC2',
'pDC',
'HSPC',
'B-naive',
'B-intermediate',
'B-memory',
'CD8-Naive',
'CD4-Naive',
'CD8-TCM',
'CD4-TEM',
'CD4-TCM',
'Treg',
'MAIT',
'CD8-TEM',
'gdT',
'NK'
)
dend <- stats::as.dendrogram(stats::hclust(dist(t(mat))))
pdf(
"./scANANSE/analysis/ANANSE_Heatmap.pdf" ,
width = 16,
height = 8,
paper = 'special'
)
print(
ComplexHeatmap::Heatmap(
mat,
row_dend_side = "right",
show_column_dend = T,
col = col_fun,
cluster_columns = dend,
row_km_repeats = 100
)
)
dev.off()
```
```{r}
pdf(
'./scANANSE/analysis/reference_umap.pdf',
width = 15,
height = 15,
paper = 'special'
)
Idents(object = reference) <- "celltype.l2"
print(DimPlot(
reference,
label = T,
repel = TRUE,
reduction = "umap"
))
Idents(object = reference) <- "celltype.l1"
print(DimPlot(
reference,
label = T,
repel = TRUE,
reduction = "umap"
))
dev.off()
pdf(
'./scANANSE/analysis/ANANSE_umap.pdf',
width = 10,
height = 5,
paper = 'special'
)
Idents(object = pbmc) <- "predicted.id"
print(DimPlot(
pbmc,
label = T,
repel = TRUE,
reduction = "umap"
) + NoLegend())
dev.off()
```
```{r}
highlight_TF1 <- c('STAT4', 'LEF1', 'MEF2C')
pdf(
'./scANANSE/analysis/ANANSE_highlight.pdf',
width = 10,
height = 10,
paper = 'special'
)
DefaultAssay(object = pbmc) <- "RNA"
plot_expression1 <-
FeaturePlot(pbmc, features = highlight_TF1, ncol = 1)
DefaultAssay(object = pbmc) <- "influence"
plot_ANANSE1 <-
FeaturePlot(
pbmc,
ncol = 1,
features = highlight_TF1,
cols = c("darkgrey", "#fc8d59")
)
print(DimPlot(
pbmc,
label = T,
repel = TRUE,
reduction = "umap"
) + NoLegend())
print(plot_expression1 | plot_ANANSE1)
dev.off()
```
Let's visualize the influence results of a specific contrast:
```{r}
MemoryInfluence <-
read.table('./scANANSE/analysis/influence/anansesnake_B-memory_B-naive.tsv',
header = T)
NaiveInfluence <-
read.table('./scANANSE/analysis/influence/anansesnake_B-naive_B-memory.tsv',
header = T)
NaiveInfluence$factor_fc <- NaiveInfluence$factor_fc * -1
B_comparison <- rbind(NaiveInfluence, MemoryInfluence)
B_comparison_plot <-
ggplot(B_comparison, aes(factor_fc, influence_score)) +
geom_point(aes(size = direct_targets, colour = influence_score)) +
xlim(-2, 2) +
geom_text(aes(
label = ifelse(factor_fc > 0.26 |
factor_fc < -0.5, as.character(factor), ""),
hjust = 0.5,
vjust = 2
))
pdf(
'./scANANSE/analysis/B_comparison_plot.pdf',
width = 8,
height = 3.5,
paper = 'special'
)
print(B_comparison_plot)
dev.off()
```
Import the motif enrichment results:
```{r}
rds_file <- './scANANSE/data/preprocessed_PBMC.Rds'
pbmc <- readRDS(rds_file)
pbmc <- import_seurat_maelstrom(pbmc,
                                cluster_id = 'predicted.id',
                                maelstrom_file =
                                  './scANANSE/analysis/maelstrom/final.out.txt')
motif_scores <- per_cluster_df(pbmc,
assay = 'maelstrom',
cluster_id = 'predicted.id')
head(motif_scores)
```
```{r}
pbmc <- Maelstrom_Motif2TF(
pbmc,
cluster_id = 'predicted.id',
maelstrom_dir = './scANANSE/analysis/maelstrom',
RNA_expression_assay = "SCT",
expr_tresh = 100,
cor_tresh = 0.3,
combine_motifs = 'max_cor'
)
act_t <-
per_cluster_df(pbmc, assay = 'TFcor', cluster_id = 'predicted.id')
negcor_TFs <-
per_cluster_df(pbmc, assay = 'TFanticor', cluster_id = 'predicted.id')
top_pTFs <- head(pbmc@assays[["TFcor"]][[]], 15)
top_nTFs <- head(pbmc@assays[["TFanticor"]][[]], 15)
cluster_order <-
c(
'CD14-Mono',
'CD16-Mono',
"cDC2",
"pDC",
"HSPC",
"B-naive",
"B-intermediate",
"B-memory",
"CD4-Naive",
"CD8-Naive",
"CD8-TCM",
"CD4-TEM",
"Treg",
"CD8-TEM" ,
"MAIT",
"gdT",
"NK"
)
```
```{r}
col_fun <-
circlize::colorRamp2(c(-5, 0, 5), c('#998ec3', 'white', '#f1a340'))
col_fun_cor <-
circlize::colorRamp2(c(-1, 0, 1), c('#7b3294', '#f7f7f7', '#008837'))
pdf(
'./scANANSE/analysis/Maelstrom_correlations.pdf',
width = 8,
height = 5,
paper = 'special'
)
for (regtype in c('TFcor', 'TFanticor')) {
top_TFs <- head(pbmc@assays[[regtype]][[]], 15)
mat <-
per_cluster_df(pbmc, assay = regtype, cluster_id = 'predicted.id')
mat <- mat[rownames(mat) %in% rownames(top_TFs), ]
#get TF expression matrix
exp_mat <-
AverageExpression(
pbmc,
assay = 'SCT',
slot = 'data',
features = rownames(top_TFs),
group.by = 'predicted.id'
)[[1]]
exp_mat <- exp_mat[, colnames(exp_mat)]
exp_mat <- t(scale(t(exp_mat)))
#get correlation score
row_ha <- rowAnnotation(correlation = top_TFs$cor,
col = list(correlation = col_fun_cor))
print(
Heatmap(exp_mat[, cluster_order], cluster_columns = F) + Heatmap(
mat[, cluster_order],
col = col_fun,
cluster_columns = F,
right_annotation = row_ha
)
)
}
dev.off()
```
```{r}
TF_list <- c('PAX5', 'STAT6', 'ETS1', 'GATA3', 'MAX')
pdf(
'./scANANSE/analysis/Factor_Motif_TFanticor.pdf',
width = 8,
height = 8,
paper = 'special'
)
Factor_Motif_Plot(
pbmc,
TF_list,
assay_maelstrom = 'TFanticor',
logo_dir = './scANANSE/analysis/maelstrom/logos/',
col = c('darkred', 'white', 'grey')
)
dev.off()
TF_list <- c('MEF2C', 'TCF7', 'ETS1', 'GATA3', 'MAX')
pdf(
'./scANANSE/analysis/Factor_Motif_TFcor.pdf',
width = 8,
height = 8,
paper = 'special'
)
Factor_Motif_Plot(
pbmc,
TF_list,
assay_maelstrom = 'TFcor',
logo_dir = './scANANSE/analysis/maelstrom/logos/',
col = c('grey', 'white', 'darkgreen')
)
dev.off()
```
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/inst/scANANSE.Rmd |
---
title: "introduction"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(collapse = TRUE,
comment = "#>")
knitr::opts_chunk$set(fig.width = 7, fig.height = 5)
```
#### **Load your single cell object(s)**
AnanseSeurat requires a preprocessed single cell object. Before exporting data, make sure the single cell object has been filtered for good quality cells and has undergone sufficient pre-processing. For guides on pre-processing scRNA-seq and scATAC-seq data there are some great vignettes and guides available. We will also set the working directory.
```{r setup, eval = FALSE}
# if (!requireNamespace("remotes", quietly = TRUE)) {
# install.packages("remotes")
# }
# Sys.unsetenv("GITHUB_PAT")
# remotes::install_github("JGASmits/AnanseSeurat")
library(AnanseSeurat)
library(Seurat)
library(Signac)
```
In this example we will use a preprocessed 10x PBMC multiome dataset (PBMC from a Healthy Donor (v1, 150x150) Single Cell Multiome ATAC + Gene Expression Dataset by Cell Ranger ARC 2.0.0). This data was pre-processed following the standard pre-processing from Signac. The pre-processed single cell object is available at Zenodo: <https://zenodo.org/record/7446267/>
```{r load_scObject, eval = FALSE}
rds_file <- 'preprocessed_PBMC.Rds'
pbmc <- readRDS(rds_file)
DimPlot(pbmc,
label = TRUE,
repel = TRUE,
reduction = "umap") + NoLegend()
```
This single cell object contains multimodal data, with both RNA and ATAC signal from each cell. However, in the case of a separate scRNA-seq object and scATAC-seq object, AnanseSeurat can still prepare files for running single cell ANANSE. With two separate objects as input it is important that both objects share their respective cluster names; a minimal check is sketched below.
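A sketch of that check, assuming two hypothetical objects `rna_obj` and `atac_obj` (placeholder names, not objects created in this vignette) that both store their cluster labels in a `predicted.id` metadata column:
```{r check_clusters, eval = FALSE}
# Hypothetical objects: rna_obj (scRNA-seq) and atac_obj (scATAC-seq).
# Both must carry identical labels in the metadata column passed as cluster_id.
setdiff(unique(rna_obj$predicted.id),
        unique(atac_obj$predicted.id)) # should return character(0)
```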
#### **Output files using AnanseSeurat**
We will start with outputting the CPM file, for this we select:
1. the minimum number of cells a cluster needs to have to be included, via min_cells
2. the output directory
3. the cluster ID containing the cluster names
4. the name of the Assay containing the RNA data.
```{r export_CPMs, eval = FALSE}
export_CPM_scANANSE(
pbmc,
min_cells <- 25,
output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
RNA_count_assay = 'RNA'
)
```
Next we will output the ATAC peak matrix; for this we select:
1. the minimum number of cells a cluster needs to have to be included, via min_cells
2. the output directory
3. the cluster ID containing the cluster names
4. the name of the Assay containing the ATAC peak data.
```{r, eval = FALSE}
export_ATAC_scANANSE(
pbmc,
min_cells = 25,
output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
ATAC_peak_assay = 'peaks'
)
```
Next we will generate the config and sample files needed for anansnake, and also specify pairwise comparisons between clusters of interest. By default, scANANSE compares each cluster to a network based on the average values of all clusters. Additional comparisons can be specified; in this case, B-naive and B-memory cells were also set to be compared directly to each other.
```{r, eval = FALSE}
contrasts <- list('B-naive_B-memory',
'B-memory_B-naive',
'B-naive_CD16-Mono',
'CD16-Mono_B-naive')
config_scANANSE(
pbmc,
min_cells = 25,
output_dir = paste0(tempdir(),'/analysis'),
cluster_id = 'predicted.id',
genome = './data/hg38',
additional_contrasts = contrasts
)
```
Finally we will calculate the marker genes for each cluster and between clusters of a specific comparison:
```{r, eval = FALSE}
DEGS_scANANSE(
pbmc,
min_cells = 25,
output_dir = './analysis',
cluster_id = 'predicted.id',
additional_contrasts = contrasts
)
```
#### **Run anansnake on the generated files**
After this, all your files are ready to run anansnake for gene regulatory network analysis. For info on installing and running anansnake see: <https://github.com/vanheeringen-lab/anansnake>
#### **Import influence scores back into your single cell object**
After running anansnake the results can be incorporated back into your single cell object.
```{r, eval = FALSE}
pbmc <- import_seurat_scANANSE(pbmc,
cluster_id = 'predicted.id',
anansnake_inf_dir = "./analysis/influence/")
```
The top TFs contributing to your clusters can be visualized as a table:
```{r, eval = FALSE}
TF_influence <- per_cluster_df(pbmc,
assay = 'influence',
cluster_id = 'predicted.id')
head(TF_influence)
```
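For example, you could pull the highest-scoring factor per cluster from this table; a sketch in base R, assuming rows are TFs and columns are clusters, as returned above:
```{r, eval = FALSE}
# Top-scoring TF per cluster, taken from the influence table above
apply(TF_influence, 2, function(x) rownames(TF_influence)[which.max(x)])
```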
We can also visualize the influence score of TFs in the single cell object:
```{r, eval = FALSE}
highlight_TF1 <- c('STAT4', 'MEF2C')
DefaultAssay(object = pbmc) <- "RNA"
plot_expression1 <-
FeaturePlot(pbmc, features = highlight_TF1, ncol = 1)
DefaultAssay(object = pbmc) <- "influence"
plot_ANANSE1 <-
FeaturePlot(
pbmc,
ncol = 1,
features = highlight_TF1,
cols = c("darkgrey", "#fc8d59")
)
print(plot_expression1 | plot_ANANSE1)
```
| /scratch/gouwar.j/cran-all/cranData/AnanseSeurat/vignettes/introduction.Rmd |
#' Add individuals to the CorPheno file.
#'
#' Add individuals to the CorPheno file so that they can be ordered and plotted with specific colours. IDs that aren't in the CorPheno will be plotted under 'Unspecified' and given a grey colour by plotAMids().
#'
#' @param phenoId Path to file containing the list of individuals to be added to CorPheno, with the three columns 'UNIQID', 'Fam' and 'Pheno_Pop'.
#' A full example called 'Example.phenoId' is present in the 'extdata' folder of the AncestryMapper package.
#' @param phenoValues Path to file containing information on each population to be added, such as continental origin and colours as well as other information.
#' A full example called 'Example.phenoValues' is present in the 'extdata' folder of the AncestryMapper package.
#'
#' @param ignoreDupes Logical value (TRUE or FALSE), specifying if the presence of individual IDs already in CorPheno should be ignored. Useful if the user knows some IDs are already present and just wants to add the individuals from phenoId that are not already included.
#' Default = FALSE
#'
#' @param phenoFile Main file with phenotype information for each individual. A sample file called CorPheno is included with the package in the extdata folder. It contains values for the samples from the HGDP. This function augments this file with any novel individuals.
#' If no value is given the sample file in the extdata folder is used by default.
#'
#' @param writeCor Logical value (TRUE or FALSE), specifying if the new CorPheno should overwrite the file given in 'phenoFile'. A backup of the previous CorPheno file with the same path as given in 'phenoFile' with '_Original' appended to the name will also be produced. You could alternatively write out your new file to your preferred location with write.table, making sure to keep the columns tab-separated. Default = TRUE
#'
#' @examples
#' \dontrun{
#' phenoIdPth <- system.file ("extdata", "Example.phenoId", package = "AncestryMapper")
#' PhenoValPth <- system.file("extdata", "Example.phenoValues", package = "AncestryMapper")
#' Corpheno <- system.file("extdata", "CorPheno", package = "AncestryMapper")
#'
#' refAdd(phenoId = phenoIdPth, phenoValues = PhenoValPth, phenoFile = Corpheno)
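#'
#' # Alternatively (a sketch): keep the packaged CorPheno untouched by setting
#' # writeCor = FALSE and write the merged table returned by refAdd yourself;
#' # 'myCorPheno' is a hypothetical output path.
#' newPheno <- refAdd(phenoId = phenoIdPth, phenoValues = PhenoValPth,
#'                    phenoFile = Corpheno, writeCor = FALSE)
#' write.table(newPheno, file = 'myCorPheno', row.names = FALSE,
#'             col.names = TRUE, quote = FALSE, sep = '\t')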
#'
#' }
#' @rdname refAdd
#' @export
#'
refAdd <- function(phenoId, phenoValues, ignoreDupes=F, phenoFile, writeCor = T){
if(missing(phenoId)) stop('phenoId is missing, needs path to phenoId file, see ?refAdd')
if(missing(phenoValues)) stop('phenoValues is missing, needs path to phenoValues file, see ?refAdd')
SamplePheno <- read.table(phenoId,header=T)
PopPheno <- read.table(phenoValues,header=T)
#if(missing(CorPheno)) CorPheno <- read.csv(system.file('extdata',"CorPheno",package="AncestryMapper"),h=T,sep='\t')
if(missing(phenoFile)) phenoFile <- system.file('extdata',"CorPheno",package="AncestryMapper")
CorPheno <- read.csv(phenoFile,header=T,sep='\t')
DupeID <- SamplePheno$UNIQID[SamplePheno$UNIQID%in%CorPheno$UNIQID]
NovelID <- SamplePheno$UNIQID[!(SamplePheno$UNIQID%in%CorPheno$UNIQID)]
if(ignoreDupes==F){
if(length(DupeID)>0) stop(paste0("Found ",length(DupeID)," of ",length(SamplePheno$UNIQID)," IDs already in CorPheno; if you are aware of this and wish to proceed, please run again with the argument 'ignoreDupes' set to TRUE. This will add only those IDs not already in the CorPheno."))
}
#SamplePhenoNov <- SamplePheno[!(SamplePheno$UNIQID%in%CorPheno$UNIQID),]
SampleIDNovel <- SamplePheno[!(SamplePheno$UNIQID%in%CorPheno$UNIQID),c('Pheno_Pop','UNIQID','Fam')]
if(nrow(SampleIDNovel)==0) stop("No novel IDs found in phenoId, all already present in CorPheno")
#SamplePhenoNov$UNIQID <- NULL
#SamplePhenoNov$Fam <- NULL
#SamplePhenoNov <- unique(SamplePhenoNov)
#for(z in unique(SamplePheno[SamplePheno$PhenoPop[!(SamplePheno$UNIQID%in%Corpheno$UNIQID)]){
#for(z in seq(1,length(SamplePhenoNov$Pheno_Pop))){
# NovelPopEntry <- CorPheno[1,]
# NovelPopEntry$UNIQID <- NULL
# NovelPopEntry$Fam <- NULL
# NovelPopEntry$Pheno_Pop <- SamplePhenoNov$PhenoPop[z]
# NovelPopEntry$Pheno_Data <- SamplePhenoNov$Pheno_Data[z]
# NovelPopEntry$Pheno_Region <- SamplePhenoNov$Pheno_Region[z]
# NovelPopEntry$Pheno_Continental <- SamplePhenoNov$Pheno_Continental[z]
# NovelPopEntry$Colors_Pop <- SamplePhenoNov$Colors_Pop[z]
# NovelPopEntry$Colors_Region <- SamplePhenoNov$Colors_Region[z]
# NovelPopEntry$Colors_Continental <- SamplePhenoNov$Colors_Continental[z]
# NovelPopEntry$Colors_Data <- SamplePhenoNov$Colors_Data[z]
# NovelPopEntry$Order <- SamplePhenoNov$Order[z]
# if(z!=1) NovelPopAll <- rbind(NovelPopAll,NovelPopEntry)
# if(z==1) NovelPopAll <- NovelPopEntry
#}
#NovelPopAllIds <- merge(NovelPopAll,SampleIDNovel,by='Pheno_Pop')
NovelPopAllIds <- merge(SampleIDNovel,PopPheno,by='Pheno_Pop')
NovelPopAllIds <- NovelPopAllIds[,colnames(CorPheno)]
NovelPopAllIds$Pheno_Pop <- paste0(NovelPopAllIds$Pheno_Pop,'.',NovelPopAllIds$Pheno_Data)
if(writeCor) CorPhenoOrig <- CorPheno
CorPheno <- rbind(CorPheno,NovelPopAllIds)
if(writeCor){
#write.table(CorPhenoOrig,file=paste0(system.file('extdata',"CorPheno",package="AncestryMapper"),'_Backup'),row.names=F,col.names=T,quote=F,sep='\t')
write.table(CorPhenoOrig,file=paste0(phenoFile,"_Original"),row.names=F,col.names=T,quote=F,sep='\t')
#write.table(CorPheno,file=system.file('extdata',"CorPheno",package="AncestryMapper"),row.names=F,col.names=T,quote=F,sep='\t')
write.table(CorPheno,file=phenoFile,row.names=F,col.names=T,quote=F,sep='\t')
}
return(CorPheno)
}
#if(Pathopop%in%corPheno.df$Pheno_Pop==FALSE){
# print(paste0(Pathopop," is not in reference file, please use refAdd('Poptoadd','PopColor','Continent') to add to reference file or check to make sure ",Pathopop," does not contain typos.")
# print("All Current Population Phenotypes")
# print(unique(corPheno.df$Pheno_Pop))
# PathopopEntry <- corPheno.df[1,]
# PathopopEntry$UNIQID <- PathoFileNam
# PathopopEntry$Pheno_Pop <- gsub('-','_',Pathopop)
# corPheno.df <- rbind(corPheno.df,PathopopEntry)
#}
| /scratch/gouwar.j/cran-all/cranData/AncestryMapper/R/CorphenoMaker.R |
#' Visualises genetic distances.
#'
#' plotAMids is used to visualise the relationship amongst individuals and references.
#'
#' @param AMids Dataframe of genetic distances calculated by calculateAMids or calculateAMidsArith.
#'
#' @param phenoFile Optional file with phenotype, color and order information for individuals and populations. An example file, called CorPheno, is contained in the 'extdata' folder with the package.
#'
#' @param columnPlot Takes values 'I' or 'C'. 'I' is the default option. 'I' plots the normalised Euclidean distances whereas 'C' plots the crude distances.
#'
#' @param quantilePlot Logical. Takes values TRUE or FALSE. TRUE is the default option. If columnPlot is 'C', TRUE will plot the quantiles, FALSE will plot the raw values.
#'
#' @param colorPlot Colors for the AMids. Possible choices are 'RedBl', 'RedBlGr' and 'BlBrewer'. The user can also provide a vector of colors.
#'
#' @param sepLinesPop Logical. Takes values TRUE or FALSE. The default is TRUE.
#' If TRUE, a line demarcating populations is plotted.
#'
#' @param plotIndNames Logical. Takes values TRUE or FALSE. The default is FALSE.
#' If TRUE, the individual ids are plotted on the left axis.
#'
#' @param legColor Logical. Takes values TRUE or FALSE. The default is TRUE.
#' If TRUE, the legend for the colour gradient will be plotted in the top left.
#'
#' @param legRef Logical. Takes values TRUE or FALSE. The default is TRUE. If TRUE, text giving names of references will be plotted along the x axis.
#'
#' @param legPheno Logical. Takes values TRUE or FALSE. The default is TRUE. If TRUE, will plot colour blocks relating to the population, dataset and regional origin of samples whose IDs have entries in the CorPheno file; if not present, they will be plotted under 'Unspecified'.
#'
#' @param legAxisPop Logical. Takes values TRUE or FALSE. The default is TRUE.
#' If TRUE, the population names and sample counts will be plotted on the right y axis of the plot.
#'
#' @param legData Logical. Takes values TRUE or FALSE. The default is FALSE.
#' If TRUE, the reference to the dataset used to create the reference is appended to the reference population name on the bottom x axis.
#'
#' @param bmar Takes numeric value. Changes the size of the bottom outer margin of the plot. The default is empty.
#' For more see ?par()
#'
#' @param lmar Takes numeric value. Changes the size of the left outer margin of the plot. The default is empty.
#' For more see ?par()
#'
#' @param tmar Takes numeric value. Changes the size of the top outer margin of the plot. The default is empty.
#' For more see ?par()
#'
#' @param rmar Takes numeric value. Changes the size of the right outer margin of the plot. The default is empty.
#' For more see ?par()
#'
#' @param cexref Takes numeric value. Controls text size of reference names on the x axis. Default is 0.9.
#'
#' @param cexind Takes numeric value. Controls text size of sample names on y axis. Default is 0.8.
#' Individual sample IDs need plotIndNames = TRUE to display; this is set to FALSE by default.
#'
#'
#'
#' @examples
#' \dontrun{
#' Refs <- system.file('data', package = 'AncestryMapper')
#' tpeds <- system.file('extdata', package = 'AncestryMapper')
#' Corpheno <- system.file('extdata', 'CorPheno', package = 'AncestryMapper')
#' All00Frq <- system.file ('data', 'MinMaxFreq.rda', package = 'AncestryMapper')
#'
#' genetic.distance <- calculateAMidsArith(pathTotpeds = tpeds,
#' NameOut = 'Example',
#' pathToAriMedoids = Refs,
#' pathAll00 = All00Frq)
#'
#' plotAMids(AMids = genetic.distance, phenoFile = Corpheno, columnPlot = "I")
#' }
#' @rdname plotAMids
#' @export
#'
plotAMids <- function(AMids, phenoFile, columnPlot = "I", quantilePlot = TRUE, colorPlot = "BlBrewer", sepLinesPop = TRUE, plotIndNames = FALSE,
legColor = TRUE, legRef = TRUE, legPheno = TRUE, legAxisPop = TRUE, legData = FALSE, bmar, lmar, tmar, rmar, cexref = 0.9, cexind = 0.8) {
#if(missing(phenoFile)) phenoFile <- system.file('extdata','CorPheno',package="AncestryMapper")
Pheno <- read.table(phenoFile, header = T, as.is = T, comment.char = "")
sepLineRef = FALSE
aid <- AMids
Is <- gsub('I_','',colnames(aid)[grep('I_',colnames(aid))])
ICCol <- grep('I_|C_',colnames(aid),invert=T)
PopOrd <- Pheno[,c('Pheno_Pop','Order')]
PopOrd <- unique(PopOrd)
PopOrd <- PopOrd[order(PopOrd[,2]),]
Unordref <- Is[!(Is%in%PopOrd[, 1])]
if(length(Unordref)==1) print(paste0(length(Unordref),' reference without entry and order in Corpheno, it will be plotted at the beginning of the Y axis.'))
if(length(Unordref)>1) print(paste0(length(Unordref),' references without entry and order in Corpheno, these will be plotted at the beginning of the Y axis.'))
PopOrd <- PopOrd[PopOrd[,1]%in%Is,]
#Dealing with undefined refs
if(length(Unordref)>=1){
#nrnewrow <- 1:length(Unordref)
nrnewrow <- length(Unordref)
#PopOrd <- rbind(nrnewrow,PopOrd)
newrows <- data.frame(matrix(0,nrow=nrnewrow,ncol=2))
colnames(newrows) <- colnames(PopOrd)
PopOrd <- rbind(newrows,PopOrd)
#PopOrd[nrnewrow,1] <- Unordref
PopOrd[1:nrnewrow,1] <- Unordref
#PopOrd[nrnewrow,2] <- 0
}
CorColOrd <- c(paste0('I_',PopOrd[,1]),paste0('C_',PopOrd[,1]),colnames(aid)[ICCol])
aid <- aid[,CorColOrd]
ICCol <- grep('I_|C_',colnames(aid),invert=T)
colnames(aid)[ICCol] <- 'Id'
aid <- aid[!duplicated(aid$Id),]
colnames(aid) <- gsub('1000.Genomes','1KG',colnames(aid))
aid <- merge(aid, Pheno, by.x = "Id", by.y = "UNIQID", all.x = T)
aid$Order[is.na(aid$Pheno_Pop)] <- min(aid$Order[!(is.na(aid$Pheno_Pop))]) -1
aid$Colors_Pop[is.na(aid$Pheno_Pop)] <- 'Grey'
aid$Colors_Data[is.na(aid$Pheno_Pop)] <- 'Grey'
aid$Colors_Region[is.na(aid$Pheno_Pop)] <- 'Grey'
aid$Pheno_Pop[is.na(aid$Pheno_Pop)] <- 'Unspecified'
aid <- aid[order(aid$Order, aid$Pheno_Pop, aid$Id), ]
aid <- aid[nrow(aid):1, ]
RedBl <- c("#2400D9", "#191DF7", "#2957FF", "#3D87FF", "#57B0FF", "#75D3FF", "#99EBFF", "#BDF9FF", "#EBFFFF", "#FFFFEB", "#FFF2BD", "#FFD699", "#FFAC75",
"#FF7857", "#FF3D3D", "#F72836", "#D91630", "#A60021")
RedBl <- rev(RedBl)
RedBlGr <- c(RedBl[1:9], gray.colors(100 - length(RedBl)), RedBl[10:18])
BlBrewer <- c("#FFFFD9", "#EDF8B1", "#C7E9B4", "#7FCDBB", "#41B6C4", "#1D91C0", "#225EA8", "#253494", "#081D58", "black")
colImage <- switch(colorPlot, BlBrewer = BlBrewer, RedBl = RedBl, RedBlGr = RedBlGr, RedBlGr)
plotI <- aid[grep(paste("^", columnPlot, "_", sep = ""), names(aid), value = T)]
dimnames(plotI)[[1]] <- aid$Id
if(legData==FALSE){
for(z in 1:length(dimnames(plotI)[[2]])){
sprf <- strsplit(dimnames(plotI)[[2]][z],'\\.')
sprf[[1]] <- sprf[[1]][-length(sprf[[1]])]
dimnames(plotI)[[2]][z] <- paste(sprf[[1]],collapse='.')
}
}
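## Colour breaks: with quantilePlot the bin boundaries follow the empirical
## quantiles of the plotted values; the outer breaks are widened slightly so
## the minimum and maximum always fall inside a bin.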
if (quantilePlot) {
breaks.in <- quantile(unlist(plotI), probs = seq(0, 1, length.out = (length(colImage) + 1)))
breaks.in[1] <- breaks.in[1] - 1
breaks.in[length(breaks.in)] <- breaks.in[length(breaks.in)] + 1
}
if (identical(columnPlot, "I")) {
breaks.in <- c(seq(0, 95, length.out = length(colImage) - 1), 99.9, 101)
breaks.in[1] <- -1
}
matPlot <- rep(0, 18)
matPlot[7] <- 1
PhenoCols <- grep("Pheno_", names(aid), value = T)
if (legPheno)
#matPlot[8:10] <- switch(length(PhenoCols) + 1, rep(0, 3), c(2, 0, 0), c(2, 3, 0), 2:4)
matPlot[8:10] <- switch(length(PhenoCols)+1, rep(0, 3), c(2, 0, 0), c(2, 3, 0), 2:4)
if(legColor==TRUE) matColor='yes'
if(legColor==FALSE) matColor='no'
if(legRef==TRUE) matRef='yes'
if(legRef==FALSE) matRef='no'
matPlot[1] <- switch(matColor, yes = max(matPlot) + 1, 0)
matPlot[13] <- switch(matRef, yes = max(matPlot) + 1, 0)
matLayout <- matrix(matPlot, ncol = 6, byrow = T)
widths.mat <- c(7, rep(0, 4), 0.5)
if(legPheno==FALSE) widths.mat[1] <- 8.2
if (matLayout[2, 2] != 0)
widths.mat[2] <- 0.2
if (matLayout[2, 3] != 0)
widths.mat[3] <- 0.2
if (matLayout[2, 4] != 0)
widths.mat[4] <- 0.2
if (legAxisPop)
widths.mat[5] <- 0.6
heights.mat <- c(0, 6, 0)
if (matLayout[1, 1] != 0)
heights.mat[1] <- 0.5
if (matLayout[3, 1] != 0)
heights.mat[3] <- 0.5
layout(matLayout, widths = widths.mat, heights = heights.mat)
mar1 <- c(5, 3, 3, 0)
if (plotIndNames)
mar1[2] <- 8
omav <- c(0,0,0,0)
if(!(missing(bmar))) omav[1] <- bmar
if(!(missing(lmar))) omav[2] <- lmar
if(!(missing(tmar))) omav[3] <- tmar
if(!(missing(rmar))) omav[4] <- rmar
#layout.show()
par(oma = omav)
par(mar = mar1)
if (quantilePlot == TRUE | columnPlot == "I")
image(x = 1:(ncol(plotI) + 1), y = 1:(nrow(plotI) + 1), t(plotI), col = colImage, axes = F, ann = F, breaks = breaks.in)
if (columnPlot == "C" & quantilePlot == FALSE)
image(x = 1:(ncol(plotI) + 1), y = 1:(nrow(plotI) + 1), t(plotI), col = colImage, axes = F, ann = F)
if(legRef) axis(1, at = seq(ncol(plotI)) + 0.5, labels = gsub("C_|I_", "", names(plotI)), las = 2, cex.axis = cexref)
if (plotIndNames)
axis(2, at = seq(nrow(plotI)) + 0.5, labels = dimnames(plotI)[[1]], las = 2, cex.axis = cexind)
colSepPop <- "black"
if (identical(grep("Pheno_Pop", names(aid), value = T), "Pheno_Pop")) {
sep.pop <- which(!duplicated(aid$Pheno_Pop))
if (sepLinesPop)
# Controls width of popsepline
abline(h = sep.pop, col = colSepPop, lwd = 0.3)
}
sep.pop <- which(!duplicated(aid$Pheno_Pop))
#print(sep.pop)
#if (identical(sepLinesPop, TRUE)) abline(h = sep.pop, col = colSepPop, lwd = 2.0)
# Controls width of popsepline
#sepLinesHGDP <- c(6, 10, 18, 25, 44, 49)
sepLinesHGDP <- c(5, 9, 16, 24, 38, 40)
if (sepLineRef)
abline(v = sepLinesHGDP + 1, col = "black")
mar2 <- mar1
mar2[c(2, 4)] <- 0
par(mar = mar2)
cexBarPlot <- 0.7
colPheno <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999")
PhenoCol <- grep("Pheno_", names(aid), value = T)
for (i in PhenoCol) {
col.i <- gsub("Pheno_", "Colors_", i)
if (!identical(grep(col.i, names(aid), value = T), col.i)) {
aid[, col.i] <- as.factor(aid[, i])
levels(aid[, col.i]) <- rep(colPheno, length(levels(aid[, col.i])))[1:length(levels(aid[, col.i]))]
aid[, col.i] <- as.character(aid[, col.i])
}
}
if(legPheno){
if (identical(grep("Pheno_Data", names(aid), value = T), "Pheno_Data")) {
barplot(rep(1, nrow(aid)), col = aid[, "Colors_Data"], beside = F, border = NA, space = 0, horiz = T, axes = F, yaxs = "i")
if (sepLinesPop) abline(h = sep.pop - 1, col = colSepPop, lwd = 0.5)
axis(1, 0.5, "Data", cex.axis = cexBarPlot, las = 2, xpd = NA)
}
if (identical(grep("Pheno_Region", names(aid), value = T), "Pheno_Region")) {
barplot(rep(1, nrow(aid)), col = aid[, "Colors_Region"], beside = F, border = NA, space = 0, horiz = T, axes = F, yaxs = "i")
if (sepLinesPop) abline(h = sep.pop - 1, col = colSepPop, lwd = 0.5)
axis(1, 0.5, "Region", cex.axis = cexBarPlot, las = 2, xpd = NA)
}
if (identical(grep("Pheno_Pop", names(aid), value = T), "Pheno_Pop")) {
barplot(rep(1, nrow(aid)), col = aid[, "Colors_Pop"], beside = F, border = NA, space = 0, horiz = T, axes = F, yaxs = "i")
if (sepLinesPop) abline(h = sep.pop - 1, col = colSepPop, lwd = 0.5)
axis(1, 0.5, "Pop", cex.axis = cexBarPlot, las = 2, xpd = NA)
}
}
if(legAxisPop==TRUE && legPheno==TRUE){
if (identical(grep("Pheno_Pop", names(aid), value = T), "Pheno_Pop")) {
phenoLeg <- data.frame(Pheno_Pop = aid$Pheno_Pop[order(aid$Order,aid$Pheno_Pop)], Index = seq(from=nrow(aid),to=1))
phenfrq <- as.data.frame(table(phenoLeg$Pheno_Pop))
phenoLeg <- phenoLeg[!duplicated(phenoLeg$Pheno_Pop) | !duplicated(phenoLeg$Pheno_Pop, fromLast = T), ]
if(1%in%phenfrq$Freq){
for(z in phenfrq[phenfrq[,2]==1,1]){
tempduo <- rbind(phenoLeg[phenoLeg$Pheno_Pop==z,],phenoLeg[phenoLeg$Pheno_Pop==z,])
tempduo$Index[1] <- tempduo$Index[1]-0.75
tempduo$Index[2] <- tempduo$Index[2]-0.25
phenoLeg <- rbind(phenoLeg[phenoLeg$Pheno_Pop!=z,],tempduo)
}
}
phenoLeg$time <- 1:2
phenoLeg <- reshape(phenoLeg, idvar = "Pheno_Pop", v.names = "Index", timevar = "time", direction = "wide")
phenoLeg$loc <- (phenoLeg$Index.1 + phenoLeg$Index.2)/2
phenfrq <- phenfrq[match(phenoLeg$Pheno_Pop,phenfrq[,1]),]
phenfrq[,1] <- paste0(phenfrq[,1],' (',phenfrq[,2],')')
#axis(4, at = phenoLeg$loc, labels = phenoLeg$Pheno_Pop, xpd = NA, las = 2, cex.axis = 0.9)
axis(4, at = phenoLeg$loc, labels = phenfrq[,1], xpd = NA, las = 2, cex.axis = 0.9)
}
}
if(legColor){
mar3 <- mar1
mar3[c(1, 3)] <- 0
par(mar = mar3)
plot(1, 1, t = "n", axes = F, ann = F, xlim = c(0, length(colImage) * 3), xaxs = "i")
x1 <- seq(length(colImage))
#rect(x1, 0.7, x1 + 1, 0.9, col = colImage, xpd = NA)
rect(x1, 0.5, x1 + 1, 0.8, col = colImage, xpd = NA)
text(x1[1], 0.3, "low", xpd = NA)
text(x1[length(x1)]+1, 0.3, "high", xpd = NA)
popTextDown <- c(sepLinesHGDP, 45)
popText <- c("Sub-Saharan Africa", "MENA", "Europe", "C S Asia", "East Asia", "Oce", "Amer")
col.text <- "black"
par2 <- mar1
par2[c(1, 3)] <- 0
#plot(1, 1, xlim = c(0, 51), ylim = c(1, 100), xaxs = "i", axes = F, ann = F, t = "n")
sepPopLine <- 0.5
lwd.i <- 2
#y.seg.pos <- 100
#y.text.pos <- 70
y.seg.pos <- 55
y.text.pos <- 25
cex.i <- 1
x1 <- c(sepPopLine, popTextDown[-length(popTextDown)] + sepPopLine)
x2 <- c(popTextDown - sepPopLine)
}
#segments(x1, y.seg.pos, x2, y.seg.pos, col = col.text, lwd = lwd.i)
#textAll <- ((x1 + x2)/2)
#text(textAll, y.text.pos, popText, cex = cex.i, col = col.text)
}
| /scratch/gouwar.j/cran-all/cranData/AncestryMapper/R/Plot_AncestryMapper.R |
#' Calculate genetic distances.
#'
#' Calculates genetic distance between samples and population references.
#'
#' @param pathTotpeds Character vector giving path to folder containing the plink tPED file(s) to be used.
#'
#' @param pathToAriMedoids Character vector giving path to folder containing the arithmetic references to be used.
#'
#' @param AMmcapply Logical value (TRUE or FALSE), specifying if the multicore function mclapply should be used.
#' Inappropriate for most HPC cluster systems. Default = FALSE
#'
#' @param nrcores Numeric value detailing how many cores should be used if AMmcapply==TRUE.
#' If left unspecified the number of cores will be detected and nrcores will be set to that number -2.
#'
#' @param seqchip Character vector specifying if only references from one main SNP chip panel are to be used. All references are marked with what chip panel they use at the end of their file names, eg 'Yoruba.HGDP.20000.Illumina.ods'
#' May be important if your data has few SNPs in common with one panel. All toy references prepared use 'Illumina' panels. Whole Genome sequence data is specified with 'WG'.
#' Supports custom designations, but will trigger a warning when used.
#'
#' @param noseqdat Logical value (TRUE or FALSE), specifying if sequence data is to be excluded, will use only references that do not have names ending in '.WG.ods/rds/rda'. Default = FALSE
#'
#' @param wd Character vector giving the desired working directory to house the outputs of calculateAMidsArith. If left unspecified will use current working directory.
#'
#' @param NameOut Character vector giving the desired prefix name for the AMid file. Default is NULL.
#'
#' @param pathAll00 Character vector giving the path to a file containing the full data table of each dbSNP and both alleles. An example version covering the SNPs used in the example data is included. A full version can be found at: http://bit.ly/1OUstDP
#'
#'
#'
#' @examples
#' \dontrun{
#' Refs <- system.file('data', package = 'AncestryMapper')
#' tpeds <- system.file('extdata', package = 'AncestryMapper')
#' Corpheno <- system.file('extdata', 'CorPheno', package = 'AncestryMapper')
#' All00Frq <- system.file ('data', 'MinMaxFreq.rda', package = 'AncestryMapper')
#'
#' genetic.distance <- calculateAMidsArith(pathTotpeds = tpeds,
#' NameOut = 'Example',
#' pathToAriMedoids = Refs,
#' pathAll00 = All00Frq)
#'
#' plotAMids(AMids = genetic.distance, phenoFile = Corpheno, columnPlot = "I")
#' }
#' @import svd
#' @import parallel
#' @rdname calculateAMidsArith
#' @export
#'
calculateAMidsArith <- function(pathTotpeds,pathToAriMedoids,AMmcapply=F,nrcores,seqchip='',noseqdat=F,wd,NameOut=NULL,pathAll00){
#if(AMmcapply){
# library(parallel)
#}
if(missing(pathTotpeds)) stop("Error: No Path Given for Plink tPED Files")
if(missing(pathToAriMedoids)) stop("Error: No Path Given for medoid references")
if(!missing(wd)){
initialwd <- getwd()
setwd(wd)
}
if(missing(wd)) wd <- getwd()
#if(species
FUN.sumMinMaxName <- function(minMax,indGeno){
MinMaxGeno <- as.character(minMax[indGeno[1],])
## get what is 1; 2 outside for what is not 1
min1 <- min(MinMaxGeno, na.rm=F)
## substitute 1 the minimum
submin <- gsub(min1,'1',indGeno[-1])
indGeno <- c(indGeno[1],submin)
return(indGeno)
}
FUN.addSnps <- function(indGeno){
indGeno <- as.numeric(indGeno)
indGeno <- matrix(unlist(indGeno), ncol=2, byrow = T)
indGeno <- indGeno[, 1] + indGeno[, 2]
return(indGeno)
}
FUN.SharedSNPs <- function(y){
for(z in y){
#bim <- read.table(paste0(pathoToBeds,"/",z))
bim <- read.table(z)
bimSnp <- as.character(bim[,1])
if(z==y[1]) SNPsComm <- bimSnp
if(z!=y[1]) SNPsComm <- intersect(SNPsComm,bimSnp)
}
return(SNPsComm)
}
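## Distance of each row of X to the vector y: sqrt of the mean squared
## difference, computed via mean((x-y)^2) = mean(x^2) + mean(y^2) - 2*mean(x*y)
## so no explicit loop over individuals is needed.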
TM.distEuc <- function(X,y){
X <- as.matrix(X)
y <- as.vector(y)
dX <- dim(X)
p <- dX[1]
n <- dX[2]
return(sqrt(as.vector(rowMeans(X^2) + mean(y^2) - 2 * X %*% y/n)))
} ## TM.distEuc
tm.ped <- function(pedFile.f, ncol.f='no', sep.f=' ', nrLines, head.f=F, nFields, verbose.f=F){
pedOut <- list()
con <- file(pedFile.f, 'r')
nrLinesFile <- as.numeric(unlist(strsplit(system(paste0('wc -l ',pedFile.f), intern=T),' '))[1])
print(nrLinesFile)
while(length(pedOut)<nrLinesFile){
ped.i <- scan(con, nlines=1, what='character', quiet=T, sep=sep.f, flush=F)
## print(length(ped.i))
## print(head(ped.i,10))
if(!missing(nFields)) ped.i <- ped.i[1:nFields]
## print(length(ped.i))
if(verbose.f) print(length(pedOut))
pedOut[[length(pedOut)+1]] <- ped.i
#if(!missing(nrLines)) if(count>nrLines) break
} ## while, get all lines of file
close(con)
if(identical(head.f,T)) {names(pedOut) <- pedOut[1]; pedOut <- pedOut[-1]}
return(pedOut)
}## close pedReadingFunction
FUN.distEuc <- function(X,y,z){
#inter <- intersect(colnames(X),names(y))
inter <- intersect(colnames(X),refsnpinter)
inter <- intersect(inter,z)
#print(paste0('Len Inter1 ',length(inter)))
if(nrow(X)==1) X <- t(data.frame(X[,inter]))
if(nrow(X)!=1) X <- X[,inter]
#print(paste0('Number Common SNPs between ',X,' and ',y,': ',length(X)))
y <- as.vector(y[inter])
#print(class(y))
#print(class(X))
EuDist <- TM.distEuc(X,y)
attr(EuDist,'commonSnps') <- length(inter)
return(EuDist)
} ## FUN.distEuc
## nameArithmetic.f <- arithmeticRefsMedoidsFile[1]
## for cases where we are reading from a text "ods" file
FUN.listArithmeticMedoidsOds <- function(nameArithmetic.f){
filName <- strsplit(nameArithmetic.f,'_')
if(identical(grep('rda$',nameArithmetic.f), as.integer(1))) load(nameArithmetic.f)
if(identical(grep('.ods$',nameArithmetic.f), as.integer(1))) arithmeticRefsMedoids <- tm.ped(nameArithmetic.f,head.f=F)
#arithmeticRefsMedoids <- tm.ped(nameArithmetic.f,head.f=F)
#arithmeticRefsMedoids <- load(nameArithmetic.f)
if(identical(grep('rds$',nameArithmetic.f), as.integer(1))) arithmeticRefsMedoids <- readRDS(nameArithmetic.f)
snpNames <- arithmeticRefsMedoids[[1]]
## For dealing with nucleotides being kept as part of snpNames
snpsMinusNucs <-lapply(snpNames[-1], function(z) strsplit(z, "_")[[1]][1])
snpNames[2:length(snpNames)] <- snpsMinusNucs
arithmetValuesSnps <- as.vector(arithmeticRefsMedoids[[2]])
names(arithmetValuesSnps) <- snpNames
## get population name, remove it from the vector
#pop <- as.character(arithmetValuesSnps[2])
pop <- tail(filName[[1]],4)[1]
arithmetValuesSnps <- arithmetValuesSnps[-1]
class(arithmetValuesSnps) <- 'numeric'
##attr(arithmetValuesSnps, 'popName') <- paste0(pop,".",filName[[1]][3])
attr(arithmetValuesSnps, 'popName') <- paste0(pop,".",tail(filName[[1]],3)[1])
## we are so cute, what's SimCity?
if(length(grep('Irish',attr(arithmetValuesSnps,'popName'),value=T)>0)) print("Calculating Distance To Tipperary...")
if(length(grep('Irish',attr(arithmetValuesSnps,'popName'),value=T)>0)) print("Not Far Enough.")
if(length(grep('Finnish',attr(arithmetValuesSnps,'popName'),value=T)>0)) print("Finnished Finish")
print(paste(attr(arithmetValuesSnps,'popName'),"Finished"))
return(arithmetValuesSnps)
} ## FUN.listArithmeticMedoidsOds
## function to read "ods" arithmetic functions
# MODULE Arithmetic medoids
# list of all arithmetic medoids
## Pull list of medoids, select specific chipsets if applicable
arithmeticRefsMedoidsFile <- list.files(path=pathToAriMedoids,pattern='medoidArithmetic_*',full.names=T)
if(seqchip=='Illumina'){
arithmeticRefsMedoidsFile <- grep("WG|Illumina",arithmeticRefsMedoidsFile,value=T)
}
if(seqchip=='Affymetrix'){
arithmeticRefsMedoidsFile <- grep("WG|Affymetrix",arithmeticRefsMedoidsFile,value=T)
}
if(seqchip!='Illumina'&&seqchip!='Affymetrix'&&seqchip!=''&&seqchip!='WG'){
arithmeticRefsMedoidsFile <- grep(paste0("WG|",seqchip),arithmeticRefsMedoidsFile,value=T)
print(paste0("***Warning: ",seqchip," is a custom value not a native value for seqchip, only NULL, WG, Illumina or Affymetrix are. The function will still find medoids marked at the end of their name with .",seqchip,".ods but they will have to have been produced by the user, if this is what you have done, ignore this warning. Otherwise check there isn't a typo in the value given.*** Please contact the maintainer of the package if you have any questions." ))
}
if(noseqdat){
arithmeticRefsMedoidsFile <- grep("WG",arithmeticRefsMedoidsFile,value=T,invert=T)
}
medoidsList <- lapply(arithmeticRefsMedoidsFile,FUN.listArithmeticMedoidsOds)
names(medoidsList) <- unlist(lapply(medoidsList, function(obj) attr(obj,'popName')))
medsnp <- list()
for(z in seq(length(medoidsList))) medsnp[[z]] <- names(medoidsList[[z]])
refsnpinter <- Reduce(intersect, medsnp)
for(z in medoidsList){
medsnp <- names(z)
if(exists('intermed')) intermed <- intersect(intermed,medsnp)
if(!exists('intermed')) intermed <- medsnp
}
#print(length(intermed))
MedConform <- function(medentry){
medentry <- medentry[intermed]
return(medentry)
}
medoidsList <- lapply(medoidsList,MedConform)
# MODULE MinMax
# from Snp00 get all hets; give 1 or 2 depending on alphabetical order of the genotype
FUN.getSnpsFromBeds <- function(bimFileName){
z2 <- bimFileName
snps <- read.table(paste0(z2))
snps <- as.character(snps$V2)
return(snps)
}
FUN.getUniqSnps <- function(bimFileName){
z2 <- bimFileName
snps <- read.table(paste0(z2))
snps <- as.character(snps[,1])
return(snps)
}
## get only the snps that will be used, via ped files
#bimFile <- list.files(path=pathTotpeds,pattern="*\\.bim$",full.names=T)
#bimFile <- grep("HGDP|HapMap|TU_|Hellenthal|Ipatimup|IrinaMP|UCD|slavic_temp|BalkansTemp|Tibet|Genome",bimFile,v=T)
#HMSNP <- read.table(HapMapSNPs)
#HMSNP <- as.character(HMSNP$V1)
#lapply(bimFile,FUN.Frq)
#frqFile <- list.files(path=wd,pattern="*\\.frq$")
#snpsList <- lapply(bimFile,FUN.getSnpsFromBeds)
SNPls <- list.files(path=pathTotpeds,pattern="*\\.snplist$",full.names=T)
snpsList <- lapply(SNPls,FUN.getUniqSnps)
snpsList <- unique(do.call('c',snpsList))
#unlink("snpls.txt")
#write.table(snpsList,file="snpls.txt",row.names=FALSE,col.names=FALSE,quote=FALSE)
## from snp00 file, that has all the snps in the genome, create file for our snps
#MinMaxFreq <- read.table(system.file('extdata',"FreqtempAllSnps00-IllAffy.frq",package="AncestryMapper"),h=T)
#row.names(MinMaxFreq) <- MinMaxFreq$SNP
#MinMaxFreq$SNP <- NULL
#MinMaxFreq <- t(apply(MinMaxFreq,1,function(vec) sort(vec)))
if(identical(grep('rda$',pathAll00), as.integer(1))){
load(pathAll00)
} else if(identical(grep('rds$',pathAll00), as.integer(1))){
MinMaxFreq <- readRDS(pathAll00)
} else{
MinMaxFreq <- read.table(pathAll00,header=F)
}
MinMaxFreq <- MinMaxFreq[row.names(MinMaxFreq)%in%snpsList,]
# MODULE beds: from ACGT to 12
# substituting A by 1; T by 2; using min/Max otherwise
## ped.f=list.files(path=pathTotpeds,pattern="*\\.bed$")[2]
FUN.bedMedoids <- function(ped.f){
print(ped.f)
#tempOut <- paste0(ped.f,'_out10') ## temporary file names; delete later
zx2 <- gsub("\\.tped","",ped.f)
outname <- strsplit(zx2,'/')[[1]][length(strsplit(zx2,'/')[[1]])]
#command10 <- paste0(plink,' --bfile ',zx2 , ' --recode transpose --out ',tempOut)
## transpose bed file; easier to get into plink
#system(command10, wait=T)
## snps and individuals; add to the final AM id table
snps <- unlist(SNPsComm)
inds <- read.table(paste0(zx2,'.tfam'))
inds <- inds[,2]
## qc: ped is 1234, not ACGT, only run on first line, 'nline=1'
#conQc <- read.table(paste0(tempOut,'.tped'), nrows=1)
conQc <- read.table(ped.f, nrows=1)
all12 <- unique(unlist(conQc)[-c(1:6)])
#if(any(all12%in%c('1','2','3','4'))) stop("tm: recode to ACGT, it's currently 1234")
## import tped file; tried scan; read.table the fastest
#snps1234 <- read.table(paste0(tempOut,'.tped'))
snps1234 <- read.table(ped.f)
#unlink(grep(tempOut,dir(),value=T))
row.names(snps1234) <- snps1234$V2
## remove pedigree
snps1234 <- snps1234[,!colnames(snps1234)%in%c('V1','V2','V3','V4')]
## matrix; much faster
snps1234 <- as.matrix(snps1234)
snps1234 <- snps1234[intersect(row.names(snps1234),row.names(MinMaxFreq)),]
SortedGeno <- t(apply(snps1234,1,function(vec) paste0(sort(unique(vec)),collapse='')))
#if(sum(SortedGeno=='CG')!=0) stop("CGs Present in .frq file, strand reading issue, remove.")
if(sum(SortedGeno=='CG')!=0) {
print(paste0("Found ",sum(SortedGeno=='CG')," CGs Present, strand reading issue, removing."))
snps1234 <- snps1234[colnames(SortedGeno)[!(colnames(SortedGeno)%in%colnames(SortedGeno)[SortedGeno[1,]=='CG'])],]
SortedGeno <- t(SortedGeno[,!(colnames(SortedGeno)%in%colnames(SortedGeno)[SortedGeno[1,]=='CG'])])
}
if(sum(SortedGeno=='AT')!=0) {
print(paste0("Found ",sum(SortedGeno=='AT')," ATs Present, strand reading issue, removing."))
snps1234 <- snps1234[colnames(SortedGeno)[!(colnames(SortedGeno)%in%colnames(SortedGeno)[SortedGeno[1,]=='AT'])],]
SortedGeno <- t(SortedGeno[,!(colnames(SortedGeno)%in%colnames(SortedGeno)[SortedGeno[1,]=='AT'])])
}
if(length(grep('0',SortedGeno[1,],value=T))!=0) {
print(paste0("Found ",length(grep('0',SortedGeno[1,],value=T))," SNPs with missingness Present, removing."))
snps1234 <- snps1234[names(grep('0',SortedGeno[1,],value=T,invert=T)),]
SortedGeno <- t(SortedGeno[,names(grep('0',SortedGeno[1,],value=T,invert=T))])
}
monoCGs <- SortedGeno=='C' | SortedGeno=='G'
#Deals with indels
indel <- nchar(SortedGeno)>2
## If any mono Gs or Cs in .frq file,
if(sum(monoCGs)!=0) {
snps1234 <- data.frame(UNIQID=row.names(snps1234),snps1234)
snps1234 <- as.matrix(snps1234)
if(sum(monoCGs)==1){
## get what is 1; 2 outside for what is not 1
min1 <- min(MinMaxFreq[monoCGs,], na.rm=F)
max2 <- max(MinMaxFreq[monoCGs,], na.rm=F)
## substitute 1 the minimum
submin <- gsub(min1,'1',snps1234[monoCGs,])
submax <- gsub(max2,'2',snps1234[monoCGs,])
#indGeno <- c(indGeno[1],submin)
#snp3 <- t(data.frame(snps1234[monoCGs,]))
#row.names(snp3) <- row.names(snps1234)[monoCGs]
#minMax.f <- t(data.frame(MinMaxFreq[dimnames(snp3)[[1]],]))
#row.names(minMax.f) <- row.names(snps1234)[monoCGs]
}
if(sum(monoCGs)>1){
minMax.f <- MinMaxFreq[dimnames(snps1234[monoCGs, ])[[1]],]
#debug(FUN.sumMinMaxName)
#undebug(FUN.sumMinMaxName)
snps1234[monoCGs,] <- t(apply(snps1234[monoCGs,],1, function(r.f) FUN.sumMinMaxName(minMax=minMax.f,indGeno=r.f)))
}
## Replacing monoCG max with 2
#snps1234 <- snps1234[,!colnames(snps1234)%in%"UNIQID"]
snps1234[monoCGs,] <- gsub("^C$|^G$","2",snps1234[monoCGs,])
} ## end of snps exist that are not A nor T
#Indels
if(sum(nchar(SortedGeno)>2)!=0){
indelrs <- colnames(SortedGeno)[indel]
if(sum(nchar(SortedGeno)>2)==1){
## get what is 1; 2 outside for what is not 1
min1 <- min(MinMaxFreq[indelrs,], na.rm=F)
max2 <- max(MinMaxFreq[indelrs,], na.rm=F)
## substitute 1 the minimum
submin <- gsub(min1,'1',snps1234[indelrs,])
submax <- gsub(max2,'2',snps1234[indelrs,])
#indGeno <- c(indGeno[1],submin)
#snp3 <- t(data.frame(snps1234[monoCGs,]))
#row.names(snp3) <- row.names(snps1234)[monoCGs]
#minMax.f <- t(data.frame(MinMaxFreq[dimnames(snp3)[[1]],]))
#row.names(minMax.f) <- row.names(snps1234)[monoCGs]
}
if(sum(nchar(SortedGeno)>2)>1){
minMax.f <- MinMaxFreq[dimnames(snps1234[indelrs, ])[[1]],]
#debug(FUN.sumMinMaxName)
#undebug(FUN.sumMinMaxName)
snps1234[indelrs,] <- t(apply(snps1234[indelrs,],1, function(r.f) FUN.sumMinMaxName(minMax=minMax.f,indGeno=r.f)))
snps1234[nchar(snps1234)>1] <-'2'
}
}
snps1234 <- snps1234[,!colnames(snps1234)%in%"UNIQID"]
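## Recode alleles to 1/2: A is always '1' and T always '2'; C and G are
## resolved from the sorted genotype pair (e.g. in 'AC' the C becomes '2',
## in 'CT' it becomes '1'), keeping the coding consistent per SNP.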
snps1234 <- gsub("A","1",snps1234)
snps1234 <- gsub("T","2",snps1234)
subx <- SortedGeno=='AC'
snps1234[subx,] <- gsub("C","2",snps1234[subx,])
subx <- SortedGeno=='CT'
snps1234[subx,] <- gsub("C","1",snps1234[subx,])
subx <- SortedGeno=='AG'
snps1234[subx,] <- gsub("G","2",snps1234[subx,])
subx <- SortedGeno=='GT'
snps1234[subx,] <- gsub("G","1",snps1234[subx,])
#debug(FUN.addSnps)
#undebug(FUN.addSnps)
# Sum of strands
SumPed <- apply(snps1234,1,FUN.addSnps)
if(length(inds)==1) {
SumPed <- t(data.frame(SumPed))
}
dimnames(SumPed)[[1]] <- inds
## distances of peds to lapplys
print(paste0('***** Number of SNPs Used: ',length(intersect(snps,refsnpinter))))
arithmInds <- lapply(medoidsList,function(medoid) FUN.distEuc(X=SumPed,y=medoid,z=snps))
#if(length(inds)==1) {
# arithmInds <- t(data.frame(arithmInds))
#}
AMids <- do.call('rbind',arithmInds)
## report; overlaps snps for each medoid, each ped
cat(ped.f, ';', paste0(row.names(AMids),': ',lapply(arithmInds, function(obj) attr(obj,'commonSnps'))),'\n',file=overlapNumbersFile, append=T)
AMids <- t(AMids)
dimnames(AMids)[[1]] <- inds
## these are the C_ raw distances
dimnames(AMids)[[2]] <- paste0('C_',dimnames(AMids)[[2]])
## indexes; least similar pop is 0; max is 100
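## e.g. raw distances c(0.8, 1.0, 1.2) rescale to indexes 100, 50, 0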
fun.rescale <- function(vec) 100-((vec-min(vec))/(max(vec)-min(vec))*100)
Indexes <- data.frame(t(round(apply(AMids, 1, fun.rescale),1)))
names(Indexes) <- gsub("C_", "I_", names(Indexes))
## merge Indexes, Coordinates
AMids <- data.frame(Indexes, AMids)
AMids$UNIQID <- row.names(AMids)
return(AMids)
}#FUN.bedMoids; big function: treats ped, gets AMids for each arithmeticMedoid
#debug(FUN.bedMedoids)
#undebug(FUN.bedMedoids)
## log file for number of Ts As Cgs
logFile <- 'report_ArithmeticMedoids.txt'
unlink(logFile)
cat('report for arithmeticMedoids\n',file=logFile)
## report file for number of snps for ped/medoids
overlapNumbersFile <- 'report_overlapSnpsPedMedoids.txt'
unlink(overlapNumbersFile)
cat('report for arithmeticMedoids\n',file=overlapNumbersFile)
## Pull list of Plink tPED files for use
tpedFile <- list.files(path=pathTotpeds,pattern="*\\.tped$",full.names=T)
print('tPED files used:')
print(tpedFile)
## Get list of common SNPs across all submitted Plink tPED files.
#SNPls <- list.files(path=pathTotpeds,pattern="*\\.snplist$",full.names=T)
SNPsComm <- FUN.SharedSNPs(SNPls)
print(paste0(length(SNPsComm)," common SNPs found between all submitted tPED files."))
print(paste0(length(MinMaxFreq[,1]),' common SNPs found between All00 reference and all submitted tPED files'))
## Apply main function, with or without multicore
if(AMmcapply==F) amidsList <- lapply(tpedFile, FUN.bedMedoids)
if(AMmcapply==T){
if(missing(nrcores)) nrcores <- detectCores()-2
amidsList <- mclapply(tpedFile, FUN.bedMedoids,mc.cores=nrcores)
}
amids <- do.call('rbind',amidsList)
#h(amids,2)
## Writes out AMid file for future reference and other analyses.
#nameOutAMid <- paste0('AMid',NameOut,'_ref',(ncol(amids)-1)/2,'_pops',(length(tpedFile)),'_inds',nrow(amids),'.amid')
nameOutAMid <- paste0('AMid',NameOut,'_ref',(ncol(amids)-1)/2,'_inds',nrow(amids),'_SNPs',length(MinMaxFreq[,1]),'.amid')
#if(NameOut==NULL) nameOutAMid <- paste0('AMid',outname,'_ref',(ncol(amids)-1)/2,'_inds',nrow(amids),'_SNPs',length(MinMaxFreq[,1]),'.amid')
write.table(amids, row.names=F, col.names=T, quote=F, file=nameOutAMid,sep=' ')
print(paste0('Wrote out AMid file to, ',getwd(),'/',nameOutAMid))
## Set wd back to original
if(exists('initialwd')) setwd(initialwd)
rm(intermed)
return(amids)
}
| /scratch/gouwar.j/cran-all/cranData/AncestryMapper/R/R_AMids_distanceToArithmeticMedoids_SingleMedoids.R |
#' Calculate genetic distances.
#'
#' Calculates and assigns Ancestry Mapper Ids (AMids) in a cruder, but faster, manner than calculateAMidsArith.
#'
#' @param pedtxtFile Character vector giving path to PED file to be used. The PED file should include all 51 HGDP references and the individuals for which the user wishes to calculate the genetic distance.
#'
#' @param fileReferences Character vector giving path to the file detailing the individuals in the ped file that correspond to the references, and the populations they refer to. A file that uses the 51 HGDP reference populations is provided with the package.
#'
#'
#' @examples
#' \dontrun{
#' library(AncestryMapper)
#'
#' HGDP_References <- system.file('extdata',
#' 'HGDP_References.txt',
#' package = 'AncestryMapper')
#'
#'
#' HGDP_500SNPs <- system.file('extdata',
#' 'HGDP_500SNPs.ped',
#' package = 'AncestryMapper')
#'
#' Corpheno <- system.file('extdata',
#' 'CorPheno',
#' package = 'AncestryMapper')
#'
#' genetic.distance <- calculateAMids(pedtxtFile = HGDP_500SNPs,
#' fileReferences = HGDP_References)
#'
#' plotAMids(AMids = genetic.distance, phenoFile = Corpheno, columnPlot = "I")
#' }
#' @rdname calculateAMids
#' @export
#'
calculateAMids <- function(pedtxtFile, fileReferences){
FUN.sumPedG <- function(row){
row <- as.numeric(row)
## Treat 0s and NAs
row.na <- which(is.na(row))
row.0 <- which(row==0)
row.change <- c(row.na, row.0)
## When row is NA or O - random substitution of 1 or 2
row[row.change] <- sample(c(1,2), length(row.change), replace=T)
line.i <- matrix(unlist(row), length(row)/2, 2, byrow=T)
line.i <- line.i[,1] + line.i[,2]
return(line.i)
}
## function to calculate distances; euclidean divided by number of snps
## maximum of distance 2, minimum 0
FUN.dist2Ref <- function(row){
row <- as.numeric(row)
distOut <- sqrt(as.vector(rowMeans(referencesMatrix^2) + mean(row^2) - 2 * referencesMatrix %*% row/ncol(referencesMatrix)))
distOut <- round(distOut, 4)
return(distOut)
}
## Check only 1,2 and NAs
FUN.checkVectors <- function(row){
pedUnique <- unique(row)
if(!identical(setdiff(pedUnique, c(0,1,2,NA)), character(0)))
stop("Input file can have 0, 1, 2 and NA values only.")
return(NULL)
}
## Calculate sumPed from scratch, ped, map
## Import ped, faster with readLines so that it is faster
ped <- readLines(pedtxtFile)
## strplit, each vector by ' '
ped <- lapply(ped, function(row) unlist(strsplit(row, split=' ')))
## Individual names
indNames <- unlist(lapply(ped, function(row) row[2])) ### row 2 because ped file is Id
##Check duplicates; only looks at the individual ids not fam ids
if(sum(duplicated(indNames))!=0) stop("There are duplicate individuals in the input file.")
## Extract genotypes, remove pedigree informations
ped <- lapply(ped, function(row) row[7:length(row)])
## Check each vector has only 0, 1, 2 and NAs
checkRows <- lapply(ped, FUN.checkVectors)
##Sum each genotype per snp; add snps; convert 2 cols into one
sumPed <- lapply(ped, FUN.sumPedG)
print('Summation of genotypes complete.')
sumPed <- as.data.frame(do.call('rbind', sumPed))
row.names(sumPed) <- indNames
## Calculate indices: Assign 100 to most similar reference, 0 to least similar
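## e.g. distances c(0.8, 1.0, 1.2) map to indices 100, 50, 0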
FUN.indexes <- function(vec) round(100 - ((vec - min(vec))/(max(vec) - min(vec)) * 100), 1)
## Load the references file
references <- read.table(fileReferences, header=T, as.is=T)
## References matrix: 51 references
## Error if references not in ped file format
referencesMatrix <- as.matrix(sumPed[references[,'Reference'],])
if(nrow(referencesMatrix)!=51)
warning(paste('Number of references is usually 51; you are using ',nrow(referencesMatrix)), immediate.=T)
## Distances of every individual to references
count <- 0
dist2Ref <- apply(sumPed, 1, FUN.dist2Ref)
print('Distances of Samples to References Computed.')
dist2Ref <- as.data.frame(t(dist2Ref))
## Names; same as references
names(dist2Ref) <- paste('C_', references$Pop, sep='')
## Indices; min:0 max:100
Indexes <- data.frame(t(apply(dist2Ref, 1, FUN.indexes)))
names(Indexes) <- gsub('C_', 'I_', names(Indexes))
distAll <- data.frame(dist2Ref, Indexes)
distAll <- data.frame(Id=row.names(distAll), distAll)
## Return Distances
return(distAll)
}
| /scratch/gouwar.j/cran-all/cranData/AncestryMapper/R/R_CalculateAMIds.R |
#' Creates Ancestry Mapper Population Reference.
#'
#' Generates arithmetic population reference from PLINK tPED files.
#'
#' @param pathTotpeds Character vector giving path to folder containing tPED file(s) to be used.
#'
#' @param AMmcapply Logical value (TRUE or FALSE), specifying if the multicore function mclapply should be used. Inappropriate for most HPC cluster systems.
#' Default = FALSE
#'
#' @param nrcores Numeric value detailing how many cores should be used if AMmcapply==TRUE. If left unspecified the number of cores will be detected and mc.cores will be set to that number -1.
#'
#' @param wd Character vector giving the desired working directory to house the outputs of calculateAMids. If left unspecified will use current working directory.
#'
#' @param pathAll00 Character vector giving the path to a file containing the full data table of each dbSNP and both alleles. A toy version covering the SNPs used in the toy data is included. A full version can be found at: http://bit.ly/1OUstDP
#'
#' @param chipMan Character vector giving name of company from which the SNP panel is derived. Eg 'Illumina', 'Affymetrix'. If no value is given will default to 'ChipMan'. If it is whole genome sequencing, please put 'WG'.
#' The value will appear in the name of the arithmetic reference file. e.g. 'medoidArithmetic_Yoruba_HGDP_1000_Illumina.rda'.
#'
#' @param OutForm Character vector giving option for output format for arithmetic medoids. Can be one of three options.
#' 'ods' will generate a raw text file with the default extension of '.ods'. This is the most flexible format.
#' 'rds' will save the arithmetic medoid as a .rds file which can be loaded into R faster and is also roughly a third the size of the raw text version.
#' 'rda' will save the arithmetic medoid as a .rda file which can be loaded into R faster and is also roughly a third the size of the raw text version. This is the default option.
#'
#'
#' @examples
#' \dontrun{
#' chipManExample <- 'Illumina'
#' tpeds <- system.file('extdata', package='AncestryMapper')
#'
#' createMedoid(pathTotpeds = tpeds, chipMan = chipManExample)
#'
#' }
#' @import parallel
#' @rdname createMedoid
#' @export
#'
createMedoid <- function(pathTotpeds, AMmcapply=F, nrcores, wd, pathAll00, chipMan='ChipMan', OutForm='rda'){
#if(AMmcapply){
# library(parallel)
#}
if(missing(pathAll00)) stop("Error: No Path Given for pathAll00")
if(missing(pathTotpeds)) stop("Error: No Path Given for tPED Files")
#if(missing(plink)) stop("Error: No path given for Plink binary, if it is in the Root Directory, should still be either plink='plink' or plink='plink1.9', depending on version")
if(!missing(wd)){
initialwd <- getwd()
setwd(wd)
}
if(missing(wd)){
wd <- getwd()
setwd(wd)
}
## recode a genotype's alleles to 1/2, substituting '1' for the alphabetically first allele
FUN.MinMax12 <- function(freq){
MinMaxGeno <- c(freq[1],freq[2])
## get what is 1; 2 outside for what is not 1
min1 <- min(MinMaxGeno, na.rm=F)
## max1 <- max(MinMaxGeno, na.rm=F)
## substitute 1 the minimum
freq <- gsub(min1,'1',freq)
## freq <- gsub(max1,'2',freq)
return(freq)
}## FUN.MinMax12
## Calculate arithmetic medoids
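## expected diploid dosage for one SNP: 2 * (minAllele*freq + maxAllele*(1-freq))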
FUN.Arithmetic <- function(MinMax12){
class(MinMax12) <- 'numeric'
Arith <- (MinMax12[1]*MinMax12[3])+(MinMax12[2]*(1-MinMax12[3]))
Arith <- Arith*2
return(Arith)
}## FUN.Arithmetic
FUN.sumMinMaxName <- function(minMax,indGeno){
MinMaxGeno <- as.character(minMax[indGeno[1],])
## get what is 1; 2 outside for what is not 1
min1 <- min(MinMaxGeno, na.rm=F)
## substitute 1 the minimum
submin <- gsub(min1,'1',indGeno[-1])
indGeno <- c(indGeno[1],submin)
return(indGeno)
}
FUN.addSnps <- function(indGeno){
indGeno <- as.numeric(indGeno)
indGeno <- matrix(unlist(indGeno), ncol=2, byrow = T)
indGeno <- indGeno[, 1] + indGeno[, 2]
return(indGeno)
}
FUN.getSnpsFromBeds <- function(bimFileName){
snps <- read.table(bimFileName)
snps <- as.character(snps$V1)
return(snps)
}
## get only the snps that will be used, via ped files
bimFile <- list.files(path=pathTotpeds,pattern="*\\.snplist$",full.names=T)
## bimFile <- grep("HGDP|HapMap",bimFile,v=T)
snpsList <- lapply(bimFile,FUN.getSnpsFromBeds)
snpsList <- unique(do.call('c',snpsList))
gc()
if(identical(grep('rda$',pathAll00), as.integer(1))){
load(pathAll00)
} else if(identical(grep('rds$',pathAll00), as.integer(1))){
MinMaxFreq <- readRDS(pathAll00)
} else{
MinMaxFreq <- read.table(pathAll00,header=F)
}
MinMaxFreq <- MinMaxFreq[row.names(MinMaxFreq)%in%snpsList,]
## ped.f=list.files(path=pathTotpeds,pattern="*\\.bed$")[2]
FUN.bedMedoids <- function(ped.f){
#zx2 <- paste0(pathTotpeds,'/',gsub(".tped","",ped.f))
zx2 <- gsub(".tped","",ped.f)
#outname <- gsub(paste0(pathTotpeds,'/'),"",ped.f)
outname <- strsplit(zx2,'/')[[1]][length(strsplit(zx2,'/')[[1]])]
#outname <- gsub(".tped","",outname)
famfile <- paste0(zx2,".tfam")
famfile <- read.table(famfile)
if(length(famfile$V2)<10) print(paste0('Insufficient samples, minimum at this release is 10. Population dataset submitted: ',ped.f,' contains ',length(famfile$V2)))
if(length(famfile$V2)>=10){
print(ped.f)
#tempOut <- paste0(ped.f,'_out10') ## temporary file names; delete later
#zx2 <- paste0(pathTotpeds,'/',gsub(".bed","",ped.f))
## snps and individuals; add to the final AM id table
#snps <- read.table(paste0(zx2,'.bim'))
#inds <- read.table(paste0(zx2,'.fam'))
inds <- famfile[,2]
## qc: ped is 1234, not ACGT, only run on first line, 'nline=1'
##conQc <- read.table(paste0(tempOut,'.tped'), nrow=1)
##all12 <- unique(unlist(conQc)[-c(1:4)])
##if(any(all12%in%c('1','2','3','4'))) stop("tm: recode to ACGT, it's currently 1234")
## import tped file; tried scan; read.table the fastest
##snps1234 <- read.table(paste0(tempOut,'.tped'))
snps1234 <- read.table(ped.f)
snps <- snps1234[,2]
row.names(snps1234) <- snps1234$V2
## remove pedigree
snps1234 <- snps1234[,!colnames(snps1234)%in%c('V1','V2','V3','V4')]
## matrix; much faster
snps1234 <- as.matrix(snps1234)
snps1234 <- snps1234[intersect(row.names(snps1234),row.names(MinMaxFreq)),]
SortedGeno <- t(apply(snps1234,1,function(vec) paste0(sort(unique(vec)),collapse='')))
#print(sum(SortedGeno=='CG'))
#print(colnames(SortedGeno)[SortedGeno=='CG'])
#print(SortedGeno=='CG')
#colnames(SortedGeno)[SortedGeno == "CG"]
CGs <- SortedGeno=='CG'
indel <- nchar(SortedGeno)>2
#if(sum(SortedGeno=='CG')!=0){
# SortedGeno2 <- t(matrix(SortedGeno[SortedGeno != "CG"]))
# colnames(SortedGeno2) <- colnames(SortedGeno)[SortedGeno != "CG"]
# SortedGeno <- SortedGeno2
# rm(SortedGeno2)
#}
#if(sum(nchar(SortedGeno)>2)){
# SortedGeno2 <- t(matrix(SortedGeno[nchar(SortedGeno)==2]))
# colnames(SortedGeno2) <- colnames(SortedGeno)[nchar(SortedGeno)==2]
# SortedGeno <- SortedGeno2
# rm(SortedGeno2)
#}
#if(sum(SortedGeno=='CG')!=0) stop("CGs Present in .frq file, strand reading issue, remove.")
monoCGs <- SortedGeno=='C' | SortedGeno=='G'
    ## If any monomorphic Cs or Gs are present, recode them against the dbSNP alleles in MinMaxFreq
    if(sum(monoCGs)!=0) {
      snps1234 <- data.frame(UNIQID=row.names(snps1234),snps1234)
      snps1234 <- as.matrix(snps1234)
      if(sum(monoCGs)==1){
        ## the minimum dbSNP allele becomes 1, the maximum becomes 2
        monoName <- row.names(snps1234)[monoCGs]
        min1 <- min(MinMaxFreq[monoName,], na.rm=F)
        max2 <- max(MinMaxFreq[monoName,], na.rm=F)
        snps1234[monoCGs,] <- gsub(max2,'2',gsub(min1,'1',snps1234[monoCGs,]))
      }
      if(sum(monoCGs)>1){
        minMax.f <- MinMaxFreq[dimnames(snps1234[monoCGs, ])[[1]],]
        snps1234[monoCGs,] <- t(apply(snps1234[monoCGs,],1, function(r.f) FUN.sumMinMaxName(minMax=minMax.f,indGeno=r.f)))
      }
      ## any remaining monomorphic C or G is replaced with 2
      snps1234[monoCGs,] <- gsub("^C$|^G$","2",snps1234[monoCGs,])
    } ## end of monomorphic C/G recoding
    if(sum(nchar(SortedGeno)>2)!=0){
      indelrs <- colnames(SortedGeno)[indel]
      if(sum(nchar(SortedGeno)>2)==1){
        ## the minimum dbSNP allele becomes 1, the maximum becomes 2
        min1 <- min(MinMaxFreq[indelrs,], na.rm=F)
        max2 <- max(MinMaxFreq[indelrs,], na.rm=F)
        snps1234[indelrs,] <- gsub(max2,'2',gsub(min1,'1',snps1234[indelrs,]))
      }
      if(sum(nchar(SortedGeno)>2)>1){
        minMax.f <- MinMaxFreq[dimnames(snps1234[indelrs, ])[[1]],]
        snps1234[indelrs,] <- t(apply(snps1234[indelrs,],1, function(r.f) FUN.sumMinMaxName(minMax=minMax.f,indGeno=r.f)))
      }
      ## any remaining multi-character (indel) genotype code becomes 2
      snps1234[nchar(snps1234)>1] <- '2'
    }
    snps1234 <- snps1234[,!colnames(snps1234)%in%"UNIQID"]
    ## recode ACGT to 1/2: A is always 1, T is always 2, and the partner
    ## allele at each SNP takes the remaining code
    snps1234 <- gsub("A","1",snps1234)
    snps1234 <- gsub("T","2",snps1234)
    subx <- SortedGeno=='AC'
    snps1234[subx,] <- gsub("C","2",snps1234[subx,])
    subx <- SortedGeno=='CT'
    snps1234[subx,] <- gsub("C","1",snps1234[subx,])
    subx <- SortedGeno=='AG'
    snps1234[subx,] <- gsub("G","2",snps1234[subx,])
    subx <- SortedGeno=='GT'
    snps1234[subx,] <- gsub("G","1",snps1234[subx,])
    ## drop C/G SNPs, for which the strand cannot be verified
    if(sum(SortedGeno=='CG')!=0){
      keeprs <- colnames(SortedGeno)[!CGs]
      snps1234 <- snps1234[keeprs,]
      SortedGeno <- SortedGeno[,keeprs]
    }
    ## label each SNP as rsID_genotype
    if(is.matrix(SortedGeno)) SortedGeno <- setNames(as.vector(SortedGeno), colnames(SortedGeno))
    SortedGeno <- paste0(names(SortedGeno),'_',SortedGeno)
    ## sum the two allele codes per individual at each SNP (FUN.addSnps)
    SumPed <- apply(snps1234,1,FUN.addSnps)
    dimnames(SumPed)[[1]] <- inds
    ## pairwise Euclidean distances between individuals
    eucdist <- as.matrix(dist(SumPed,method="euclidean",diag=T,upper=T))
    h <- svd(eucdist,1,0)
    h0 <- data.frame(h$u)
    h0$V2 <- inds
    h001 <- h0[order(h0$h.u),]
    ## drop outliers beyond 2 standard deviations on the first singular vector
    svdsd <- 2*sd(h001[,1])
    svdm <- mean(h001[,1])
    upperlm <- svdm + svdsd
    lowerlm <- svdm - svdsd
    h002 <- h001[h001[,1] > lowerlm & h001[,1] < upperlm,]
    ## take 10 individuals evenly spaced along the first singular vector
    PCAseq <- round(seq(from=1,to=length(h002[,1]),length.out=10),0)
    PCAseqInds <- as.character(h002[PCAseq,2])
    SumPed <- SumPed[row.names(SumPed)%in%PCAseqInds,]
    ## the arithmetic medoid is the per-SNP mean score of those individuals
    SumPedScores <- lapply(seq_len(ncol(SumPed)), function(i) as.numeric(SumPed[,i]))
    MedScores <- lapply(SumPedScores,function(i) round(mean(unlist(i)),1))
    MedScores <- as.character(unlist(MedScores))
    arithmeticRefsMedoids <- list(c('Pop',SortedGeno),c(outname,MedScores))
    print(outname)
    if(OutForm=='ods'){
      write.table(arithmeticRefsMedoids,file=paste0('medoidArithmetic_',outname,'_',length(colnames(SumPed)),'_',chipMan,'.ods'),row.names=F,col.names=T,quote=F,sep=' ')
      print(paste0('Wrote Arithmetic Medoid to: ',getwd(),'/','medoidArithmetic_',outname,'_',length(colnames(SumPed)),'_',chipMan,'.ods'))
    }
    if(OutForm=='rda'){
      save(arithmeticRefsMedoids,file=paste0('medoidArithmetic_',outname,'_',length(colnames(SumPed)),'_',chipMan,'.rda'))
      print(paste0('Saved Arithmetic Medoid to: ',getwd(),'/','medoidArithmetic_',outname,'_',length(colnames(SumPed)),'_',chipMan,'.rda'))
    }
    if(OutForm=='rds'){
      saveRDS(arithmeticRefsMedoids,file=paste0('medoidArithmetic_',outname,'_',length(colnames(SumPed)),'_',chipMan,'.rds'))
      print(paste0('Saved Arithmetic Medoid to: ',getwd(),'/','medoidArithmetic_',outname,'_',length(colnames(SumPed)),'_',chipMan,'.rds'))
    }
}
}
  tpedFiles <- list.files(path=pathTotpeds,pattern="\\.tped$",full.names=T)
  if(!AMmcapply) lapply(tpedFiles, FUN.bedMedoids)
  if(AMmcapply){
    if(missing(nrcores)) nrcores <- detectCores()-2
    mclapply(tpedFiles, FUN.bedMedoids,mc.cores=nrcores)
  }
}
| /scratch/gouwar.j/cran-all/cranData/AncestryMapper/R/R_CreateMedoids.R |
## ---- eval=FALSE---------------------------------------------------------
# $plink --bfile All-00 \
# --bmerge Prep.bed Prep.bim Prep.fam \
# --make-bed --out PrepMerge
## ---- eval=FALSE---------------------------------------------------------
# $plink --bfile Prep --flip PrepMerge-merge.missnp --make-bed --out PrepFlipped
#
# $plink --bfile All-00 \
# --bmerge PrepFlipped.bed PrepFlipped.bim PrepFlipped.fam \
# --make-bed --out PrepMerge
## ---- eval=FALSE---------------------------------------------------------
# $plink --bfile PrepMerge --remove IndRm --make-bed --out PrepMerge
## ---- eval=FALSE---------------------------------------------------------
# $plink --bfile PrepMerge --transpose --recode --write-snplist --out PrepFinal
## ---- eval=FALSE---------------------------------------------------------
# library(AncestryMapper)
#
# #Path to folder containing population references
# Refs <- system.file('data', package = 'AncestryMapper')
#
# #Path to folder containing samples in tPED format
# tpeds <- system.file('extdata', package = 'AncestryMapper')
#
# #Path to CorPheno file
# Corpheno <- system.file('extdata', 'CorPheno', package = 'AncestryMapper')
#
# #Path to dbSNP allele data file
# All00Frq <- system.file('data', 'MinMaxFreq.rda', package = 'AncestryMapper')
#
## ---- eval=FALSE---------------------------------------------------------
# genetic.distance <- calculateAMidsArith(pathTotpeds = tpeds,
# NameOut = 'Example',
# pathToAriMedoids = Refs,
# pathAll00 = All00Frq)
## ---- eval=FALSE---------------------------------------------------------
# plotAMids(AMids = genetic.distance, phenoFile = Corpheno, columnPlot = 'I')
## ---- eval=FALSE---------------------------------------------------------
# #Path to folder containing samples for each population in tPED format
# tpeds <- system.file("extdata", package = "AncestryMapper")
#
# #Path to dbSNP allele data file
# All00Frq <- system.file("data", "MinMaxFreq.rda", package = "AncestryMapper")
#
# createMedoid(pathTotpeds = tpeds, pathAll00 = All00Frq)
| /scratch/gouwar.j/cran-all/cranData/AncestryMapper/inst/doc/AncestryMapper2.0.R |
---
title: "AncestryMapper Overview"
author: "Eoghan O'Halloran, Tiago Magalhães, Darren J. Fitzpatrick"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: yes
vignette: >
%\VignetteIndexEntry{AncestryMapper Overview}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
Introduction
============
AncestryMapper is an R package that implements the methods described in "HGDP and HapMap Analysis by Ancestry Mapper Reveals Local and Global
Population Relationships" Magalhães TR, Casey JP, Conroy J, Regan R, Fitzpatrick DJ, et al. PLoS ONE 7(11): e49438. (2012) Ancestry Mapper assigns genetic ancestry to an individual and allows the study of relationships between
local and global populations. The method gives each individual an
Ancestry Mapper Id (AMid), a genetic identifier comprising genetic coordinates that correspond to its relationship to various reference populations. The AMid metrics
have intrinsic biological meaning and provide a tool to measure genetic similarity between world
populations.
Package Functions
=================
The package consists of five functions:
- **calculateAMidsArith**: calculates and assigns Ancestry Mapper Ids (AMids) to each individual using a new, more precise, arithmetic method
- **calculateAMids**: calculates and assigns Ancestry Mapper Ids (AMids) identical to the older versions of AncestryMapper
- **plotAMids**: produces a heatmap representation of AMids
- **createMedoid**: constructs a reference from sample inputs
- **refAdd**: adds user-supplied references or data to a reference file, containing information for order, labelling and color
**calculateAMidsArith**
--------------
For each individual, **calculateAMidsArith** computes the genetic distances
amongst that individual and the set of selected references. As input, the
function requires a tPED file, a standard
file format used by the PLINK software suite. For details on the
format see <http://pngu.mgh.harvard.edu/~purcell/plink/#tped>.
It also
requires the file 'CorPheno', containing the references with columns in the order of
population, reference and order; an example is included with the package.
Pheno_Pop Pheno_Data Pheno_Region Colors_Pop Colors_Region Colors_Data Order UNIQID Fam
--------- ---------- ------------ ---------- ------------- ----------- ----- ------ ---
Example_Pop Fedorova2013 EUR 999999 black black 140 Example_ID Example_Fam
Neanderthal.SP SP EUR 999999 black black 140 AltaiNea AltaiNea
Denisovan.SP SP EUR 999999 black black 140 AltaiDen DenisovaPinky
A file containing the record of the major and minor alleles as described by dbSNP is needed. An example file for use with the toy data can be found as 'MinMaxFreq.rda' included with the package.
Only SNPs with entries in the 'MinMaxFreq.rda' file will be used in the analysis.
A larger MinMaxFreq file containing SNPs used on common Illumina and Affymetrix ChIPs in addition to others is available at:
<http://bit.ly/1OUstDP>
Additionally the user can produce their own 'MinMaxFreq' reference file. This should be a data frame with 2 unnamed columns consisting of a column for each allele and row names corresponding to the rsID. This object must be named 'MinMaxFreq' and saved as an rda file.
row.names V1 V2
------ --- ---------
rs6663840 A G
rs548726 C T
rs10803320 C T
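A minimal sketch of building and saving such a file, reusing the illustrative rsIDs and alleles from the table above:

```{r, eval=FALSE}
MinMaxFreq <- data.frame(V1 = c("A", "C", "C"),
                         V2 = c("G", "T", "T"),
                         row.names = c("rs6663840", "rs548726", "rs10803320"))
save(MinMaxFreq, file = "MinMaxFreq.rda")
```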
**calculateAMidsArith** returns a dataframe containing the genetic distance of each
individual to the references. This provides the raw distance
measures (Starting with the prefix C\_) and indices (Normalized values,
starting with the prefix I\_).
The genetic distance is computed as the Euclidean distance normalized by
the number of SNPs, between each individual and all the references used. AMids for a single individual from any dataset can be
computed provided there is a reasonable overlap between the set of SNPs
for that individual and all the references used. The AMids can take values
from 0 to 2. In our experience, the values are in the range 0.4 to 1.1.
The normalized values of the distances are such that the highest
reference is scored as 100, the lowest as 0, and all others adjusted
accordingly. These indices place the individual in the genomic map, thus providing a global overview of the relevant
references for each individual.
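As an illustrative sketch of this rescaling (the raw distances below are made up; the package computes the I\_ columns internally from the C\_ columns):

```{r, eval=FALSE}
rawDistances <- c(0.62, 0.81, 1.05)
indices <- 100 * (rawDistances - min(rawDistances)) / (max(rawDistances) - min(rawDistances))
```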
**calculateAMids**
--------------
For each individual, calculateAMids computes the genetic distances
amongst that individual and the set of HGDP references (or a set provided by the user). As input, the
function requires a PED formatted file. PED formatting is the standard
file format required by the PLINK software suite. For details on the
format see <http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#ped>. It also
requires a file containing the ids of the individuals to be used as references, and the population they correspond to.
As output, returns a dataframe containing the genetic distance of each
individual to the all HGDP references. We provide the raw distance
measures (starting with the prefix C_) and indices (normalized Values,
starting with the prefix I_).
As with calculateAMidsArith, genetic distance is computed as the Euclidean distance normalized by
the number of SNPs, between each individual and the references.
The user can include new references in AMids by editing the file 'HGDP_References.txt', inserting the population and the corresponding individual's name.
A demo file using 500 SNPs of HGDP data is included as 'HGDP_500SNPs.ped'; it is compatible with the 'CorPheno' file also included in the package. The small number of SNPs and samples will result in a highly noisy example.
For more information on using **calculateAMids** see ?**calculateAMids**()
###Producing an Input File for calculateAMids###
The PED file should include individuals that will be taken as the population references, which will be used to calculate the Ancestry Mapper indices (AMids) for the user dataset. In our original work we used as references the 51 populations included in the Human Genome Diversity Project. The HGDP dataset can be obtained at <http://hagsc.org/hgdp/files.html>.
To merge a custom ped file with a ped file containing the references, users can use PLINK. The commands \-\-bmerge or \-\-merge are used to merge two ped files.
In most cases there will be strand inconsistencies, which can be rectified by flipping SNPs with the command \-\-flip. For SNPs that are C/G or A/T it is impossible to determine which strand they are on, and they should therefore be removed.
Ancestry mapper requires the ped files to be in the 1/2 coding system.
The individual Ids are taken as the second column of the ped file; these ids should be unique.
We have produced a bed file with the references for the 51 HGDP populations, with 630,597 SNPs; the file is named HGDP_51RefAM_AutosomalSnps_630597_ACGT and can be obtained at <http://bit.ly/2bkWDSQ>.
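As a hedged sketch of such a merge and recode to the 1/2 coding (PLINK 1.9 syntax; the file names are placeholders):

```{r, eval=FALSE}
$plink --file Custom --merge HGDP.ped HGDP.map --recode 12 --out CustomHGDP
```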
**plotAMids**
---------
**plotAMids** is used to visualize the relationship amongst individuals
and the references. **plotAMids** takes as input the dataframe of genetic
distances returned by **calculateAMidsArith**. The user can also provide a file with phenotypes
for each individual which will be visible in the plot. Colors used were taken from the BlBrewer and RedBl packages.
**createMedoid**
---------
**createMedoid** constructs an arithmetic reference from a tPED containing only one population with
at least 10 individuals. The tPED should contain only individuals of a specific population and be formatted and prepared as described in 'Producing a Sample Input File'.
**refAdd**
---------
**refAdd** adds user-supplied references or data to the CorPheno file, giving population, color and ordering information to cluster the results of samples by population.
Producing a Sample Input File
====================
AncestryMapper's references are produced using sequences in the same strand orientation as that used by dbSNP.
Thus, all data input files for the functions **calculateAMidsArith** and **createMedoid** need also to be in the same orientation as dbSNP.
If your data is already in the same strand orientation as dbSNP you can skip this section.
The format for any sample data to be analysed by the function **calculateAMidsArith** or used to create a reference with **createMedoid** is a PLINK tPED.
All files used in this tutorial can be found in the 'extdata' folder with the package. An example BED file, 'All-00', containing the dbSNP alleles for 1000 SNPs is included.
A full version of the 'All-00' BED file covering 53,509,352 SNPs from the dbSNP database can be found at:
<http://bit.ly/1OUstDP>
It is faster to use PLINK BED format for the following steps; at the end the BED file needs to be converted to tPED format.
It is also faster to extract only those SNPs present in your dataset from the All-00 BED and to work with that output.
The most efficient way to get data in the same orientation as that used by dbSNP is to merge the 00-All file with your BED file using the \-\-bmerge command in PLINK;
both files should be in the ACGT format.
```{r, eval=FALSE}
$plink --bfile All-00 \
--bmerge Prep.bed Prep.bim Prep.fam \
--make-bed --out PrepMerge
```
If there are SNPs in different strand orientations, a list of the SNPs affected will be output in a file ending in '-merge.missnp'.
Flip these SNPs using the file ending in '-merge.missnp' in your sample file before merging the two files again.
```{r, eval=FALSE}
$plink --bfile Prep --flip PrepMerge-merge.missnp --make-bed --out PrepFlipped
$plink --bfile All-00 \
--bmerge PrepFlipped.bed PrepFlipped.bim PrepFlipped.fam \
--make-bed --out PrepMerge
```
At the end of this merging, the 'individual' from the All-00 file should be removed using the \-\-remove command in PLINK. A file containing
the ID and family ID for the All-00 individual is included as 'IndRm'.
```{r, eval=FALSE}
$plink --bfile PrepMerge --remove IndRm --make-bed --out PrepMerge
```
For SNPs that are C/G or A/T the strand cannot be verified. The current AncestryMapper functions exclude them automatically from analysis.
Sites with any missingness are also automatically excluded from analysis. Thus, if you wish to use any SNPs with samples missing you will need to impute replacements or exclude individuals who
have missing genotypes.
The final step is to transform the BED file to a tPED file and to output a list of the SNPs used.
```{r, eval=FALSE}
$plink --bfile PrepMerge --transpose --recode --write-snplist --out PrepFinal
```
It is important to keep a file with the SNPs used in the sample with the ending '.snplist' as **calculateAMidsArith** will search for all the '.snplist' files corresponding to the path of the tPEDs.
Example Data
========
Example data in the form of samples and references is provided, containing 1000 SNPs from numerous populations and datasets (147 populations and 591 individuals in total). Due to the low number of SNPs,
the plots will look noisy.
Additional Data
========
Full-sized medoids and a larger dbSNP reference ('00-All') for use with real user samples are currently being hosted at:
<http://bit.ly/1OUstDP>
Tutorial
========
Below is a short tutorial.
####
The first step is to call the paths to the example data files distributed with the package.
```{r, eval=FALSE}
library(AncestryMapper)
#Path to folder containing population references
Refs <- system.file('data', package = 'AncestryMapper')
#Path to folder containing samples in tPED format
tpeds <- system.file('extdata', package = 'AncestryMapper')
#Path to CorPheno file
Corpheno <- system.file('extdata', 'CorPheno', package = 'AncestryMapper')
#Path to dbSNP allele data file
All00Frq <- system.file('data', 'MinMaxFreq.rda', package = 'AncestryMapper')
```
####
Calculate the genetic distance of the samples in the PLINK tPED files to the references.
```{r, eval=FALSE}
genetic.distance <- calculateAMidsArith(pathTotpeds = tpeds,
NameOut = 'Example',
pathToAriMedoids = Refs,
pathAll00 = All00Frq)
```
####
The next step is to plot the results. In this example we are using the normalised values for each individual (columnPlot = 'I'). For more information on the function arguments see ?plotAMids()
```{r, eval=FALSE}
plotAMids(AMids = genetic.distance, phenoFile = Corpheno, columnPlot = 'I')
```
####
To plot samples by population or place them in a certain order, the samples need to be added to the CorPheno file.
If samples are not in the CorPheno file, the samples will be plotted under 'Undefined'. If one or more references aren't in the CorPheno, then they will be plotted at the start of the y axis before the entries with defined orders. An entry with any ID will suffice for references if the user does not wish to add or use the sample IDs from the data used to create the reference.
The CorPheno can easily be modified at the text level to change things such as population orders, colors or add new entries.
Alternatively, the **refAdd** function may be more convenient for adding entries; for more information see ?refAdd()
####
Users can also create their own population references from tPEDs, containing individuals from a specific population. This is done using the **createMedoid** function.
The required arguments are:
- pathTotpeds: The path to the tPED file(s), one tPED per population
- pathAll00: The path to the file containing the dbSNP alleles
Other options also exist and can be found with ?createMedoid()
Example:
```{r, eval=FALSE}
#Path to folder containing samples for each population in tPED format
tpeds <- system.file("extdata", package = "AncestryMapper")
#Path to dbSNP allele data file
All00Frq <- system.file("data", "MinMaxFreq.rda", package = "AncestryMapper")
createMedoid(pathTotpeds = tpeds, pathAll00 = All00Frq)
```
The resulting rda file will be named with the prefix of 'medoidArithmetic_', followed by the name of the tPED used and the number of SNPs. E.g.
medoidArithmetic_Demo_1000_ChipMan.rda
In order for the new reference to be used, it will need to be in the path specified.
For more information on the presence of 'ChipMan' at the end of the file name, see ?**createMedoid**()
#### | /scratch/gouwar.j/cran-all/cranData/AncestryMapper/inst/doc/AncestryMapper2.0.Rmd |
#' @title anchor_prediction
#'
#' @description Perform a prediction for an Anchor Regression model as described in Rothenhäusler et al. 2020
#'
#' @param anchor_model is the Anchor Regression model object
#' @param x is a dataframe containing the independent variables
#' @param anchor is a dataframe containing the anchor variable(s)
#' @param gamma is the regularization parameter for the Anchor Regression
#' @param target_variable is the target variable name contained in the x dataframe
#'
#' @return A list of predictions.
#' @export
#' @importFrom stats coef lm predict
#' @examples
#' x <- as.data.frame(matrix(data = rnorm(100),nrow = 100,ncol = 10))
#' anchor <- as.data.frame(matrix(data = rnorm(200),nrow = 100,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' gamma <- 2
#' target_variable <- 'V2'
#' anchor_model <- anchor_regression(x, anchor, gamma, target_variable)
#' anchor_prediction(anchor_model$model, x, anchor, gamma, target_variable)
anchor_prediction <- function(anchor_model, x, anchor, gamma, target_variable){
# convert to matrix for lm
x <- as.matrix(x)
anchor <- as.matrix(anchor)
  # transform data with the anchor transformation
fit_const <- lm(x ~ 1)
fit <- lm(x ~ anchor)
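  # the transformed data equal x minus its anchor-explained part, with that part
  # added back scaled by sqrt(gamma) -- the same transformation used at fit time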
anchor_data <- fit_const$fitted.values + fit$residuals + sqrt(gamma)*(fit$fitted.values-fit_const$fitted.values)
indices <- 1:nrow(anchor_data)
j <- match( target_variable, colnames(anchor_data))
x <- anchor_data[indices,-c(j)]
  # prediction (the fitted model contains a single lambda value)
  prediction <- predict(anchor_model, type = "response", newx = x)
return(prediction)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/anchor_prediction.R |
#' @title anchor_prediction_gam
#'
#' @description Perform a prediction for a generalized additive Anchor Regression model
#'
#' @param anchor_model is the Anchor Regression model object
#' @param x is a dataframe containing the independent variables
#' @param anchor is a dataframe containing the anchor variable(s)
#' @param gamma is the regularization parameter for the Anchor Regression
#' @param target_variable is the target variable name contained in the x dataframe
#' @param bin_factor binary variable that can be transformed to a factor to partial out effects
#'
#' @return A list of predictions.
#' @export
#' @importFrom stats coef lm predict as.formula
#' @examples
#' x <- as.data.frame(matrix(data = rnorm(10000),nrow = 1000,ncol = 10))
#' x$bin <- sample(nrow(x),x = c(1,0),prob = c(0.5,0.5),replace = TRUE)
#' anchor <- as.data.frame(matrix(data = rnorm(2000),nrow = 1000,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' gamma <- 2
#' target_variable <- 'V2'
#'
#' anchor_model <- anchor_regression_gam(x, anchor, gamma, target_variable,"bin")
#' anchor_prediction_gam(anchor_model$model, x, anchor, gamma, target_variable,"bin")
anchor_prediction_gam <- function(anchor_model, x, anchor, gamma, target_variable,bin_factor){
  # apply the anchor transformation (same as at fit time)
x <- as.matrix(x)
anchor <- as.matrix(anchor)
fit_const <- lm(x ~ 1)
fit <- lm(x ~ anchor)
anchor_data <- fit_const$fitted.values + fit$residuals +
sqrt(gamma) * (fit$fitted.values - fit_const$fitted.values)
# slice set
indices <- 1:nrow(anchor_data)
j <- match(target_variable, colnames(anchor_data))
x_new <- anchor_data[indices, -c(j)]
x_new <- as.data.frame(x_new)
if(is.null(bin_factor) != TRUE){
x_new[bin_factor] <- as.factor(round(x_new[,bin_factor]))
}
  # predict with the fitted gam; predict.gam takes new observations via 'newdata'
  y_pred <- predict(anchor_model, newdata = x_new)
return(y_pred)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/anchor_prediction_gam.R |
#' @title anchor_regression
#'
#' @description Perform an Anchor Regression as described in Rothenhäusler et al. 2020
#'
#' @param x is a dataframe containing the independent variables
#' @param anchor is a dataframe containing the anchor variable(s)
#' @param gamma is the regularization parameter for the Anchor Regression
#' @param target_variable is the target variable name contained in the x dataframe
#' @param lambda indicates the lambda that is used in the Anchor Regression. 'CV' is used if it should be estimated by cross validation on the full subset.
#'
#' @return A list with coefficient values and a list with the respective names \code{overview_print}. Additionally the transformed data as x and y plus the fixed lambda coefficient.
#' @export
#' @importFrom glmnet glmnet cv.glmnet
#' @importFrom stats coef lm
#' @examples
#' x <- as.data.frame(matrix(data = rnorm(1000),nrow = 100,ncol = 10))
#' anchor <- as.data.frame(matrix(data = rnorm(200),nrow = 100,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' gamma <- 2
#' target_variable <- 'V2'
#' anchor_regression(x, anchor, gamma, target_variable)
anchor_regression <- function(x, anchor, gamma, target_variable, lambda='CV'){
  # preliminary checks
  if(ncol(x) < 3){
    stop("Insufficient number of columns in x")
  }
# convert to matrix for lm
x <- as.matrix(x)
anchor <- as.matrix(anchor)
  # transform data
fit_const <- lm(x ~ 1)
fit <- lm(x ~ anchor)
  # estimate lambda by cross-validation, or use the user-supplied value
if(lambda=='CV'){
cv_data <- fit_const$fitted.values + fit$residuals
indices <- 1:nrow(cv_data)
j <- match( target_variable, colnames(cv_data))
fit_glmnet_lasso <- cv.glmnet(x = cv_data[indices,-c(j)],cv_data[indices,j])
lambda_cv <- fit_glmnet_lasso$lambda.1se
}
else{lambda_cv=lambda}
# transform data for the Anchor Regression
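  # i.e. apply (I - (1 - sqrt(gamma)) * P_A) to the data, where P_A projects onto
  # the span of the (centered) anchors: the residuals keep the anchor-orthogonal
  # part while the anchor-fitted part is rescaled by sqrt(gamma)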
anchor_data <- fit_const$fitted.values + fit$residuals + sqrt(gamma)*(fit$fitted.values-fit_const$fitted.values)
indices <- 1:nrow(anchor_data)
j <- match( target_variable, colnames(anchor_data))
x <- anchor_data[indices,-c(j)]
y <- anchor_data[indices,j]
fit_glmnet_anchor <- glmnet(x = x,y = y,lambda = lambda_cv)
return_list <- list(coeff = c(as.vector(coef(fit_glmnet_anchor))), names = c('Intercept',c(colnames(anchor_data)[!colnames(anchor_data) %in% target_variable] )), x = x, y = y,lambda = lambda_cv, model = fit_glmnet_anchor)
return(return_list)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/anchor_regression.R |
#' @title anchor_regression_gam
#'
#' @description Perform a Generalized Additive Anchor Regression
#'
#' @param x is a dataframe containing the independent variables
#' @param anchor is a dataframe containing the anchor variable(s)
#' @param gamma is the regularization parameter for the Anchor Regression
#' @param target_variable is the target variable name contained in the x dataframe
#' @param bin_factor binary variable that can be transformed to a factor to partial out effects
#'
#' @return A list with coefficient values and a list with the respective names \code{overview_print}. Additionally the transformed data as x and y plus the fixed lambda coefficient.
#' @export
#' @importFrom mgcv gam
#' @importFrom stats coef lm as.formula
#' @examples
#' x <- as.data.frame(matrix(data = rnorm(10000),nrow = 1000,ncol = 10))
#' x$bin <- sample(nrow(x),x = c(1,0),prob = c(0.5,0.5),replace = TRUE)
#' anchor <- as.data.frame(matrix(data = rnorm(2000),nrow = 1000,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' gamma <- 2
#' target_variable <- 'V2'
#' anchor_regression_gam(x, anchor, gamma, target_variable,bin_factor = "bin")
anchor_regression_gam <- function (x, anchor, gamma, target_variable, bin_factor = NULL) {
  if (ncol(x) < 3) {
    stop("Insufficient number of columns in x")
  }
# transform data
x <- as.matrix(x)
anchor <- as.matrix(anchor)
fit_const <- lm(x ~ 1)
fit <- lm(x ~ anchor)
# slice data for model fitting
anchor_data <- fit_const$fitted.values + fit$residuals +
sqrt(gamma) * (fit$fitted.values - fit_const$fitted.values)
indices <- 1:nrow(anchor_data)
j <- match(target_variable, colnames(anchor_data))
x <- anchor_data[indices, -c(j)]
y <- anchor_data[indices, j]
  # columns with fewer than 4 unique values are treated as categorical/binary
x <- as.data.frame(anchor_data)
uniq <- lapply(x, unique)
nuniq <- as.data.frame(lengths(uniq))
nuniq_names <- names(as.data.frame(nuniq))[as.vector(nuniq<4)]
vars <- colnames(x)[!colnames(x) %in% target_variable]
vars_non_bin <- vars[!vars %in% nuniq_names]
# generate formula
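  # build the gam formula: continuous columns become smooth terms s(), columns
  # with few unique values enter linearly, and (when supplied) bin_factor is
  # woven in via 'by =' so the smooths can differ between its levels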
if(is.null(bin_factor) != TRUE){
x[bin_factor] <- as.factor(round(x[,bin_factor]))
vars_non_bin_fac <- vars_non_bin[!vars_non_bin %in% bin_factor]
if(length(nuniq_names) !=0){
col <- paste0(", by=",bin_factor, ")+s(")
form <- paste(target_variable, "~ s(", paste(vars_non_bin_fac, collapse=col),")+",paste(nuniq_names, collapse=" + "))
}else{
col <- paste0(", by=",bin_factor, ")+s(")
form <- paste(target_variable, "~ s(", paste(vars_non_bin_fac, collapse=col),")")
}
}else{
if(length(nuniq_names) !=0){
form <- paste(target_variable, "~ s(", paste(vars_non_bin, collapse=") + s("),")+",paste(nuniq_names, collapse=" + "))
}else{
form <- paste(target_variable, "~ s(", paste(vars_non_bin, collapse=") + s("),")")
}
}
# estimate model
model <- gam(as.formula(form),data=x, method = "REML")
return_list <- list(model = model)
return(return_list)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/anchor_regression_gam.R |
#' @title anchor_stability
#'
#' @description Perform an Anchor Stability Analysis as described in Rothenhäusler et al. 2020
#'
#' @param x is a dataframe containing the independent variables
#' @param anchor is a dataframe containing the anchor variable(s)
#' @param target_variable is the target variable name contained in the x dataframe
#' @param lambda indicates the lambda that is used in the Anchor Regression. 'CV' is used if it should be estimated by cross validation on the full subset.
#' @param alpha significance level for test decision on coefficient significance
#' @param p_procedure procedure to estimate stability. Option 1: naive - stable if effect is non-zero in all cases; Option 2: post-lasso - post selection inference using SelectiveInference package
#' @return A dataframe containing the stability values for each coefficient
#' @export
#' @importFrom glmnet glmnet cv.glmnet
#' @importFrom stats coef lm
#' @importFrom selectiveInference fixedLassoInf
#' @examples
#' x <- as.data.frame(matrix(data = rnorm(1000),nrow = 100,ncol = 10))
#' anchor <- as.data.frame(matrix(data = rnorm(200),nrow = 100,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' target_variable <- 'V2'
#' anchor_stability(x, anchor, target_variable, lambda = 0, alpha = 0.05, p_procedure = "naive")
anchor_stability <- function(x, anchor, target_variable, lambda=0, alpha=0.05, p_procedure = "naive"){
anchor_gamma_0 <- anchor_regression(x, anchor, 0, target_variable)
anchor_gamma_inf <- anchor_regression(x, anchor, 1e+29, target_variable)
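  # naive rule: a coefficient is deemed stable when it is selected (non-zero)
  # both at gamma = 0 and at (numerically) infinite gamma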
if(p_procedure == "naive"){
stable <- ifelse(anchor_gamma_0$coeff!=0 & anchor_gamma_inf$coeff!=0,"stable", "not-stable")
result <- data.frame(anchor_gamma_0$names,stable)
names(result) <- c("coefficient","anchor_stability")
}
# Post lasso
if(p_procedure == "post-lasso"){
if(sum(coef(anchor_gamma_0$model, s=anchor_gamma_0$lambda/length(anchor_gamma_0$y))[-1])!=0){
p_0 <- selectiveInference::fixedLassoInf(anchor_gamma_0$x,anchor_gamma_0$y,coef(anchor_gamma_0$model, s=anchor_gamma_0$lambda/length(anchor_gamma_0$y))[-1],anchor_gamma_0$lambda)$pv
}else{p_0=NULL}
if(sum(coef(anchor_gamma_inf$model, s=anchor_gamma_inf$lambda/length(anchor_gamma_inf$y))[-1])!=0){
p_inf <- selectiveInference::fixedLassoInf(anchor_gamma_inf$x,anchor_gamma_inf$y,coef(anchor_gamma_inf$model, s=anchor_gamma_inf$lambda/length(anchor_gamma_inf$y))[-1],anchor_gamma_inf$lambda)$pv
}else{p_inf=NULL}
    if(is.null(p_0) | is.null(p_inf)){
      # no variables were selected in at least one of the two fits, so nothing is stable
      result <- data.frame(anchor_gamma_0$names,"Not Stable")
      colnames(result) <- c("coefficient","anchor_stability")
    }else{
      result <- data.frame(anchor_gamma_0$names,p_0,p_inf)
      colnames(result) <- c("coefficient","pv0","pvInf")
      result$anchor_stability <- ifelse(result$pv0<alpha & result$pvInf<alpha,"stable","not_stable")
    }
}
return(result)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/anchor_stability.R |
#' @title weighted_anchor_prediction
#'
#' @description Perform a prediction for a Weighted Anchor Regression model
#'
#' @param names list of variable names corresponding to the coefficients in coeff
#' @param coeff list of coefficients corresponding to the coefficients in names
#' @param x is a dataframe containing the independent variables
#' @param anchor is a dataframe containing the anchor variable(s)
#' @param gamma is the regularization parameter for the Anchor Regression
#' @param target_variable is the target variable name contained in the x dataframe
#'
#' @return A list of predictions.
#' @export
#' @importFrom stats coef lm
#' @examples
#' # number of observed environments
#' environments <- 10
#'
#' # populate list with generated data of x and anchor
#' data_x_list <- c()
#' data_anchor_list <- c()
#' for(e in 1:environments){
#' x <- as.data.frame(matrix(data = rnorm(100),nrow = 100,ncol = 10))
#' anchor <- as.data.frame(matrix(data = rnorm(200),nrow = 100,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' data_x_list[[e]] <- x
#' data_anchor_list[[e]] <- anchor
#' }
#'
#' # estimate model
#' gamma <- 2
#' target_variable <- 'V2'
#' weighted_anchor_model <- weighted_anchor_regression(data_x_list,
#' data_anchor_list,
#' gamma,
#' target_variable,
#' anchor_model_pre=NULL,
#' test_split=0.4,
#' lambda=0)
#' weighted_anchor_prediction(weighted_anchor_model$names,
#' weighted_anchor_model$coeff,
#' x,
#' anchor,
#' gamma,
#' target_variable)
weighted_anchor_prediction <- function(names,coeff, x, anchor, gamma, target_variable){
# convert to matrix for lm
x <- as.matrix(x)
anchor <- as.matrix(anchor)
  # transform data using the same anchor transformation as at fit time
fit_const <- lm(x ~ 1)
fit <- lm(x ~ anchor)
anchor_data <- fit_const$fitted.values + fit$residuals + sqrt(gamma)*(fit$fitted.values-fit_const$fitted.values)
indices <- 1:nrow(anchor_data)
j <- match( target_variable, colnames(anchor_data))
x <- anchor_data[indices,-c(j)]
  # prediction: drop the intercept and multiply the transformed predictors
  # by the weighted coefficient vector
  coefficients <- as.numeric(coeff)
  names(coefficients) <- names
  prediction <- as.matrix(x) %*% coefficients[-1]
return(prediction)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/weighted_anchor_prediction.R |
#' @title weighted_anchor_regression
#'
#' @description Estimates weighted Anchor Regression coefficients
#'
#' @param data_x_list list containing coefficient dataframes for different environments
#' @param data_anchor_list list containing anchor dataframes for different environments
#' @param gamma is the regularization parameter for the Anchor Regression
#' @param target_variable is the target variable name contained in the x dataframe
#' @param anchor_model_pre is the pre estimated model for the Anchor Regression. In case of NULL a new model is estimated.
#' @param test_split is desired test/train split for the estimation
#' @param lambda penalization coefficient for Anchor Shrinkage. Initially set to 0.
#'
#' @return A list estimated coefficients with names, weights and the raw coefficient matrix
#' @export
#' @importFrom stats coef lm
#' @examples
#' environments <- 10 # number of observed environments
#'
#' # populate list with generated data of x and anchor
#' data_x_list <- c()
#' data_anchor_list <- c()
#' for(e in 1:environments){
#' x <- as.data.frame(matrix(data = rnorm(100),nrow = 100,ncol = 10))
#' anchor <- as.data.frame(matrix(data = rnorm(200),nrow = 100,ncol = 2))
#' colnames(anchor) <- c('X1','X2')
#' data_x_list[[e]] <- x
#' data_anchor_list[[e]] <- anchor
#' }
#'
#' # estimate model
#' gamma <- 2
#' target_variable <- 'V2'
#' weighted_anchor_regression(data_x_list,
#' data_anchor_list,
#' gamma,
#' target_variable,
#' anchor_model_pre=NULL,
#' test_split=0.4,
#' lambda=0)
weighted_anchor_regression <- function(data_x_list,data_anchor_list,gamma,target_variable,anchor_model_pre=NULL,test_split=0.4, lambda=0){
# initialize coefficient output matrix and patient score list
coefficient_matrix <- NULL
patient_prediction_scores <- c()
  # loop through all environments
for(patient in 1:length(data_anchor_list)){
# create test and train split
smp_size <- floor(test_split * nrow(data_x_list[[patient]]))
train_x_ind <- sample(seq_len(nrow(data_x_list[[patient]])), size = smp_size)
train_anchor_ind <- sample(seq_len(nrow(data_anchor_list[[patient]])), size = smp_size)
train_x <- data_x_list[[patient]][train_x_ind, ]
test_x <- data_x_list[[patient]][-train_x_ind, ]
train_anchor <- data_anchor_list[[patient]][train_anchor_ind, ]
test_anchor <- data_anchor_list[[patient]][-train_anchor_ind, ]
# estimate model if desired
if(is.null(anchor_model_pre) ==TRUE){
anchor_model <- anchor_regression(train_x,train_anchor,gamma,target_variable,lambda)
}else{
anchor_model <- anchor_model_pre
}
# save coefficients
coefficient_matrix <- cbind(coefficient_matrix, anchor_model$coeff)
# predict and calculate the mse for the predictions
prediction <- anchor_prediction(anchor_model$model, test_x, test_anchor, gamma, target_variable)
result <- as.data.frame(test_x[target_variable])
result$prediction <- prediction
result$diff2 <- (result[,1] - result$prediction)^2
mse <- mean(result$diff2)
patient_prediction_scores <- c(patient_prediction_scores, mse)
}
# calculate weights and resulting coefficients
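  # each environment's weight is its test MSE divided by the total across
  # environments (so the weights sum to one); the final coefficient vector is
  # the weighted average of the per-environment coefficients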
weights <- patient_prediction_scores/sum(patient_prediction_scores)
weighted_coefficients <- weights%*%t(coefficient_matrix)
# return result
return_list = list(coeff = weighted_coefficients, names = anchor_model$names, raw_coeffs = coefficient_matrix,weights = weights)
return(return_list)
}
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/R/weighted_anchor_regression.R |
x <- as.data.frame(matrix(data = rnorm(1000),nrow = 100,ncol = 10))
anchor <- as.data.frame(matrix(data = rnorm(200),nrow = 100,ncol = 2))
colnames(anchor) <- c('X1','X2')
gamma <- 2
target_variable <- 'V2'
anchor_regression(x, anchor, gamma, target_variable)
| /scratch/gouwar.j/cran-all/cranData/AnchorRegression/man/examples/AnchorExample.R |
# Copyright 2024 Observational Health Data Sciences and Informatics
#
# This file is part of Andromeda
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @keywords internal
#' @aliases
#' NULL Andromeda-package
#' @importFrom methods slotNames
#' @importFrom utils head setTxtProgressBar txtProgressBar
#' @importFrom tidyselect all_of
#' @importFrom rlang abort warn inform
#' @import dplyr
#'
"_PACKAGE"
# Used to remember when a warning has already been thrown for a temp file location:
andromedaGlobalEnv <- new.env(parent = emptyenv())
| /scratch/gouwar.j/cran-all/cranData/Andromeda/R/Andromeda.R |
# Copyright 2024 Observational Health Data Sciences and Informatics
#
# This file is part of Andromeda
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Create an index on one or more columns in an Andromeda table
#'
#' @param tbl An [`Andromeda`] table (or any other 'DBI' table).
#' @param columnNames A vector of column names (character) on which the index is to be created.
#' @param unique Should values in the column(s) be enforced to be unique?
#' @param indexName The name of the index. If not provided, a random name will be generated.
#'
#' @details
#' Indices can speed up subsequent queries that use the indexed columns, but can take time to create,
#' and will take additional space on the drive.
#'
#' @seealso [listIndices()], [removeIndex()]
#'
#' @return
#' Invisibly returns the input table.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' createIndex(andr$cars, "speed")
#'
#' # Will be faster now that speed is indexed:
#' andr$cars %>%
#' filter(speed == 10) %>%
#' collect()
#'
#' close(andr)
#'
#' @export
createIndex <- function(tbl, columnNames, unique = FALSE, indexName = NULL) {
if (!inherits(tbl, "tbl_dbi"))
abort("First argument must be an Andromeda (or DBI) table")
if (!all(columnNames %in% colnames(tbl)))
abort(sprintf("Column(s) %s not found in the table", paste(columnNames[!columnNames %in% names(tbl)], collapse = ", ")))
if (is.null(indexName)) {
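    # generate a random 20-character index name, e.g. 'idx_a1b2c3...'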
indexName <- paste(c("idx_", sample(c(letters, 0:9), 20)), collapse = "")
}
statement <- sprintf("CREATE %s INDEX %s ON %s(%s);",
if (unique) "UNIQUE" else "",
indexName,
as.character(dbplyr::remote_name(tbl)),
paste(columnNames, collapse = ", "))
RSQLite::dbExecute(conn = dbplyr::remote_con(tbl), statement = statement)
invisible(tbl)
}
#' List all indices on an Andromeda table
#'
#' @param tbl An [`Andromeda`] table (or any other 'DBI' table).
#'
#' @details
#' Lists any indices that may have been created using the [createIndex()] function.
#'
#' @seealso [createIndex()], [removeIndex()]
#'
#' @return
#' Returns a tibble listing the indices, indexed columns, and whether the index is unique.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' createIndex(andr$cars, "speed")
#'
#' listIndices(andr$cars)
#' # # A tibble: 1 x 5
#' # indexSequenceId indexName unique columnSequenceId columnName
#' # <int> <chr> <lgl> <int> <chr>
#' #1 0 idx_ocy8we9j2i7ld0rshgb4 FALSE 0 speed
#'
#' close(andr)
#'
#' @export
listIndices <- function(tbl) {
if (!inherits(tbl, "tbl_dbi"))
abort("Argument must be an Andromeda (or DBI) table")
tableName <- as.character(dbplyr::remote_name(tbl))
connection <- dbplyr::remote_con(tbl)
indices <- RSQLite::dbGetQuery(conn = connection,
statement = sprintf("PRAGMA index_list('%s');", tableName)) %>%
dplyr::as_tibble()
if (nrow(indices) == 0) {
return(dplyr::tibble())
}
getIndexInfo <- function(indexName) {
indexInfo <- RSQLite::dbGetQuery(conn = connection,
statement = sprintf("PRAGMA index_info('%s');", indexName)) %>%
dplyr::as_tibble()
indexInfo$indexName <- indexName
return(indexInfo)
}
indexInfo <- lapply(indices$name, getIndexInfo)
indexInfo <- bind_rows(indexInfo) %>%
select(indexName = "indexName",
columnSequenceId = "seqno",
columnName = "name")
result <- indices %>%
mutate(unique = case_when(.data$unique == 1 ~ TRUE, TRUE ~ FALSE)) %>%
select(indexSequenceId = "seq",
indexName = "name",
unique = "unique") %>%
inner_join(indexInfo, by = "indexName")
return(result)
}
#' Removes an index from an Andromeda table
#'
#' @param tbl An [`Andromeda`] table (or any other 'DBI' table).
#' @param columnNames A vector of column names (character) on which the index was created. If not
#' provided, then the `indexName` argument must be provided.
#' @param indexName The name of the index. If not provided, the `columnNames` argument must be
#' provided.
#'
#' @details
#' Remove an index created using the [createIndex()] function. Either the index name or the column
#' names on which the index was created must be provided.
#'
#' @seealso [createIndex()], [listIndices()]
#'
#' @return
#' Invisibly returns the input table.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' createIndex(andr$cars, "speed")
#'
#' # Will be faster now that speed is indexed:
#' andr$cars %>%
#' filter(speed == 10) %>%
#' collect()
#'
#' removeIndex(andr$cars, "speed")
#'
#' close(andr)
#'
#' @export
removeIndex <- function(tbl, columnNames = NULL, indexName = NULL) {
if (!inherits(tbl, "tbl_dbi"))
abort("First argument must be an Andromeda (or DBI) table")
tableName <- as.character(dbplyr::remote_name(tbl))
connection <- dbplyr::remote_con(tbl)
indices <- RSQLite::dbGetQuery(conn = connection,
statement = sprintf("PRAGMA index_list('%s');", tableName))
  if (is.null(indexName)) {
    # Use a separate loop variable so indexName remains NULL when no matching
    # index is found:
    for (candidateName in indices$name) {
      indexInfo <- RSQLite::dbGetQuery(conn = connection,
                                       statement = sprintf("PRAGMA index_info('%s');", candidateName))
      if (all(columnNames %in% indexInfo$name)) {
        indexName <- candidateName
        break
      }
    }
    if (is.null(indexName)) {
      abort(sprintf("Could not find an index on column(s) %s", paste(columnNames, collapse = ", ")))
    }
} else {
if (!indexName %in% indices$name) {
abort(sprintf("Index with name '%s' not found", indexName))
}
}
statement <- sprintf("DROP INDEX %s;", indexName)
RSQLite::dbExecute(conn = dbplyr::remote_con(tbl), statement = statement)
invisible(tbl)
}
| /scratch/gouwar.j/cran-all/cranData/Andromeda/R/Indices.R |
# Copyright 2024 Observational Health Data Sciences and Informatics
#
# This file is part of Andromeda
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Save Andromeda to file
#'
#' @param andromeda An object of class [`Andromeda`].
#' @param fileName The path where the object will be written.
#' @param maintainConnection Should the connection be maintained after saving? If `FALSE`, the
#' Andromeda object will be invalid after this operation, but saving will
#' be faster.
#' @param overwrite If the file exists, should it be overwritten? If `FALSE` and the file
#' exists, an error will be thrown.
#'
#' @seealso
#' \code{\link{loadAndromeda}}
#'
#' @description
#' Saves the [`Andromeda`] object in a zipped file. Note that by default the [`Andromeda`] object is
#' automatically closed by saving it to disk. This is due to a limitation of the underlying technology
#' ('RSQLite'). To keep the connection open, use `maintainConnection = TRUE`. This will first
#' create a temporary copy of the [`Andromeda`] object. Note that this can be substantially slower.
#'
#' @return
#' Returns no value. Executed for the side-effect of saving the object to disk.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' # For this example we'll use a temporary file location:
#' fileName <- tempfile()
#'
#' saveAndromeda(andr, fileName)
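#'
#' # To keep the connection open after saving (slower, since a temporary copy
#' # is made first):
#' # saveAndromeda(andr, fileName, maintainConnection = TRUE)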
#'
#' # Cleaning up the file used in this example:
#' unlink(fileName)
#'
#' @seealso
#' [`loadAndromeda()`]
#'
#' @export
saveAndromeda <- function(andromeda, fileName, maintainConnection = FALSE, overwrite = TRUE) {
if (!overwrite && file.exists(fileName)) {
abort(sprintf("File %s already exists, and overwrite = FALSE", fileName))
}
if (!isValidAndromeda(andromeda)) {
abort("andromeda object is closed or not valid.")
}
fileName <- path.expand(fileName)
if (!dir.exists(dirname(fileName))) {
abort(sprintf("The directory '%s' does not exist. Andromeda object cannot be saved", dirname(fileName)))
}
andromedaTempFolder <- .getAndromedaTempFolder()
.checkAvailableSpace()
# Need to save any user-defined attributes as well:
attribs <- attributes(andromeda)
for (name in slotNames(andromeda)) {
attribs[[name]] <- NULL
}
attribs[["class"]] <- NULL
attributesFileName <- tempfile(tmpdir = andromedaTempFolder, fileext = ".rds")
saveRDS(attribs, attributesFileName)
if (maintainConnection) {
# Can't zip while connected, so make copy:
tempFileName <- tempfile(tmpdir = andromedaTempFolder, fileext = ".sqlite")
RSQLite::sqliteCopyDatabase(andromeda, tempFileName)
zip::zipr(fileName, c(attributesFileName, tempFileName), compression_level = 2)
unlink(tempFileName)
} else {
RSQLite::dbDisconnect(andromeda)
zip::zipr(fileName, c(attributesFileName, andromeda@dbname), compression_level = 2)
unlink(andromeda@dbname)
inform("Disconnected Andromeda. This data object can no longer be used")
}
unlink(attributesFileName)
}
#' Load Andromeda from file
#'
#' @param fileName The path where the object was saved using [`saveAndromeda()`].
#'
#' @seealso
#' [`saveAndromeda()`]
#'
#' @return
#' An [`Andromeda`] object.
#'
#' @examples
#' # For this example we create an Andromeda object and save it to
#' # a temporary file location:
#' fileName <- tempfile()
#' andr <- andromeda(cars = cars)
#' saveAndromeda(andr, fileName)
#'
#' # Using loadAndromeda to load the object back:
#' andr <- loadAndromeda(fileName)
#'
#' # Don't forget to close Andromeda when you are done:
#' close(andr)
#'
#' # Cleaning up the file used in this example:
#' unlink(fileName)
#'
#' @export
#' @import hms
loadAndromeda <- function(fileName) {
if (!file.exists(fileName)) {
abort(sprintf("File %s does not exist", fileName))
}
fileNamesInZip <- utils::unzip(fileName, list = TRUE)$Name
sqliteFilenameInZip <- fileNamesInZip[grepl(".sqlite$", fileNamesInZip)]
rdsFilenameInZip <- fileNamesInZip[grepl(".rds$", fileNamesInZip)]
andromedaTempFolder <- .getAndromedaTempFolder()
.checkAvailableSpace()
# Unzip:
tempDir <- tempfile(tmpdir = andromedaTempFolder)
dir.create(tempDir)
on.exit(unlink(tempDir, recursive = TRUE))
zip::unzip(fileName, exdir = tempDir)
# Rename unzipped files:
newFileName <- tempfile(tmpdir = andromedaTempFolder, fileext = ".sqlite")
file.rename(file.path(tempDir, sqliteFilenameInZip), newFileName)
attributes <- readRDS(file.path(tempDir, rdsFilenameInZip))
andromeda <- RSQLite::dbConnect(RSQLite::SQLite(), newFileName, extended_types = TRUE)
finalizer <- function(ptr) {
# Suppress R Check note:
missing(ptr)
close(andromeda)
}
reg.finalizer(andromeda@ptr, finalizer, onexit = TRUE)
for (name in names(attributes)) {
attr(andromeda, name) <- attributes[[name]]
}
RSQLite::dbExecute(andromeda, "PRAGMA journal_mode = OFF")
RSQLite::dbExecute(andromeda, sprintf("PRAGMA temp_store_directory = '%s'", andromedaTempFolder))
class(andromeda) <- "Andromeda"
attr(class(andromeda), "package") <- "Andromeda"
return(andromeda)
}
.checkAvailableSpace <- function(andromeda = NULL) {
if (.isInstalled("rJava")) {
warnDiskSpace <- getOption("warnDiskSpaceThreshold")
if (is.null(warnDiskSpace)) {
warnDiskSpace <- 10 * 1024 ^ 3
}
if (warnDiskSpace != 0) {
if (is.null(andromeda)) {
folder <- .getAndromedaTempFolder()
} else {
folder <- dirname(andromeda@dbname)
}
if (exists("lowDiskWarnings", envir = andromedaGlobalEnv)) {
lowDiskWarnings <- get("lowDiskWarnings", envir = andromedaGlobalEnv)
if (folder %in% lowDiskWarnings) {
# Already warned about this location. Not warning again.
return()
}
} else {
lowDiskWarnings <- c()
}
space <- getAndromedaTempDiskSpace(andromeda)
if (!is.na(space) && space < warnDiskSpace) {
message <- sprintf("Low disk space in '%s'. Only %0.1f GB left.",
folder,
space / 1024^3)
message <- c(message,
pillar::style_subtle("Use options(warnDiskSpaceThreshold = <n>) to set the number of bytes for this warning to trigger."))
message <- c(message,
pillar::style_subtle("This warning will not be shown for this file location again during this R session."))
warn(paste(message, collapse = "\n"))
assign("lowDiskWarnings", c(lowDiskWarnings, folder), envir = andromedaGlobalEnv)
}
}
}
}
#' Get the available disk space in Andromeda temp
#'
#' @description
#' Attempts to determine how much disk space is still available in the Andromeda temp folder.
#' This function uses Java, so will only work if the `rJava` package is installed.
#'
#' By default the Andromeda temp folder is located in the system temp space, but the location
#' can be altered using `options(andromedaTempFolder = "c:/andromedaTemp")`, where
#' `"c:/andromedaTemp"` is the folder to create the Andromeda objects in.
#'
#' @param andromeda Optional: provide an [Andromeda] object for which to get the available disk
#' space. Normally all [Andromeda] objects use the same temp folder, but the user
#' could have altered it.
#'
#' @return
#' The number of bytes of available disk space in the Andromeda temp folder. Returns NA
#' if unable to determine the amount of available disk space, for example because `rJava`
#' is not installed, or because the user doesn't have the rights to query the available
#' disk space.
#'
#' @examples
#' # Get the number of available gigabytes:
#' getAndromedaTempDiskSpace() / 1024^3
#' #123.456
#'
#' @export
getAndromedaTempDiskSpace <- function(andromeda = NULL) {
if (!is.null(andromeda) && !inherits(andromeda, "SQLiteConnection"))
abort("Andromeda argument must be of type 'Andromeda'.")
# Using Java because no cross-platform functions available in R:
if (!.isInstalled("rJava")) {
return(NA)
} else {
if (is.null(andromeda)) {
folder <- .getAndromedaTempFolder()
} else {
folder <- dirname(andromeda@dbname)
}
space <- tryCatch({
rJava::.jinit()
file <- rJava::.jnew("java.io.File", normalizePath(folder), check = FALSE, silent = TRUE)
rJava::.jcall(file, "J", "getUsableSpace")
# This throws "illegal reflective access operation" warning:
# path <- rJava::J("java.nio.file.Paths")$get(fileName, rJava::.jarray(c("")))
# fileStore <- rJava::J("java.nio.file.Files")$getFileStore(path)
# fileStore$getUsableSpace()
}, error = function(e) NA)
return(space)
}
}
.isInstalled <- function(pkg) {
installedVersion <- tryCatch(utils::packageVersion(pkg),
error = function(e) NA)
return(!is.na(installedVersion))
}
| /scratch/gouwar.j/cran-all/cranData/Andromeda/R/LoadingSaving.R |
# Copyright 2024 Observational Health Data Sciences and Informatics
#
# This file is part of Andromeda
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' The Andromeda class
#'
#' @description
#' The `Andromeda` class is an S4 object.
#'
#' This class provides the ability to work with data objects in R that are too large to fit in memory. Instead,
#' these objects are stored on disk. This is slower than working from memory, but may be the only viable option.
#'
#' @section Tables:
#' An `Andromeda` object has zero, one or more tables. The list of table names can be retrieved using the [`names()`]
#' method. Tables can be accessed using the dollar sign syntax, e.g. `andromeda$myTable`, or double-square-bracket
#' syntax, e.g. `andromeda[["myTable"]]`
#'
#'
#' @section Permanence:
#'
#' To mimic the behavior of in-memory objects, when working with data in `Andromeda` the data is stored in a
#' temporary location on the disk. You can modify the data as you can see fit, and when needed can save the data
#' to a permanent location. Later this data can be loaded to a temporary location again and be read and modified,
#' while keeping the saved data as is.
#'
#' @section Inheritance:
#'
#' The `Andromeda` class inherits directly from `SQLiteConnection`. As such, it can be used as if it is a `SQLiteConnection`.
#' [`RSQLite`] is an R wrapper around 'SQLite', a lightweight but very powerful single-user SQL database that can run
#' from a single file on the local file system.
#'
#' @name Andromeda-class
#' @aliases Andromeda
#' @seealso [`andromeda()`]
#' @import RSQLite
#' @importClassesFrom DBI DBIObject DBIConnection
#' @export
setClass("Andromeda", contains = "SQLiteConnection")
#' Create an Andromeda object
#'
#' @description
#' By default the `Andromeda` object is created in the systems temporary file location. You can override
#' this by specifying a folder using `options(andromedaTempFolder = "c:/andromedaTemp")`, where
#' `"c:/andromedaTemp"` is the folder to create the Andromeda objects in.
#'
#' @param ... Named objects. See details for what objects are valid. If no objects are provided, an
#' empty Andromeda is returned.
#'
#' @details
#' Valid objects are data frames, `Andromeda` tables, or any other [`dplyr`] table.
#'
#' @return
#' Returns an [`Andromeda`] object.
#'
#' @examples
#' andr <- andromeda(cars = cars, iris = iris)
#'
#' names(andr)
#' # [1] 'cars' 'iris'
#'
#' andr$cars %>% filter(speed > 10) %>% collect()
#' # # A tibble: 41 x 2
#' # speed dist
#' # <dbl> <dbl>
#' # 1 11 17
#' # ...
#'
#' close(andr)
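#'
#' # Optionally, Andromeda objects can be created in a specific folder:
#' # options(andromedaTempFolder = "c:/andromedaTemp")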
#'
#' @rdname andromeda_constructor
#'
#' @export
andromeda <- function(...) {
arguments <- list(...)
if (length(arguments) > 0) {
if (is.null(names(arguments)) || any(names(arguments) == ""))
abort("All arguments must be named")
}
andromeda <- .createAndromeda()
if (length(arguments) > 0) {
for (name in names(arguments)) {
andromeda[[name]] <- arguments[[name]]
}
}
return(andromeda)
}
#' Copy Andromeda
#'
#' @param andromeda The [`Andromeda`] object to copy.
#'
#' @description
#' Creates a complete copy of an [`Andromeda`] object. Object attributes are not copied.
#'
#' @return
#' The copied [`Andromeda`] object.
#'
#' @examples
#' andr <- andromeda(cars = cars, iris = iris)
#'
#' andr2 <- copyAndromeda(andr)
#'
#' names(andr2)
#' # [1] 'cars' 'iris'
#'
#' close(andr)
#' close(andr2)
#'
#' @export
copyAndromeda <- function(andromeda) {
checkIfValid(andromeda)
newAndromeda <- .createAndromeda()
RSQLite::sqliteCopyDatabase(andromeda, newAndromeda)
return(newAndromeda)
}
.createAndromeda <- function() {
tempFolder <- .getAndromedaTempFolder()
andromeda <- RSQLite::dbConnect(RSQLite::SQLite(),
tempfile(tmpdir = tempFolder, fileext = ".sqlite"),
extended_types = TRUE)
class(andromeda) <- "Andromeda"
attr(class(andromeda),"package") <- "Andromeda"
finalizer <- function(ptr) {
# Suppress R Check note:
missing(ptr)
close(andromeda)
}
reg.finalizer(andromeda@ptr, finalizer, onexit = TRUE)
RSQLite::dbExecute(andromeda, "PRAGMA journal_mode = OFF")
RSQLite::dbExecute(andromeda, sprintf("PRAGMA temp_store_directory = '%s'", tempFolder))
return(andromeda)
}
.getAndromedaTempFolder <- function() {
tempFolder <- getOption("andromedaTempFolder")
if (is.null(tempFolder)) {
tempFolder <- tempdir()
} else {
tempFolder <- path.expand(tempFolder)
if (!file.exists(tempFolder)) {
dir.create(tempFolder, recursive = TRUE)
}
}
return(tempFolder)
}
#' @param object An [`Andromeda`] object.
#' @export
#' @rdname Andromeda-class
setMethod("show", "Andromeda", function(object) {
cli::cat_line(pillar::style_subtle("# Andromeda object"))
if (RSQLite::dbIsValid(object)) {
cli::cat_line(pillar::style_subtle(paste("# Physical location: ", object@dbname)))
cli::cat_line("")
cli::cat_line("Tables:")
for (name in RSQLite::dbListTables(object)) {
cli::cat_line(paste0("$",
name,
" (",
paste(RSQLite::dbListFields(object, name), collapse = ", "),
")"))
}
} else {
cli::cli_alert_danger("Connection closed")
}
invisible(NULL)
})
#' @param x An [`Andromeda`] object.
#' @param name The name of a table in the [`Andromeda`] object.
#' @export
#' @rdname Andromeda-class
setMethod("$", "Andromeda", function(x, name) {
return(x[[name]])
})
#' @param x An [`Andromeda`] object.
#' @param name The name of a table in the [`Andromeda`] object.
#' @param value A data frame, [`Andromeda`] table, or other 'DBI' table.
#' @export
#' @rdname Andromeda-class
setMethod("$<-", "Andromeda", function(x, name, value) {
x[[name]] <- value
return(x)
})
#' @param x An [`Andromeda`] object.
#' @param i The name of a table in the [`Andromeda`] object.
#' @param value A data frame, [`Andromeda`] table, or other 'DBI' table.
#' @export
#' @rdname Andromeda-class
setMethod("[[<-", "Andromeda", function(x, i, value) {
checkIfValid(x)
if (is.null(value)) {
if (i %in% names(x)) {
RSQLite::dbRemoveTable(x, i)
}
} else if (inherits(value, "data.frame")) {
.checkAvailableSpace(x)
RSQLite::dbWriteTable(conn = x, name = i, value = value, overwrite = TRUE, append = FALSE)
} else if (inherits(value, "tbl_dbi")) {
.checkAvailableSpace(x)
if (isTRUE(all.equal(x, dbplyr::remote_con(value)))) {
sql <- dbplyr::sql_render(value, x)
if (RSQLite::dbExistsTable(x, i)) {
# Maybe we're copying data from a table into the same table. So write to temp
# table first, then drop old table, and rename temp to old name:
tempName <- paste(sample(letters, 16), collapse = "")
sql <- sprintf("CREATE TABLE %s AS %s", tempName, sql)
RSQLite::dbExecute(x, sql)
RSQLite::dbRemoveTable(x, i)
sql <- sprintf("ALTER TABLE %s RENAME TO %s;", tempName, i)
RSQLite::dbExecute(x, sql)
} else {
sql <- sprintf("CREATE TABLE %s AS %s", i, sql)
RSQLite::dbExecute(x, sql)
}
} else {
if (RSQLite::dbExistsTable(x, i)) {
RSQLite::dbRemoveTable(x, i)
}
doBatchedAppend <- function(batch) {
RSQLite::dbWriteTable(conn = x, name = i, value = batch, overwrite = FALSE, append = TRUE)
return(TRUE)
}
dummy <- batchApply(value, doBatchedAppend)
if (length(dummy) == 0) {
RSQLite::dbWriteTable(conn = x, name = i, value = dplyr::collect(value), overwrite = FALSE, append = TRUE)
}
}
} else {
abort("Table must be a data frame or dplyr table")
}
x
})
#' @param x An [`Andromeda`] object.
#' @param i The name of a table in the [`Andromeda`] object.
#' @export
#' @rdname Andromeda-class
setMethod("[[", "Andromeda", function(x, i) {
checkIfValid(x)
if (RSQLite::dbExistsTable(x, i)) {
return(dplyr::tbl(x, i))
} else {
return(NULL)
}
})
#' names
#'
#' @description
#' Show the names of the tables in an Andromeda object.
#'
#' @param x An [`Andromeda`] object.
#'
#' @return
#' A vector of names.
#'
#' @examples
#' andr <- andromeda(cars = cars, iris = iris)
#'
#' names(andr)
#' # [1] 'cars' 'iris'
#'
#' close(andr)
#'
#' @rdname Andromeda-class
#'
#' @export
setMethod("names", "Andromeda", function(x) {
checkIfValid(x)
RSQLite::dbListTables(x)
})
#' Set table names in an Andromeda object
#'
#' names(andromedaObject) must be set to a character vector with length equal to the number of
#' tables in the andromeda object (i.e. length(andromedaObject)). The user is
#' responsible for setting valid table names (e.g. not using SQL keywords or numbers as names).
#' This function treats Andromeda table names as case insensitive, so if the only difference
#' between the new names and the old names is the case, the names will not be changed.
#'
#' @param x An Andromeda object
#' @param value A character vector with the same length as the number of tables in x
#'
#' @export
#'
#' @examples
#' andr <- andromeda(cars = cars, iris = iris)
#' names(andr) <- c("CARS", "IRIS")
#' names(andr)
#' # [1] "CARS" "IRIS"
#' close(andr)
#'
setMethod("names<-", "Andromeda", function(x, value) {
checkIfValid(x)
nm <- names(x)
if(!is.character(value) || !(length(nm) == length(value))) {
rlang::abort("New names must be a character vector with the same length as names(x).")
}
for(i in seq_along(nm)) {
if((nm[i] != value[i]) & (tolower(nm[i]) == tolower(value[i]))) {
# Handle case when names differ only by case
DBI::dbExecute(x, sprintf("ALTER TABLE %s RENAME TO %s;", nm[i], paste0(nm[i], "0")))
DBI::dbExecute(x, sprintf("ALTER TABLE %s RENAME TO %s;", paste0(nm[i], "0"), value[i]))
} else if(nm[i] != value[i]) {
DBI::dbExecute(x, sprintf("ALTER TABLE %s RENAME TO %s;", nm[i], value[i]))
}
}
invisible(x)
})
#' Get the column names of an Andromeda table
#'
#' @param x An table in an Andromeda object
#'
#' @return A character vector of column names
#' @export
#'
#' @examples
#' andr <- andromeda(cars = cars)
#' names(andr$cars)
#' # [1] "speed" "dist"
#' close(andr)
names.tbl_Andromeda <- function(x) {
colnames(x)
}
#' Set column names of an Andromeda table
#'
#' @param x A reference to a table in an andromeda object. (see examples)
#' @param value A character vector of new names that must have length equal to the number of columns in the table.
#'
#' @export
#'
#' @examples
#' andr <- andromeda(cars = cars)
#' names(andr$cars) <- toupper(names(andr$cars))
#' names(andr$cars)
#' # [1] "SPEED" "DIST"
#' close(andr)
"names<-.tbl_Andromeda" <- function(x, value) {
tableName <- dbplyr::remote_name(x)
connection <- dbplyr::remote_con(x)
nm <- names(x)
if(!is.character(value) || !(length(nm) == length(value))) {
rlang::abort("New names must be a character vector with the same length as names(x).")
}
idx <- nm != value
if (any(idx)) {
sql <- sprintf("ALTER TABLE %s RENAME COLUMN %s TO %s;", tableName, nm[idx], value[idx])
lapply(sql, function(statement) DBI::dbExecute(connection, statement))
}
invisible(x)
}
#' @param x An [`Andromeda`] object.
#' @export
#' @rdname Andromeda-class
setMethod("length", "Andromeda", function(x) {
length(names(x))
})
#' Check whether an object is an Andromeda object
#'
#' @param x The object to check.
#'
#' @details
#' Checks whether an object is an Andromeda object.
#'
#' @return
#' A logical value.
#'
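#' @examples
#' andr <- andromeda()
#'
#' isAndromeda(andr)
#' # TRUE
#'
#' isAndromeda(cars)
#' # FALSE
#'
#' close(andr)
#'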
#' @export
isAndromeda <- function(x) {
return(inherits(x, "Andromeda"))
}
#' Check whether an Andromeda object is still valid
#'
#' @param x The Andromeda object to check.
#'
#' @details
#' Checks whether an Andromeda object is still valid, or whether it has been closed.
#'
#' @return
#' A logical value.
#'
#' @examples
#' andr <- andromeda(cars = cars, iris = iris)
#'
#' isValidAndromeda(andr)
#' # TRUE
#'
#' close(andr)
#'
#' isValidAndromeda(andr)
#' # FALSE
#'
#' @export
isValidAndromeda <- function(x) {
if(!isAndromeda(x)) rlang::abort(paste(deparse(substitute(x)), "is not an Andromeda object."))
return(RSQLite::dbIsValid(x))
}
#' @param con An [`Andromeda`] object.
#' @param ... Included for compatibility with generic `close()` method.
#' @export
#' @rdname Andromeda-class
setMethod("close", "Andromeda", function(con, ...) {
fileName <- con@dbname
if (RSQLite::dbIsValid(con)) {
RSQLite::dbDisconnect(con)
}
if (file.exists(fileName)) {
unlink(fileName)
}
})
checkIfValid <- function(x) {
if (!isValidAndromeda(x))
abort("Andromeda object is no longer valid. Perhaps it was saved without maintainConnection = TRUE, or R has been restarted?")
}
#' Is the object an Andromeda table?
#'
#' @param tbl A reference to an Andromeda table
#'
#' @return TRUE or FALSE
#' @export
#'
#' @examples
#' \dontrun{
#' andr <- andromeda(cars = cars)
#' isAndromedaTable(andr$cars)
#' close(andr)
#' }
isAndromedaTable <- function(tbl) {
return(
inherits(tbl, "FileSystemDataset") ||
inherits(tbl, "arrow_dplyr_query") ||
inherits(tbl, "tbl_Andromeda") ||
inherits(tbl, "tbl_SQLiteConnection")
)
}
| /scratch/gouwar.j/cran-all/cranData/Andromeda/R/Object.R |
# Copyright 2024 Observational Health Data Sciences and Informatics
#
# This file is part of Andromeda
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Apply a function to batches of data in an Andromeda table
#'
#' @param tbl An [`Andromeda`] table (or any other 'DBI' table).
#' @param fun A function where the first argument is a data frame.
#' @param ... Additional parameters passed to fun.
#' @param batchSize Number of rows to fetch at a time.
#' @param progressBar Show a progress bar?
#' @param safe Create a copy of tbl first? Allows writing to the same Andromeda as being
#' read from.
#'
#' @details
#' This function is similar to the [`lapply()`] function, in that it applies a function to sets of
#' data. In this case, the data is batches of data from an [`Andromeda`] table. Each batch will be
#' presented to the function as a data frame.
#'
#' @seealso [groupApply()]
#'
#' @return
#' Invisibly returns a list of objects, where each object is the output of the user-supplied function
#' applied to a batch
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' fun <- function(x) {
#' return(nrow(x))
#' }
#'
#' result <- batchApply(andr$cars, fun, batchSize = 25)
#'
#' result
#' # [[1]]
#' # [1] 25
#' #
#' # [[2]]
#' # [1] 25
#'
#' close(andr)
#'
#' @export
batchApply <- function(tbl, fun, ..., batchSize = 100000, progressBar = FALSE, safe = FALSE) {
if (!inherits(tbl, "tbl_dbi"))
abort("First argument must be an Andromeda (or DBI) table")
if (!is.function(fun))
abort("Second argument must be a function")
if (safe) {
tempAndromeda <- andromeda()
on.exit(close(tempAndromeda))
tempAndromeda$tbl <- tbl
connection <- dbplyr::remote_con(tempAndromeda$tbl)
sql <- dbplyr::sql_render(tempAndromeda$tbl, connection)
} else {
connection <- dbplyr::remote_con(tbl)
sql <- dbplyr::sql_render(tbl, connection)
}
output <- list()
if (progressBar) {
pb <- txtProgressBar(style = 3)
suppressWarnings({
# suppress warning message: ORDER BY is ignored in subqueries without LIMIT
totalRows <- tbl %>% count() %>% pull()
})
completedRows <- 0
}
result <- DBI::dbSendQuery(connection, sql)
tryCatch({
while (!DBI::dbHasCompleted(result)) {
batch <- DBI::dbFetch(result, n = batchSize)
output[[length(output) + 1]] <- do.call(fun, append(list(batch), list(...)))
if (progressBar) {
completedRows <- completedRows + nrow(batch)
setTxtProgressBar(pb, completedRows/totalRows)
}
}
}, finally = {
DBI::dbClearResult(result)
if (progressBar) {
close(pb)
}
})
invisible(output)
}
#' Apply a function to groups of data in an Andromeda table
#'
#' @param tbl An [`Andromeda`] table (or any other 'DBI' table).
#' @param groupVariable The variable to group by
#' @param fun A function where the first argument is a data frame.
#' @param ... Additional parameters passed to fun.
#' @param batchSize Number of rows fetched from the table at a time. This is not the number of
#' rows to which the function will be applied. Included mostly for testing
#' purposes.
#' @param progressBar Show a progress bar?
#' @param safe Create a copy of `tbl` first? Allows writing to the same Andromeda as being
#' read from.
#'
#' @details
#' This function applies a function to groups of data. The groups are identified by unique values of
#' the `groupVariable`, which must be a variable in the table.
#'
#' @seealso [batchApply()]
#'
#' @return
#' Invisibly returns a list of objects, where each object is the output of the user-supplied function
#' applied to a group.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' fun <- function(x) {
#' return(tibble::tibble(speed = x$speed[1], meanDist = mean(x$dist)))
#' }
#'
#' result <- groupApply(andr$cars, "speed", fun)
#' result <- bind_rows(result)
#' result
#' # # A tibble: 19 x 2
#' # speed meanDist
#' # <dbl> <dbl>
#' # 1 4 6
#' # 2 7 13
#' # 3 8 16
#' # ...
#'
#' close(andr)
#'
#' @export
groupApply <- function(tbl, groupVariable, fun, ..., batchSize = 100000, progressBar = FALSE, safe = FALSE) {
if (!groupVariable %in% colnames(tbl))
abort(sprintf("'%s' is not a variable in the table", groupVariable))
env <- new.env()
assign("output", list(), envir = env)
wrapper <- function(data, userFun, groupVariable, env, ...) {
groups <- split(data, data[groupVariable])
if (!is.null(env$groupValue) && groups[[1]][1, groupVariable] == env$groupValue) {
groups[[1]] <- bind_rows(groups[[1]], env$groupData)
}
if (length(groups) > 1) {
results <- lapply(groups[1:(length(groups) - 1)], userFun, ...)
env$output <- append(env$output, results)
}
env$groupData <- groups[[length(groups)]]
env$groupValue <- groups[[length(groups)]][1, groupVariable]
}
  # Sort by the group variable so that each group's rows are contiguous:
  batchApply(tbl = tbl %>% arrange(.data[[groupVariable]]),
fun = wrapper,
userFun = fun,
env = env,
groupVariable = groupVariable,
...,
batchSize = batchSize,
progressBar = progressBar,
safe = safe)
output <- env$output
if (!is.null(env$groupData)) {
output[[length(output) + 1]] <- fun(env$groupData, ...)
names(output)[length(output)] <- as.character(env$groupValue)
}
rm(env)
invisible(output)
}
#' Append to an Andromeda table
#'
#' @param tbl An [`Andromeda`] table. This must be a base table (i.e. it cannot be a query result).
#' @param data The data to append. This can be either a data.frame or another Andromeda table.
#'
#' @description
#' Append a data frame, Andromeda table, or result of a query on an [`Andromeda`] table to an existing
#' [`Andromeda`] table.
#'
#' If data from another [`Andromeda`] is appended, a batch-wise copy process is used, which will be slower
#' than when appending data from within the same [`Andromeda`] object.
#'
#' **Important**: columns are appended based on column name, not on column order. The column names should
#' therefore be identical (but not necessarily in the same order).
#'
#' @return
#' Returns no value. Executed for the side-effect of appending the data to the table.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#' nrow(andr$cars)
#' # [1] 50
#'
#' appendToTable(andr$cars, cars)
#' nrow(andr$cars)
#' # [1] 100
#'
#' appendToTable(andr$cars, andr$cars %>% filter(speed > 10))
#' nrow(andr$cars)
#' # [1] 182
#'
#' close(andr)
#'
#' @export
appendToTable <- function(tbl, data) {
if (!inherits(tbl, "tbl_dbi"))
abort("First argument must be an Andromeda table")
tableName <- as.character(dbplyr::remote_name(tbl))
if (is.null(tableName))
abort("First argument must be a base table (cannot be a query result)")
connection <- dbplyr::remote_con(tbl)
.checkAvailableSpace(connection)
if (inherits(data, "data.frame")) {
RSQLite::dbWriteTable(conn = connection,
name = tableName,
value = data,
overwrite = FALSE,
append = TRUE)
} else if (inherits(data, "tbl_dbi")) {
if (isTRUE(all.equal(connection, dbplyr::remote_con(data)))) {
sql <- dbplyr::sql_render(select(data, all_of(colnames(tbl))), connection)
sql <- sprintf("INSERT INTO %s %s", tableName, sql)
RSQLite::dbExecute(connection, sql)
} else {
doBatchedAppend <- function(batch) {
RSQLite::dbWriteTable(conn = connection,
name = tableName,
value = batch,
overwrite = FALSE,
append = TRUE)
}
batchApply(data, doBatchedAppend)
}
}
invisible(NULL)
}
#' Apply a boolean test to batches of data in an Andromeda table and terminate early
#'
#' @param tbl An [`Andromeda`] table (or any other 'DBI' table).
#' @param fun A function where the first argument is a data frame and returns a logical value.
#' @param ... Additional parameters passed to `fun`.
#' @param batchSize Number of rows to fetch at a time.
#'
#' @details
#' This function applies a boolean test function to sets of
#' data and terminates at the first `FALSE`.
#'
#' @return
#' Returns `FALSE` if any of the calls to the user-supplied function returned `FALSE`, else returns `TRUE`.
#'
#' @examples
#' andr <- andromeda(cars = cars)
#'
#' fun <- function(x) {
#' is.unsorted(x %>% select(speed) %>% collect())
#' }
#'
#' result <- batchTest(andr$cars, fun, batchSize = 25)
#'
#' result
#' # [1] FALSE
#'
#' close(andr)
#'
#' @export
batchTest <- function(tbl, fun, ..., batchSize = 100000) {
if (!inherits(tbl, "tbl_dbi"))
abort("First argument must be an Andromeda (or DBI) table")
if (!is.function(fun))
abort("Second argument must be a function")
connection <- dbplyr::remote_con(tbl)
sql <- dbplyr::sql_render(tbl, connection)
result <- DBI::dbSendQuery(connection, sql)
output <- TRUE
tryCatch({
while (!DBI::dbHasCompleted(result) && output) {
batch <- DBI::dbFetch(result, n = batchSize)
output <- all(do.call(fun, append(list(batch), list(...))))
}
}, finally = {
DBI::dbClearResult(result)
})
return(output)
}
#' Restore dates
#'
#' @description
#' Restores dates that were converted by Andromeda to numeric values back to dates.
#'
#' @param x A numeric vector representing dates.
#'
#' @seealso [restorePosixct()]
#'
#' @return
#' A vector of type `Date`.
#'
#' @examples
#' myData <- data.frame(startDate = as.Date(c("2000-01-01", "2001-01-31", "2004-12-31")))
#' andr <- andromeda(myData = myData)
#'
#' andr$myData %>%
#' collect() %>%
#' mutate(startDate = restoreDate(startDate))
#' # # A tibble: 3 x 1
#' # startDate
#' # <date>
#' # 1 2000-01-01
#' # 2 2001-01-31
#' # 3 2004-12-31
#'
#' close(andr)
#'
#' @export
restoreDate <- function(x) {
if(inherits(x, "Date")) {
rlang::warn("Input to restoreDate is already a Date.")
return(x)
} else {
return(as.Date(x, origin = "1970-01-01"))
}
}
#' Restore timestamps
#'
#' @description
#' Restores datetimes that were converted by Andromeda to numeric values back to datetimes.
#'
#' @param x A numeric vector representing timestamps
#'
#' @seealso [restoreDate()]
#'
#' @return
#' A vector of type `POSIXct`.
#'
#' @examples
#' myData <- data.frame(startTime = as.POSIXct(c("2000-01-01 10:00",
#' "2001-01-31 11:00",
#' "2004-12-31 12:00")))
#' andr <- andromeda(myData = myData)
#'
#' andr$myData %>%
#' collect() %>%
#' mutate(startTime = restorePosixct(startTime))
#' # # A tibble: 3 x 1
#' # startTime
#' # <dttm>
#' # 1 2000-01-01 10:00:00
#' # 2 2001-01-31 11:00:00
#' # 3 2004-12-31 12:00:00
#'
#' close(andr)
#'
#' @export
restorePosixct <- function(x) {
if(inherits(x, "POSIXct")) {
rlang::warn("Input to restorePosixct is already Posixct")
return(x)
} else {
return(as.POSIXct(x, origin = "1970-01-01"))
}
}
| /scratch/gouwar.j/cran-all/cranData/Andromeda/R/Operations.R |
## ----echo = FALSE, message = FALSE, warning = FALSE---------------------------
library(Andromeda)
## ----eval=TRUE----------------------------------------------------------------
library(Andromeda)
andr <- andromeda()
## ----eval=TRUE----------------------------------------------------------------
andr$cars <- cars
andr
## ----eval=FALSE---------------------------------------------------------------
# andr <- andromeda(cars = cars)
## ----eval=TRUE----------------------------------------------------------------
appendToTable(andr$cars, cars)
## ----eval=TRUE----------------------------------------------------------------
andr2 <- andromeda()
andr2$cars <- andr$cars
## ----eval=TRUE----------------------------------------------------------------
andr3 <- copyAndromeda(andr)
## ----eval=TRUE----------------------------------------------------------------
close(andr)
close(andr2)
close(andr3)
## ----eval=TRUE----------------------------------------------------------------
isValidAndromeda(andr)
## ----eval=FALSE---------------------------------------------------------------
# options(andromedaTempFolder = "c:/andromedaTemp")
## ----eval=TRUE----------------------------------------------------------------
andr <- andromeda(cars = cars)
andr$cars %>%
filter(speed > 10) %>%
count() %>%
collect()
## ----eval=TRUE----------------------------------------------------------------
andr$fastCars <- andr$cars %>%
filter(speed > 10)
## ----eval=TRUE----------------------------------------------------------------
names(andr)
colnames(andr$cars)
## ----eval=TRUE----------------------------------------------------------------
RSQLite::dbGetQuery(andr, "SELECT * FROM cars LIMIT 5;")
## ----eval=TRUE----------------------------------------------------------------
myData <- data.frame(someTime = as.POSIXct(c("2000-01-01 10:00",
"2001-01-31 11:00",
"2004-12-31 12:00")),
someDate = as.Date(c("2000-01-01",
"2001-01-31",
"2004-12-31")))
andr$myData <- myData
andr$myData %>%
collect() %>%
mutate(someTime = restorePosixct(someTime),
someDate = restoreDate(someDate))
## ----eval=TRUE----------------------------------------------------------------
doSomething <- function(batch, multiplier) {
return(nrow(batch) * multiplier)
}
result <- batchApply(andr$cars, doSomething, multiplier = 2, batchSize = 10)
result <- unlist(result)
result
## ----eval=TRUE----------------------------------------------------------------
doSomething <- function(batch, multiplier) {
return(nrow(batch) * multiplier)
}
result <- groupApply(andr$cars %>% filter(speed > 10),
doSomething,
groupVariable = "speed",
multiplier = 2)
result <- unlist(result)
result
## ----eval=TRUE----------------------------------------------------------------
doSomething <- function(batch) {
batch$speedSquared <- batch$speed^2
if (is.null(andr$cars2)) {
andr$cars2 <- batch
} else {
appendToTable(andr$cars2, batch)
}
}
batchApply(andr$cars, doSomething, safe = TRUE)
## ----eval=TRUE----------------------------------------------------------------
andr$cars2 <-
andr$cars %>%
mutate(speedSquared = speed^2)
## ----eval=FALSE---------------------------------------------------------------
# saveAndromeda(andr, "c:/temp/andromeda.zip")
## ----eval=TRUE, echo=FALSE----------------------------------------------------
writeLines("Disconnected Andromeda. This data object can no longer be used")
## ----eval=FALSE---------------------------------------------------------------
# andr <- loadAndromeda("c:/temp/andromeda.zip")
## ----eval=FALSE---------------------------------------------------------------
# andr$cars %>%
# filter(speed > 10)
## ----eval=FALSE---------------------------------------------------------------
# andr$cars %>%
# filter(.data$speed > 10)
## ----eval=FALSE---------------------------------------------------------------
# speed <- 10
# andr$cars %>%
# filter(.data$speed == speed)
## ----eval=FALSE---------------------------------------------------------------
# speed <- 10
# andr$cars %>%
# filter(.data$speed == !!speed)
| /scratch/gouwar.j/cran-all/cranData/Andromeda/inst/doc/UsingAndromeda.R |
---
title: "Using Andromeda"
author: "Martijn J. Schuemie"
date: "`r Sys.Date()`"
output:
pdf_document:
number_sections: yes
toc: yes
html_document:
number_sections: yes
toc: yes
vignette: >
%\VignetteIndexEntry{Using Andromeda}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, echo = FALSE, message = FALSE, warning = FALSE}
library(Andromeda)
```
# Introduction
The `Andromeda` package provides the ability to work with data objects in R that are too large to fit in memory. Instead, these objects are stored on disk. This is slower than working from memory, but may be the only viable option. `Andromeda` aims to replace the now orphaned `ff` package.
## Permanence
To mimic the behavior of in-memory objects, when working with data in `Andromeda` the data is stored in a temporary location on the disk. You can modify the data as you can see fit, and when needed can save the data to a permanent location. Later this data can be loaded to a temporary location again and be read and modified, while keeping the saved data as is.
## Technology
`Andromeda` heavily relies on `RSQLite`, an R wrapper around SQLite. SQLite is a lightweight but very powerful single-user SQL database that can run from a single file on the local file system. Although SQLite and therefore `Andromeda` can be queried using SQL, `Andromeda` favors using `dbplyr`, a `dplyr` implementation, to work with the data.
# Creating an Andromeda object
Creating an empty `Andromeda` object is easy:
```{r eval=TRUE}
library(Andromeda)
andr <- andromeda()
```
We can add new tables to the `Andromeda` environment like this:
```{r eval=TRUE}
andr$cars <- cars
andr
```
We could have achieved the same by adding the table when creating the `Andromeda` object:
```{r eval=FALSE}
andr <- andromeda(cars = cars)
```
Of course, we probably want to add data to the `Andromeda` environment that is much larger than can fit in memory. One way to achieve this is by iteratively adding more and more data to the same table. As an example here we simply add the same data to the existing table:
```{r eval=TRUE}
appendToTable(andr$cars, cars)
```
The data to append should have the same columns as the existing data, or else an error will be thrown.
Data can be copied from one Andromeda to another:
```{r eval=TRUE}
andr2 <- andromeda()
andr2$cars <- andr$cars
```
For very large tables this may be slow. A faster option might be to copy the entire `Andromeda` object:
```{r eval=TRUE}
andr3 <- copyAndromeda(andr)
```
## Closing Andromeda objects
Every `Andromeda` object will have a corresponding data file in a temporary location on your local file system. This file will be automatically deleted when the `Andromeda` object is no longer used. It is best practice not to rely on R to decide when to do this, but to explicitly cause the file to be cleaned up by calling the `close` statement:
```{r eval=TRUE}
close(andr)
close(andr2)
close(andr3)
```
Once an `Andromeda` is closed the underlying file is deleted, and it can no longer be used. You can check whether an Andromeda object is still valid:
```{r eval=TRUE}
isValidAndromeda(andr)
```
## Temporary file location
By default `Andromeda` uses the default temporary file location of your operating system to store the Andromeda objects while you are working on them. You can override the location by setting the `andromedaTempFolder` option:
```{r eval=FALSE}
options(andromedaTempFolder = "c:/andromedaTemp")
```
This only applies to `Andromeda` objects that are created from that point onward. Prior objects will stay where they are.
# Querying data from an Andromeda object
`Andromeda` relies on `dbplyr`, a `dplyr` implementation, for querying the data. A key aspect of `dbplyr` is lazy execution. This means that we can define a query, but the query will not be executed until we explicitly request it using the `collect()` statement. For example, we may want to know the number of cars that can go faster than 10:
```{r eval=TRUE}
andr <- andromeda(cars = cars)
andr$cars %>%
filter(speed > 10) %>%
count() %>%
collect()
```
Here we first filter the table to those rows where `speed > 10`, after which we count the number of remaining records. This query is not executed until we call `collect()`. We can also have `Andromeda` call `collect` for us, by assigning the query to a table:
```{r eval=TRUE}
andr$fastCars <- andr$cars %>%
filter(speed > 10)
```
This way the query result does not have to pass through memory, but instead is directly stored in Andromeda.
## Simple meta-data
If we wish to know the tables that exist in an `Andromeda` object, we can call the generic `names` function. Similarly, we can call `colnames` to get the names of the columns in a table:
```{r eval=TRUE}
names(andr)
colnames(andr$cars)
```
## Using SQL
In the end, an `Andromeda` is still an `RSQLite` database, and can be queried using SQL:
```{r eval=TRUE}
RSQLite::dbGetQuery(andr, "SELECT * FROM cars LIMIT 5;")
```
However, for consistency it is recommended to use the `dplyr` functions instead.
## Dates and times
One limitation of SQLite - the technology underlying Andromeda - is that it does not support date and time formats. Dates and times are automatically converted to numeric values when adding them to an Andromeda table. Converting them back needs to be done manually:
```{r eval=TRUE}
myData <- data.frame(someTime = as.POSIXct(c("2000-01-01 10:00",
"2001-01-31 11:00",
"2004-12-31 12:00")),
someDate = as.Date(c("2000-01-01",
"2001-01-31",
"2004-12-31")))
andr$myData <- myData
andr$myData %>%
collect() %>%
mutate(someTime = restorePosixct(someTime),
someDate = restoreDate(someDate))
```
# Batch operations
Often we'll need to perform some action against the data that is not supported by `dplyr`. For example, we may want to write the data to a CSV file, or perform some complex computation. Since we cannot assume an entire table (or query result) will fit in memory, we must assume we should do this batch-wise. For this the `Andromeda` package provides two functions: `batchApply` and `groupApply`. The former executes a function on batches of predefined length. We can specify the batch size by setting the `batchSize` argument, which defaults to 100,000. Here is a silly example, where we take the number of rows in a batch and multiply it by some number:
```{r eval=TRUE}
doSomething <- function(batch, multiplier) {
return(nrow(batch) * multiplier)
}
result <- batchApply(andr$cars, doSomething, multiplier = 2, batchSize = 10)
result <- unlist(result)
result
```
Alternatively, using `groupApply` we can execute a function on groups of rows, defined by the value in some variable in the table. In this example, we first filter to fast cars, and then perform the same meaningless computation as in the previous example, this time on groups of rows defined by having the same speed:
```{r eval=TRUE}
doSomething <- function(batch, multiplier) {
return(nrow(batch) * multiplier)
}
result <- groupApply(andr$cars %>% filter(speed > 10),
doSomething,
groupVariable = "speed",
multiplier = 2)
result <- unlist(result)
result
```
(For example, there were 2 rows where `speed = 11`, and multiplied by 2 this gives 4 for item 11.)
## Safe mode
For technical reasons it is not possible to write to the same `Andromeda` while reading from it. Writing to an `Andromeda` environment while inside a `batchApply` or `groupApply` on that same Andromeda environment will therefore result in an error message. To avoid this, you can set the `safe` argument to `TRUE`. This will cause the table to first be copied to a temporary `Andromeda` before executing the function. However, this might not be very fast:
```{r eval=TRUE}
doSomething <- function(batch) {
batch$speedSquared <- batch$speed^2
if (is.null(andr$cars2)) {
andr$cars2 <- batch
} else {
appendToTable(andr$cars2, batch)
}
}
batchApply(andr$cars, doSomething, safe = TRUE)
```
Note that this is only a restriction of `batchApply` and `groupApply`. We could have achieved the same task as the example above much faster using only `dplyr`:
```{r eval=TRUE}
andr$cars2 <-
andr$cars %>%
mutate(speedSquared = speed^2)
```
# Saving and loading Andromeda objects
To reuse an `Andromeda` at a later point in time, we can save it to a permanent location:
```{r eval=FALSE}
saveAndromeda(andr, "c:/temp/andromeda.zip")
```
```{r eval=TRUE, echo=FALSE}
writeLines("Disconnected Andromeda. This data object can no longer be used")
```
For technical reasons, saving an Andromeda object closes it. If we want to continue using the Andromeda object, we can set `maintainConnection` to `TRUE` when calling `saveAndromeda`. This causes a temporary copy to be created first, which is then saved and closed. Obviously this will take additional time, so if you know you will no longer need the object in R after saving, it is best not to use this option.
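For example, a minimal sketch, assuming `andr` is still an open `Andromeda` object:

```{r eval=FALSE}
saveAndromeda(andr, "c:/temp/andromeda.zip", maintainConnection = TRUE)

# The connection was maintained, so the object remains usable:
isValidAndromeda(andr)
# TRUE
```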
We can load the object again using:
```{r eval=FALSE}
andr <- loadAndromeda("c:/temp/andromeda.zip")
```
# Using Andromeda in your packages
Andromeda is intended to be used inside of other packages. Here are some tips:
## Import dplyr
Make sure `dplyr` is imported in the NAMESPACE. That way all `dplyr` functions can be used on the Andromeda objects in your functions.
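For example, if the NAMESPACE file is generated with `roxygen2`, a minimal sketch of such an import (the file name is just an illustration) could look like this:

```{r eval=FALSE}
# In, for example, R/MyPackage-package.R:
#' @import dplyr
NULL
```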
## Import .data from rlang
If we reference variables in our `dplyr` function calls we should precede them with `.data$` to avoid R check warnings about using an unknown variable. For example, instead of
```{r eval=FALSE}
andr$cars %>%
filter(speed > 10)
```
we should write
```{r eval=FALSE}
andr$cars %>%
filter(.data$speed > 10)
```
to avoid R check warnings about `speed` being an unknown variable.
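The `.data` pronoun itself can be imported from the `rlang` package to avoid a check note about `.data` being undefined. A minimal sketch, again assuming a `roxygen2`-generated NAMESPACE:

```{r eval=FALSE}
#' @importFrom rlang .data
NULL
```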
## Beware of variable name confusion
`dplyr` sometimes confuses variable names, so we have to help. For example, this code:
```{r eval=FALSE}
speed <- 10
andr$cars %>%
filter(.data$speed == speed)
```
will not actually filter anything, because the second `speed` variable in the filter statement is interpreted to refer to the `speed` field in the data, not the variable we defined earlier. One way to avoid this is by forcing early evaluation of the variable:
```{r eval=FALSE}
speed <- 10
andr$cars %>%
filter(.data$speed == !!speed)
```
| /scratch/gouwar.j/cran-all/cranData/Andromeda/inst/doc/UsingAndromeda.Rmd |
---
title: "Using Andromeda"
author: "Martijn J. Schuemie"
date: "`r Sys.Date()`"
output:
pdf_document:
number_sections: yes
toc: yes
html_document:
number_sections: yes
toc: yes
vignette: >
%\VignetteIndexEntry{Using Andromeda}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, echo = FALSE, message = FALSE, warning = FALSE}
library(Andromeda)
```
# Introduction
The `Andromeda` package provides the ability to work with data objects in R that are too large to fit in memory. Instead, these objects are stored on disk. This is slower than working from memory, but may be the only viable option. `Andromeda` aims to replace the now orphaned `ff` package.
## Permanence
To mimic the behavior of in-memory objects, when working with data in `Andromeda` the data is stored in a temporary location on the disk. You can modify the data as you can see fit, and when needed can save the data to a permanent location. Later this data can be loaded to a temporary location again and be read and modified, while keeping the saved data as is.
## Technology
`Andromeda` heavily relies on `RSQLite`, an R wrapper around SQLite. SQLite is a lightweight but very powerful single-user SQL database that can run from a single file on the local file system. Although SQLite and therefore `Andromeda` can be queried using SQL, `Andromeda` favors using `dbplyr`, a `dplyr` implementation, to work with the data.
# Creating an Andromeda object
Creating an empty `Andromeda` object is easy:
```{r eval=TRUE}
library(Andromeda)
andr <- andromeda()
```
We can add new tables to the `Andromeda` environment like this:
```{r eval=TRUE}
andr$cars <- cars
andr
```
We could have achieved the same by adding the table when creating the `Andromeda` object:
```{r eval=FALSE}
andr <- andromeda(cars = cars)
```
Of course, we probably want to add data to the `Andromeda` environment that is much larger than can fit in memory. One way to achieve this is by iteratively adding more and more data to the same table. As an example here we simply add the same data to the existing table:
```{r eval=TRUE}
appendToTable(andr$cars, cars)
```
The data to append should have the same columns as the existing data, or else an error will be thrown.
Data can be copied from one Andromeda to another:
```{r eval=TRUE}
andr2 <- andromeda()
andr2$cars <- andr$cars
```
For very large tables this may be slow. A faster option might be to copy the entire `Andromeda` object:
```{r eval=TRUE}
andr3 <- copyAndromeda(andr)
```
## Closing Andromeda objects
Every `Andromeda` object will have a corresponding data file in a temporary location on your local file system. This file will be automatically deleted when the `Andromeda` object is no longer used. It is best practice not to rely on R to decide when to do this, but to explicitly cause the file to be cleaned up by calling the `close` statement:
```{r eval=TRUE}
close(andr)
close(andr2)
close(andr3)
```
Once an `Andromeda` is closed the underlying file is deleted, and it can no longer be used. You can check whether an Andromeda object is still valid:
```{r eval=TRUE}
isValidAndromeda(andr)
```
## Temporary file location
By default `Andromeda` uses the default temporary file location of your operating system to store the Andromeda objects while you are working on them. You can override the location by setting the `andromedaTempFolder` option:
```{r eval=FALSE}
options(andromedaTempFolder = "c:/andromedaTemp")
```
This only applies to `Andromeda` objects that are created from that point onward. Prior objects will stay where they are.
# Querying data from an Andromeda object
`Andromeda` relies on `dbplyr`, a `dplyr` implementation, for querying the data. A key aspect of `dbplyr` is lazy execution. This means that we can define a query, but the query will not be executed until we explicitly request it using the `collect()` statement. For example, we may want to know the number of cars that can go faster than 10:
```{r eval=TRUE}
andr <- andromeda(cars = cars)
andr$cars %>%
filter(speed > 10) %>%
count() %>%
collect()
```
Here we first filter the table to those rows where `speed > 10`, after which we count the number of remaining records. This query is not executed until we call `collect()`. We can also have `Andromeda` call `collect` for us, by assigning the query to a table:
```{r eval=TRUE}
andr$fastCars <- andr$cars %>%
filter(speed > 10)
```
This way the query result does not have to pass through memory, but instead is directly stored in Andromeda.
## Simple meta-data
If we wish to know the tables that exist in an `Andromeda` object, we can call the generic `names` function. Similarly, we can call `colnames` to get the names of the columns in a table:
```{r eval=TRUE}
names(andr)
colnames(andr$cars)
```
## Using SQL
In the end, an `Andromeda` is still an `RSQLite` database, and can be queried using SQL:
```{r eval=TRUE}
RSQLite::dbGetQuery(andr, "SELECT * FROM cars LIMIT 5;")
```
However, for consistency it is recommended to use the `dplyr` functions instead.
## Dates and times
One limitation of SQLite - the technology underlying Andromeda - is that it does not support date and time formats. Dates and times are automatically converted to numeric values when adding them to an Andromeda table. Converting them back needs to be done manually:
```{r eval=TRUE}
myData <- data.frame(someTime = as.POSIXct(c("2000-01-01 10:00",
"2001-01-31 11:00",
"2004-12-31 12:00")),
someDate = as.Date(c("2000-01-01",
"2001-01-31",
"2004-12-31")))
andr$myData <- myData
andr$myData %>%
collect() %>%
mutate(someTime = restorePosixct(someTime),
someDate = restoreDate(someDate))
```
# Batch operations
Often we'll need to perform some action against the data that is not supported by `dplyr`. For example, we may want to write the data to a CSV file, or perform some complex computation. Since we cannot assume an entire table (or query result) will fit in memory, we must assume we should do this batch-wise. For this the `Andromeda` package provides two functions: `batchApply` and `groupApply`. The former executes a function on batches of predefined length. We can specify the batch size by setting the `batchSize` argument, which defaults to 100,000. Here is a silly example, where we take the number of rows in a batch and multiply it by some number:
```{r eval=TRUE}
doSomething <- function(batch, multiplier) {
return(nrow(batch) * multiplier)
}
result <- batchApply(andr$cars, doSomething, multiplier = 2, batchSize = 10)
result <- unlist(result)
result
```
Alternatively, using `groupApply` we can execute a function on groups of rows, defined by the value in some variable in the table. In this example, we first filter to fast cars, and then perform the same meaningless computation as in the previous example, this time on groups of rows defined by having the same speed:
```{r eval=TRUE}
doSomething <- function(batch, multiplier) {
return(nrow(batch) * multiplier)
}
result <- groupApply(andr$cars %>% filter(speed > 10),
doSomething,
groupVariable = "speed",
multiplier = 2)
result <- unlist(result)
result
```
(For example, there were 2 rows where `speed = 11`, and multiplied by 2 this gives 4 for item 11.)
## Safe mode
For technical reasons it is not possible to write to the same `Andromeda` while reading from it. Writing to an `Andromeda` object while inside a `batchApply` or `groupApply` on that same object will therefore result in an error message. To avoid this, you can set the `safe` argument to `TRUE`. This will cause the table to first be copied to a temporary `Andromeda` before executing the function. However, this might not be very fast:
```{r eval=TRUE}
doSomething <- function(batch) {
batch$speedSquared <- batch$speed^2
if (is.null(andr$cars2)) {
andr$cars2 <- batch
} else {
appendToTable(andr$cars2, batch)
}
}
batchApply(andr$cars, doSomething, safe = TRUE)
```
Note that this is only a restriction of `batchApply` and `groupApply`. We could have achieved the same result as the example above much faster using only `dplyr`:
```{r eval=TRUE}
andr$cars2 <-
andr$cars %>%
mutate(speedSquared = speed^2)
```
# Saving and loading Andromeda objects
To reuse an `Andromeda` at a later point in time, we can save it to a permanent location:
```{r eval=FALSE}
saveAndromeda(andr, "c:/temp/andromeda.zip")
```
```{r eval=TRUE, echo=FALSE}
writeLines("Disconnected Andromeda. This data object can no longer be used")
```
For technical reasons, saving an Andromeda object closes it. If we want to continue using the Andromeda object, we can set `maintainConnection` to `TRUE` when calling `saveAndromeda`. This causes a temporary copy to be created first, which is then saved and closed. Obviously this will take additional time, so if you know you will no longer need the object in R after saving, it is best not to use this option.
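For example, a sketch of saving while keeping the object usable (the file path is just a placeholder):
```{r eval=FALSE}
saveAndromeda(andr, "c:/temp/andromeda.zip", maintainConnection = TRUE)
# andr can still be queried here, at the cost of the temporary copy
```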
We can load the object again using:
```{r eval=FALSE}
andr <- loadAndromeda("c:/temp/andromeda.zip")
```
# Using Andromeda in your packages
Andromeda is intended to be used inside other packages. Here are some tips:
## Import dplyr
Make sure `dplyr` is imported in the NAMESPACE. That way all `dplyr` functions can be used on the Andromeda objects in your functions.
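For example, if roxygen2 generates your NAMESPACE file (an assumption; adapt this if you maintain NAMESPACE by hand), a package-level import tag is enough:
```{r eval=FALSE}
# In one of your package's R files:
#' @import dplyr
NULL
```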
## Import .data from rlang
If we reference variables in our `dplyr` function calls, we should precede them with `.data$`. For example, instead of
```{r eval=FALSE}
andr$cars %>%
filter(speed > 10)
```
we should write
```{r eval=FALSE}
andr$cars %>%
filter(.data$speed > 10)
```
to avoid R check warnings about `speed` being an unknown variable.
## Beware of variable name confusion
`dplyr` sometimes confuses variable names, so we have to help. For example, this code:
```{r eval=FALSE}
speed <- 10
andr$cars %>%
filter(.data$speed == speed)
```
will not actually filter anything, because the second `speed` variable in the filter statement is interpreted to refer to the `speed` field in the data, not the variable we defined earlier. One way to avoid this is by forcing early evaluation of the variable:
```{r eval=FALSE}
speed <- 10
andr$cars %>%
filter(.data$speed == !!speed)
```
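Alternatively (a sketch), we can sidestep the clash by giving the local variable a name that does not match any column:
```{r eval=FALSE}
speedThreshold <- 10
andr$cars %>%
  filter(.data$speed == speedThreshold)
```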
| /scratch/gouwar.j/cran-all/cranData/Andromeda/vignettes/UsingAndromeda.Rmd |
#' Calculate the Relative Standard Error of a numeric vector
#'
#' @author Steven H. Ranney
#'
#' @description Calculates relative standard error of a vector of numbers.
#'
#' @return This function returns a single value that is the relative standard
#' error of a vector of numbers.
#'
#' @param x The numeric vector of numbers from which relative standard error
#' should be calculated.
#'
#' @details Relative standard error is returned as a proportion. It is sometimes
#' also referred to as "proportional standard error."
#'
#' @details Relative standard error is the standard error divided by the mean:
#' \deqn{RSE = \frac{s/\sqrt{n}}{\bar{x}}}{RSE = (s/sqrt(n))/mean(x)}
#'
#' @references Malvestuto, S. P. 1996. Sampling the recreational creel. Pages
#' 591-623 in B. R. Murphy and D. W. Willis, editors. Fisheries techniques,
#' 2nd edition. American Fisheries Society, Bethesda, Maryland.
#'
#' @examples
#' calculate_rse(rnorm(100, 10, 3))
#'
#' @importFrom stats sd
#' @export
calculate_rse <- function(x){
  if(is.null(x) || length(x) < 2){
    stop("Value is either NULL or length(x) is < 2.")
  } else {
    # Standard error of the mean, divided by the mean
    std_error <- sd(x)/sqrt(length(x))
    return(std_error/mean(x))
  }
}
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/calculate_rse.R |
# Created 3/25/14
#' Conduct multiple simulations of a survey
#'
#' @author Steven H. Ranney
#'
#' @description This function uses \code{make_anglers} and \code{get_total_values}
#' to conduct multiple bus-route or traditional access point creel surveys (from
#' the number provided to the \code{n_sims} argument) of a population of anglers.
#'
#' @return Estimated effort (\code{Ehat}), the catch rate calculated by the ratio
#' of means (\code{catch_rate_ROM}), the true catch, the true effort, and the actual
#' mean catch rate (\code{mean_lambda}).
#'
#' @param n_sims The number of simulations to be conducted in the simulation of interest.
#' @param ... Arguments to be passed to other subfunctions
#'
#' @details Because this function is merely a wrapper for the \code{\link{simulate_bus_route}}
#' code, the user still needs to set \code{start_time}, \code{wait_time},
#' \code{n_anglers}, \code{n_sites}, and \code{sampling_prob} as objects. These
#' can be passed through the \code{...} argument or through setting \code{wait_time}
#' and others outside of the function call itself.
#'
#' @seealso \code{\link{make_anglers}}
#' @seealso \code{\link{get_total_values}}
#' @seealso \code{\link{simulate_bus_route}}
#'
#' @examples
#'
#' #Simulation 1
#' start_time <- c(1, 3.5, 6.5)
#' wait_time <- c(2, 2, 3)
#' n_anglers <- c(10,10,50)
#' n_sites <- 3
#' sampling_prob <- sum(wait_time)/12
#' mean_catch_rate <- 3
#'
#' n_sims <- 10
#'
#' set.seed(256)
#'
#' conduct_multiple_surveys(n_sims = n_sims, start_time = start_time, wait_time = wait_time,
#' n_anglers = n_anglers, n_sites = n_sites,
#' sampling_prob = sampling_prob, mean_catch_rate = mean_catch_rate)
#'
#' #Simulation 2
#' start_time <- 0
#' wait_time <- 8
#' n_anglers <- 100
#' n_sites <- 1
#' sampling_prob <- 8/10
#' mean_catch_rate <- 2.5
#'
#' #One survey/week for a year
#' conduct_multiple_surveys(n_sims = 52, start_time, wait_time, n_anglers, n_sites, sampling_prob,
#' mean_catch_rate, fishing_day_length = 10)
#'
#' @export
conduct_multiple_surveys <- function(n_sims, ...){
  # Preallocate one row per simulated survey
  bus_route <- as.data.frame(matrix(data = NA, ncol = 5, nrow = n_sims, byrow = TRUE))
  names(bus_route) <- c("Ehat", "catch_rate_ROM", "true_catch", "true_effort", "mean_lambda")
  for(i in 1:nrow(bus_route)){
    bus_route[i, ] <- simulate_bus_route(...)
  }
  return(bus_route)
} | /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/conduct_multiple_surveys.R
if(getRversion() >= "2.15.1") utils::globalVariables(c("true_effort", "Ehat",
"true_catch", "catch_rate_ROM"))
# Created: 1/10/15
#' Create a plot from a creel survey simulation
#'
#' @author Steven H. Ranney
#'
#' @description Generates a plot of either \code{Ehat} or \code{Ehat*catch_rate_ROM}
#' as a function of \code{true_effort} or \code{true_catch}, respectively. Adds
#' the fitted \code{\link{lm}} line to the plot and returns the \code{\link{summary}}
#' of the fitted model.
#'
#' @param data The data frame from which to draw the \code{Ehat} and \code{true_effort}
#' values
#' @param value The value of interest from the simulation, either \code{"effort"}
#' (the default) or \code{"catch"}.
#' @param color The color of the points in the plot, passed to \code{\link{ggplot}}.
#'
#' @examples
#'
#' start_time <- 0
#' wait_time <- 8
#' n_anglers <- 50
#' n_sites <- 1
#' sampling_prob <- wait_time/12
#' mean_catch_rate <- 10
#'
#' tmp <- conduct_multiple_surveys(91, start_time, wait_time, n_anglers, n_sites, sampling_prob,
#' mean_catch_rate, fishing_day_length = 12, mean_trip_length = 4)
#'
#' create_plot_from_simulation(tmp, "catch")
#'
#' @export
create_plot_from_simulation <- function(data, value = "effort", color = "black"){
  if(!value %in% c("effort", "catch")){
    stop("value must be either 'effort' or 'catch'.")
  }
  if(value == "effort"){
    mod <- lm(Ehat ~ true_effort, data = data)
    g <- 
      data %>%
      ggplot2::ggplot(ggplot2::aes(x = true_effort, y = Ehat)) +
      ggplot2::geom_point(colour = color) +
      ggplot2::labs(x = "Actual effort", y = "Estimated effort") + 
      ggplot2::geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2], 
                           colour = "red", size = 1.01)
  }
  if(value == "catch"){
    mod <- lm((Ehat*catch_rate_ROM) ~ true_catch, data = data)
    g <- 
      data %>%
      ggplot2::ggplot(ggplot2::aes(x = true_catch, y = Ehat*catch_rate_ROM)) +
      ggplot2::geom_point(colour = color) +
      ggplot2::labs(x = "Actual catch", y = "Estimated catch") + 
      ggplot2::geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2], 
                           colour = "red", size = 1.01)
  }
  print(g)
  return(summary(mod))
}
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/create_plot_from_simulation.R |
# Created: 12/19/13
#' Conduct a creel survey of a population of anglers at an access site.
#'
#' @author Steven H. Ranney
#'
#' @description This function uses the output from \code{make_anglers} to conduct
#' a bus-route or traditional access point creel survey of the population of anglers
#' from \code{make_anglers} and provide clerk-observed counts of anglers and their effort.
#'
#' @param data The dataframe returned from \code{\link{make_anglers}}
#'
#' @param start_time The start time of the clerk.
#'
#' @param end_time the end time of the clerk.
#'
#' @param wait_time the wait time of the clerk.
#'
#' @param sampling_prob The sampling probability of the survey. The default is
#' \code{1} but will need to be changed if the survey is conducted during only
#' half of the fishing day (i.e., \code{.5}) or over longer time periods (e.g.,
#' \code{9.5/12}, if the survey is 9.5 hours long and the fishing day length is 12 hours)
#'
#' @param mean_catch_rate The mean catch rate for the fishery.
#'
#' @param ... Arguments to be passed to other functions.
#'
#' @details Total effort is the sum of the trip lengths from \code{data}
#'
#' @details The total number of anglers is equal to the \code{nrow()} of the
#' dataframe in \code{data}
#'
#' @details Catch rates are assigned to anglers based upon the Gamma distribution
#' with a mean of \code{mean_catch_rate}
#'
#' @details If both \code{end_time=NULL} and \code{wait_time=NULL} then \code{wait_time}
#' will be 0.5 (one-half hour). If a value is passed to \code{end_time}, then
#' \code{wait_time} becomes \code{end_time - start_time}.
#'
#' @details If \code{start_time=NULL}, then a \code{start_time} is generated from the
#' uniform distribution between \code{0} and \code{11.5} hours into the fishing day.
#'
#' @details If \code{end_time=NULL}, then \code{end_time = start_time+wait_time}
#'
#' @details Incomplete trip effort is observed two ways: 1) by counting anglers
#' that were at the site for the entire time that the surveyor was at the site
#' and 2) counting anglers that arrived after the surveyor arrived at the site
#' but remained at the site after the surveyor left. These anglers are counted
#' and their effort calculated based upon surveyor \code{start_time} and \code{end_time}.
#'
#' @details Completed trip effort is observed two ways: 1) by interviewing anglers
#' that left while the surveyor was at the site. The surveyor can determine
#' effort and catch. 2) by interviewing anglers that both arrived and departed
#' while the surveyor was on site. When \code{wait_time} is short, these cases
#' are rare; however, when \code{wait_time} is long (e.g., all day), then these
#' cases are much more likely.
#'
#' @details Trip lengths of observed trips (both incomplete and complete) are
#' scaled by the \code{sampling_prob} value. The \code{sampling_prob} is used to estimate
#' effort and catch.
#'
#' @references Pollock, K. H., C. M. Jones, and T. L. Brown. 1994. Angler survey
#' methods and their applications in fisheries management. American Fisheries
#' Society, Special Publication 25, Bethesda, Maryland.
#'
#' @examples
#' library(dplyr)
#' set.seed(256)
#'
#' start_time = .001 #start of fishing day
#' end_time = 12 #end of fishing day
#' mean_catch_rate = 0.1 #this will cause VERY few fish to be caught!
#'
#' make_anglers(100) %>%
#' get_total_values(start_time = start_time,
#' end_time = end_time, mean_catch_rate = mean_catch_rate)
#'
#' start_time = .001 #start of fishing day
#' end_time = 6 #halfway through the fishing day
#' sampling_prob = .5 #this needs to be .5 because we are sampling only 50% of the fishing day
#' mean_catch_rate = 0.1 #this will cause VERY few fish to be caught!
#'
#' make_anglers(100) %>%
#' get_total_values(start_time = start_time, end_time = end_time,
#' sampling_prob = sampling_prob, mean_catch_rate = mean_catch_rate)
#'
#' @export
get_total_values <- function(data, start_time = NULL, end_time = NULL,
wait_time = NULL, sampling_prob = 1,
mean_catch_rate = NULL, ...){
t_effort <- sum(data$trip_length)
n_anglers <- nrow(data)
lambda <- rgamma(n_anglers, mean_catch_rate)
#Calculate true total catch for all anglers
total_catch <- sum(data$trip_length * lambda)
  data <- 
    data %>%
    mutate(catch = trip_length * lambda)
#Provide a 'standard' wait time of .5 hours for the clerk
if(is.null(wait_time) & is.null(end_time)){
wait_time <- 0.5
}
if(!is.null(end_time)){
wait_time <- end_time - start_time
}
if(is.null(start_time)){
start_time <- runif(1, 0, 11.5)
}
# how long into the fishing day did the creel clerk arrive?
if(is.null(end_time)){
end_time <- start_time + wait_time # how long into the fishing day did the creel clerk depart?
}
################
#Effort of anglers that were onsite for the duration of the time that the clerk
# was onsite
#How many anglers were at the site the entire time the creel surveyor was there?
n_anglers_entire_time <- length(which(data$start_time <= start_time & data$departure_time >= end_time))
entire_time <- which(data$start_time <= start_time & data$departure_time >= end_time)
#how long were the anglers that arrived after the creel there before the clerk left?
if(n_anglers_entire_time > 0){
entire_time_sum_effort <- n_anglers_entire_time * (wait_time)
} else {
entire_time_sum_effort <- 0
}
################
#Effort of anglers that arrived after the clerk arrived and stayed beyond the
# clerk's wait time
#how many anglers arrived while the clerk was on site?
angler_arrivals <- length(which(data$start_time > start_time & data$start_time < end_time & data$departure_time > end_time))
arrivals <- which(data$start_time > start_time & data$start_time < end_time & data$departure_time > end_time)
#how long were the anglers that arrived after the creel there before the clerk left?
if(angler_arrivals > 0){
arrival_sum_effort <- sum(end_time - data$start_time[arrivals])
} else {
arrival_sum_effort <- 0
}
################
#Completed trip information; i.e., anglers that LEFT while the creel clerk
# was on site
#Did any anglers depart (complete their trips?) while the creel clerk was there
#OR did any anglers both arrive AND depart while the clerk was on site?
angler_departures <- length(which(data$start_time < start_time & (start_time < data$departure_time) & (data$departure_time < end_time)))
which_angler_departures <- which(data$start_time < start_time & (start_time < data$departure_time) & (data$departure_time < end_time))
arr_dep <- length(which(data$start_time > start_time & data$departure_time < end_time))
which_arr_dep <- which(data$start_time > start_time & data$departure_time < end_time)
completed_trips <- c(which(data$start_time < start_time & data$departure_time < end_time & data$departure_time > start_time),
which(data$start_time > start_time & data$departure_time < end_time))
if((angler_departures + arr_dep) > 0){
total_completed_trip_effort <- sum(data$trip_length[completed_trips]/sampling_prob)
total_completed_trip_catch <- sum(data$catch[completed_trips]/sampling_prob)
} else {
total_completed_trip_effort <- 0
total_completed_trip_catch <- 0
}
  #Truncate trip lengths to the portion of each trip the clerk observed
data$trip_length[entire_time] <- wait_time
data$trip_length[arrivals] <- end_time - data$start_time[arrivals]
data$trip_length[which_angler_departures] <- data$departure_time[which_angler_departures] - start_time
data$trip_length[which_arr_dep] <- data$departure_time[which_arr_dep] - data$start_time[which_arr_dep]
data$trip_length[completed_trips] <- data$departure_time[completed_trips] - data$start_time[completed_trips]
  #Scale trip length based upon the sampling probability
data$trip_length_adj <- data$trip_length/sampling_prob
observed_trips <- data$trip_length_adj[c(entire_time, arrivals, which_angler_departures, which_arr_dep)]
n_observed_trips <- length(observed_trips)
total_observed_trip_effort <- sum(observed_trips)
data.frame(n_observed_trips = n_observed_trips,
total_observed_trip_effort = total_observed_trip_effort,
n_completed_trips = sum(angler_departures, arr_dep),
total_completed_trip_effort = total_completed_trip_effort,
total_completed_trip_catch = total_completed_trip_catch,
start_time = start_time,
wait_time = wait_time,
total_catch = total_catch,
true_effort = t_effort,
mean_lambda = mean(lambda)) %>%
return()
}
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/get_total_values.R |
# Created: 12/19/13
#' @title Create a population of anglers.
#'
#' @author Steven H. Ranney
#'
#' @description Creates a population of \code{n_anglers} with trip length and fishing day length provided by the user.
#'
#' @param n_anglers The number of anglers in the population
#'
#' @param mean_trip_length The mean trip length to be used in the function. \code{3.88}
#' is the default. The default is from data from the 2008 Lake Roosevelt Fishing
#' Evaluation Program.
#'
#' @param fishing_day_length The fishing day length to be used in the function.
#' Anglers are not be allowed to be fishing past this day length. The default here
#' is set to 12 hours, which may not be a suitable day length for fisheries at higher
#' latitudes (i.e., sunrise-sunset is > 12 hours) or during shorter seasons.
#'
#' @details All trip lengths will be limited so that anglers have finished their
#' fishing trip by the end of the fishing day. The function uses a \code{while}
#' loop to ensure that the number of anglers = \code{n_anglers} provided in the
#' function argument. \code{fishing_day_length} is passed to the argument. The
#' default is set to 12 hours.
#'
#' @details \code{start_time} values are assigned by the uniform distribution
#'
#' @details \code{trip_length} values are assigned by the gamma distribution where the
#' default mean value comes from the 2008 Lake Roosevelt Fisheries Evaluation Program data.
#'
#' @return A data frame that includes the variables \code{start_time}, \code{trip_length},
#' and \code{departure_time}. Summing the \code{trip_length} field returns the true
#' fishing effort.
#'
#' @examples
#' make_anglers(100, mean_trip_length = 4, fishing_day_length = 10)
#' #make_anglers(10000)
#'
#' @export
make_anglers <- function(n_anglers = 100,
mean_trip_length = 3.88,
fishing_day_length = 12){
  i <- 1
  startTime <- tripLength <- departureTime <- NULL
  # Draw candidate anglers until n_anglers trips fit within the fishing day
  while(i <= n_anglers){
    startTime.tmp <- c(runif(1, 0, fishing_day_length - 0.25))
    tripLength.tmp <- rgamma(1, mean_trip_length, scale = 1)
    departureTime.tmp <- startTime.tmp + tripLength.tmp
    # Keep only anglers whose trips end before the fishing day is over
    if(departureTime.tmp < fishing_day_length){
      i <- i + 1
      startTime <- c(startTime, startTime.tmp)
      tripLength <- c(tripLength, tripLength.tmp)
      departureTime <- c(departureTime, departureTime.tmp)
    }
  }
anglers <-
data.frame(start_time = startTime,
trip_length = tripLength,
departure_time = departureTime)
return(anglers)
}
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/make_anglers.R |
# Created 12/31/14
#' Plot multiple ggplots in one window
#'
#' @author Steven H. Ranney
#'
#' @description This function takes multiple \code{\link{ggplot}} objects and
#' puts them in one plot window.
#'
#' @param ... The plots to include in the window
#' @param plotlist If a \code{plotlist} exists, it should be included here
#' @param file Currently unused.
#' @param cols How many columns should the window have?
#' @param layout A matrix specifying the layout. If present, \code{cols} is ignored.
#'
#' @details If the layout is something like \code{matrix(c(1,2,3,3), nrow=2, byrow=TRUE)},
#' then plot 1 will go in the upper left, 2 will go in the upper right, and
#' 3 will go all the way across the bottom.
#'
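#' @examples
#' # A minimal sketch of the layout behavior described above (the plot
#' # objects are illustrative only; any ggplot objects will do):
#' library(ggplot2)
#' p1 <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
#' p2 <- ggplot(mtcars, aes(factor(cyl))) + geom_bar()
#' p3 <- ggplot(mtcars, aes(hp)) + geom_histogram(bins = 10)
#' plot_multiple_objects(p1, p2, p3,
#'                       layout = matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE))
#'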
#' @export
plot_multiple_objects <- function(..., plotlist=NULL, file, cols=1, layout=NULL){
  #Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid::grid.newpage()
    grid::pushViewport(grid::viewport(layout = grid::grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = grid::viewport(layout.pos.row = matchidx$row,
                                            layout.pos.col = matchidx$col))
    } #end for loop
  } # end else
}
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/plot_multiple_objects.R |
# Created: 12/19/13
#' Simulate a bus route survey
#'
#' @author Steven H. Ranney
#'
#' @description This function uses the output from \code{make_anglers} and
#' \code{get_total_values} to conduct a bus-route or traditional access point
#' creel survey of the population of anglers from \code{make_anglers} and
#' provide clerk-observed counts of anglers and their effort.
#'
#' @return Estimated effort (\code{Ehat}), the catch rate calculated by the ratio of means
#' (\code{catch_rate_ROM}), the true catch, the true effort, and the actual mean catch rate (\code{mean_lambda}).
#'
#' @param start_time The start time of the surveyor at each site. This can be a
#' vector of start times to simulate a bus route or a single \code{start_time} to simulate
#' a traditional access survey.
#' @param wait_time The wait time of the surveyor at each site. This can be a
#' vector of wait times to simulate a bus route or a single \code{wait_time} to simulate
#' a traditional access survey.
#' @param n_anglers the number of anglers at each site, either a vector or a single number
#' for single sites
#' @param n_sites The number of sites being visited.
#' @param sampling_prob What is the sampling probability for the survey? If all
#' sites will be visited during the first or second half of the fishing day,
#' \code{sampling_prob = 0.5}. If the survey will take the entire fishing day, then
#' \code{sampling_prob = 1}.
#' @param mean_catch_rate The mean catch rate for the fishery
#' @param ... Arguments to be passed to other subfunctions, specifically to the
#' \code{\link{make_anglers}} function, including \code{mean_trip_length} and
#' \code{fishing_day_length}.
#'
#' @seealso \code{\link{make_anglers}}
#' @seealso \code{\link{get_total_values}}
#'
#' @details Effort and catch are estimated from the the Bus Route Estimator
#' equation in Robson and Jones (1989), Jones and Robson (1991; eqn. 1) and Pollock
#' et al. 1994.
#'
#' @details Catch rate is calculated from the Ratio of Means equation (see
#' Malvestuto (1996) and Jones and Pollock (2012) for discussions).
#'
#' @references Jones, C. M., and D. Robson. 1991. Improving precision in angler
#' surveys: traditional access design versus bus route design. American Fisheries
#' Society Symposium 12:177-188.
#'
#' @references Jones, C. M., and K. H. Pollock. 2012. Recreational survey methods:
#' estimation of effort, harvest, and released catch. Pages 883-919 in A. V. Zale,
#' D. L. Parrish, and T. M. Sutton, editors. Fisheries Techniques, 3rd edition.
#' American Fisheries Society, Bethesda, Maryland.
#'
#' @references Malvestuto, S. P. 1996. Sampling the recreational creel. Pages
#' 591-623 in B. R. Murphy and D. W. Willis, editors. Fisheries techniques,
#' 2nd edition. American Fisheries Society, Bethesda, Maryland.
#'
#' @references Pollock, K. H., C. M. Jones, and T. L. Brown. 1994. Angler survey
#' methods and their applications in fisheries management. American Fisheries
#' Society, Special Publication 25, Bethesda, Maryland.
#'
#' @references Robson, D., and C. M. Jones. 1989. The theoretical basis of an
#' access site angler survey design. Biometrics 45:83-98.
#'
#' @details The Ratio of means is calculated by
#' \deqn{\widehat{R_1} = \frac{\sum\limits_{i=1}^n{c_i/n}}{\sum\limits_{i=1}^n{L_i/n}}}
#' where \emph{\eqn{c_i}} is the catch for the \emph{\eqn{i^{th}}} sampling unit
#' and \emph{\eqn{L_i}} is the length of the fishing trip at the time of the
#' interview. For incomplete surveys, \emph{\eqn{L_i}} represents an incomplete
#' trip.
#'
#' @details The bus route estimator is
#' \deqn{\widehat{E} = T\sum\limits_{i=1}^n{\frac{1}{w_{i}}}\sum\limits_{j=1}^m{\frac{e_{ij}}{\pi_{j}}}}
#' where \emph{E} = estimated total party-hours of effort; \emph{T} = total time
#' to complete a full circuit of the route, including travelling and waiting;
#' \emph{\eqn{w_i}} = waiting time at the \emph{\eqn{i^{th}}} site
#' (where \emph{i} = 1, ..., \emph{n} sites); \emph{\eqn{e_{ij}}} =
#' total time that the \emph{\eqn{j^{th}}} car is parked at the \emph{\eqn{i^{th}}}
#' site while the agent is at that site (where \emph{j} = 1, ..., \emph{m} cars).
#'
#' @examples
#' # To simulate one bus route survey that takes place in the morning, these values are used
#' #start time at access sites
#' startTimeAM <- c(1, 2,3,4,5)
#' #wait time at access sites
#' waitTimeAM <- c(.5, .5, .5, .5, 2)
#' #the number of anglers that will visit access site throughout the day
#' nanglersAM <- c(10,10,10,10,50)
#' # the number of sites to be visited
#' nsitesAM <- 5
#' # the sampling probability. Here it is .5 because we are only conducting this
#' # survey during the first 50% of the fishing day
#' sampling_prob <- .5
#' # the mean catch rate. Here it is 2.5 which equals 2.5 fish/hour
#' mean_catch_rate <- 2.5
#'
#' simulate_bus_route(start_time = startTimeAM, wait_time = waitTimeAM, n_anglers = nanglersAM,
#' n_sites = nsitesAM, sampling_prob = sampling_prob, mean_catch_rate = mean_catch_rate)
#'
#' # To simulate one traditional access point survey where the creel clerk arrives,
#' # counts anglers, and interviews anglers that have completed their trips
#' start_time = 0.001
#' wait_time = 8
#' #nanglers can be informed by previously-collected data
#' n_anglers = 1000
#' n_sites = 1
#' # sampling probability here is 8/12 because we are staying at the access site
#' # for 8 hours of a 12-hour fishing day. To adjust the fishing day length, an
#' # additional 'fishing_day_length' argument needs to be passed to this function.
#' sampling_prob <- (8/12)
#' # the mean catch rate.
#' mean_catch_rate <- 5
#'
#' simulate_bus_route(start_time, wait_time, n_anglers, n_sites, sampling_prob, mean_catch_rate)
#'
#' @export
simulate_bus_route <- function(start_time, wait_time, n_anglers, n_sites,
sampling_prob = 1, mean_catch_rate, ... ){
#Create a dataFrame to fill with the results
dF <- as.data.frame(matrix(data = NA, nrow = n_sites, ncol = 10, byrow=TRUE))
names(dF) <- c("n_observed_trips", "total_observed_trip_effort",
"n_completed_trips", "total_completed_trip_effort",
"total_completed_trip_catch", "start_time", "wait_time",
"total_catch", "true_effort", "mean_lambda")
  #Run make_anglers() and get_total_values() iteratively for however many sites are
  # provided in the n_sites argument
  for(i in 1:nrow(dF)){
    tmp <- make_anglers(n_anglers = n_anglers[i], ...)
    dF[i,] <- get_total_values(data = tmp, start_time = start_time[i],
                               wait_time = wait_time[i], end_time = NULL,
                               sampling_prob = sampling_prob,
                               mean_catch_rate = mean_catch_rate, ...)
}
  # Total circuit time T: from arrival at the first site to departure from the last
  bigT <- (start_time + wait_time)[length(start_time)] - start_time[1]
  #########
  #Calculate estimated effort (Ehat) based upon the bigT equation
  sum_effort <- dF$total_observed_trip_effort
  Ehat <- bigT*sum(1/dF$wait_time * sum_effort)
#Complete Effort
sum_completed_effort <- dF$total_completed_trip_effort
completed_effort <- bigT*sum(1/dF$wait_time * sum_completed_effort)
########
#Complete catch
#Calculate Catch based on the bigT equation
sum_completed_catch <- dF$total_completed_trip_catch
completed_catch <- bigT*sum(1/dF$wait_time * sum_completed_catch)
#Total ROM catchRate
catch_rate_ROM <- completed_catch/completed_effort
#trueTotalCatch
true_catch <- sum(dF$total_catch)
#totalTrueEffort
true_effort <- sum(dF$true_effort)
#meanLambda
mean_lambda <- mean(dF$mean_lambda)
data.frame(Ehat = Ehat,
catch_rate_ROM = catch_rate_ROM,
true_catch = true_catch,
true_effort = true_effort,
mean_lambda = mean_lambda) %>%
return()
}
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/R/simulate_bus_route.R |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
library(AnglerCreelSurveySimulation)
anglers <- make_anglers(n_anglers = 100, mean_trip_length = 3.5, fishing_day_length = 12)
## ------------------------------------------------------------------------
head(anglers)
## ----warning = FALSE-----------------------------------------------------
library(dplyr)
library(ggplot2)
# Histogram overlaid with kernel density curve
anglers %>%
ggplot(aes(x=trip_length)) +
geom_histogram(aes(y=..density..),
binwidth=.1,
colour="black", fill="white") +
geom_density(alpha=.2, fill="#FF6666")
## ------------------------------------------------------------------------
anglers %>%
get_total_values(start_time = 0, wait_time = 8, sampling_prob = 8/12, mean_catch_rate = 2.5)
## ------------------------------------------------------------------------
sim <- simulate_bus_route(start_time = 0, wait_time = 8, n_sites = 1, n_anglers = 100,
sampling_prob = 8/12, mean_catch_rate = 2.5, fishing_day_length = 12)
sim
## ------------------------------------------------------------------------
sim <- conduct_multiple_surveys(n_sims = 20, start_time = 0, wait_time = 8, n_sites = 1,
n_anglers = 100, sampling_prob = 8/12,
mean_catch_rate = 2.5, fishing_day_length = 12)
sim
## ------------------------------------------------------------------------
mod <-
sim %>%
lm((Ehat * catch_rate_ROM) ~ true_catch, data = .)
summary(mod)
## ------------------------------------------------------------------------
#Create a new vector of the estimated effort multiplied by estimated catch rate
sim <-
sim %>%
mutate(est_catch = Ehat * catch_rate_ROM)
sim %>%
ggplot(aes(x = true_catch, y = est_catch)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
## ------------------------------------------------------------------------
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
#Create a new vector of the estimated effort multiplied by estimated catch rate
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
## ------------------------------------------------------------------------
start_time <- 0
wait_time <- 12
sampling_prob <- 1
sim <- conduct_multiple_surveys(n_sims = 20, start_time = start_time, wait_time = wait_time,
n_sites = 1, n_anglers = 100, sampling_prob = 1,
mean_catch_rate = 2.5, fishing_day_length = 12)
sim
## ----echo = FALSE--------------------------------------------------------
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
## ------------------------------------------------------------------------
start_time <- c(0, 4.5)
wait_time <- c(4, 3.5)
n_sites = 2
n_anglers <- c(50, 50)
fishing_day_length <- 12
sampling_prob <- sum(wait_time)/fishing_day_length
sim <- conduct_multiple_surveys(n_sims = 20, start_time = start_time, wait_time = wait_time,
n_sites = n_sites, n_anglers = n_anglers,
sampling_prob = sampling_prob, mean_catch_rate = 2.5,
fishing_day_length = fishing_day_length)
sim
## ----echo = FALSE--------------------------------------------------------
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
## ------------------------------------------------------------------------
#Weekend clerks
start_time_w <- 2
wait_time_w <- 10
n_sites <- 1
n_anglers_w <- 75
fishing_day_length <- 12
sampling_prob <- 8/12
sim_w <- conduct_multiple_surveys(n_sims = 8, start_time = start_time_w,
wait_time = wait_time_w, n_sites = n_sites,
n_anglers = n_anglers_w, sampling_prob = sampling_prob,
mean_catch_rate = 2.5, fishing_day_length = fishing_day_length)
sim_w
#Add the weekday survey and weekend surveys to the same data frame
mon_survey <-
sim_w %>%
bind_rows(sim)
mod <-
mon_survey %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
## ----echo = FALSE--------------------------------------------------------
mon_survey %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
| /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/inst/doc/creel_survey_simulation.R |
---
title: "Simulating Creel Surveys"
author: "Steven H. Ranney"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Simulating Creel Surveys}
%\VignetteEngine{knitr::rmarkdown}
\VignetteEncoding[utf8]{inputenc}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
# Introduction
Creel surveys allow fisheries scientists and managers to collect data on catch and harvest, an angler population (including effort expended), and, depending on survey design, biological data on fish populations. Though creel surveys are important methods of collecting data on the user base of a fishery, they are difficult to implement and are paid little attention in graduate fisheries programs. As a result, fisheries managers--the first job for many fisheries-program graduates--often inherit old surveys or are told to institute new surveys with little knowledge of how to do so.
Fisheries can cover large spatial extents: large reservoirs, coast-lines, and river systems. A creel survey has to be statistically valid, adaptable to the geographic challenges of the fishery, and cost efficient. Limited budgets can prevent agencies from implementing creel surveys; the [AnglerCreelSurveySimulation](https://CRAN.R-project.org/package=AnglerCreelSurveySimulation) package was designed to help managers explore the type of creel survey that is most appropriate for their fishery, including fisheries with multiple access points, access points that are more popular than others, variation in catch rate, the number of surveyors, and seasonal variation in day-lengths.
The `AnglerCreelSurveySimulation` package does require that users know something about their fishery and the human dimensions of that fishery. *A priori* knowledge includes the mean trip length for a party (or individual) and the mean catch rate of the fishery.
The `AnglerCreelSurveySimulation` package is simple, but powerful. Four functions provide the means for users to create a population of anglers, limit the length of the fishing day to any value, and provide a mean trip length for the population. Ultimately, the user only needs to know the final function `conduct_multiple_surveys()` but because I'd rather this *not* be a *black box* of functions, this brief introduction will be a step-by-step process through the package.
## A walk-through of the package
This tutorial assumes that we have a very simple, small fishery with only one access point that, on any given day, is visited by 100 anglers. The fishing day length for our theoretical fishery is 12 hours (say, from 6 am to 6pm) and all anglers are required to have completed their trip by 6pm. Lastly, the mean trip length is known to be 3.5 hours.
*For the purposes of this package, all times are functions of the fishing day. In other words, if a fishing day length is 12 hours (e.g., from 6 am to 6pm) and an angler starts their trip at `2` and ends at `4` that means that they started their trip at 8 am and ended at 10 am.*
The `make_anglers()` function builds a population of anglers:
```{r}
library(AnglerCreelSurveySimulation)
anglers <- make_anglers(n_anglers = 100, mean_trip_length = 3.5, fishing_day_length = 12)
```
`make_anglers()` returns a dataframe with `start_time`, `trip_length`, and `departure_time` for all anglers.
```{r}
head(anglers)
```
In the `head(anglers)` statement, you can see that `start_time`, `trip_length`, and `departure_time` are all available for each angler. The first angler started their trip roughly `r round(anglers$start_time[1], 2)` hours into the fishing day, continued to fish for `r round(anglers$trip_length[1], 2)` hours, and left the access point at `r round(anglers$departure_time[1], 2)` hours into the fishing day. Angler start times are assigned by the `uniform` distribution and trip lengths are assigned by the `gamma` distribution. To get true effort of all the anglers for this angler population, summing `trip_length` is all that's needed: `r sum(anglers$trip_length)`.
The distribution of angler trip lengths can be easily visualized:
```{r warning = FALSE}
library(dplyr)
library(ggplot2)
# Histogram overlaid with kernel density curve
anglers %>%
ggplot(aes(x=trip_length)) +
geom_histogram(aes(y=..density..),
binwidth=.1,
colour="black", fill="white") +
geom_density(alpha=.2, fill="#FF6666")
```
Once the population of anglers has been created, the next function to apply is the `get_total_values()` function. In `get_total_values()`, the user specifies the start time of the creel surveyor, the end time of the surveyor, and the wait time of the surveyor. Here is where the user also specifies the sampling probability of the anglers (in most cases, equal to $\frac{\text{wait time}}{\text{fishing day length}}$) and the mean catch rate of the fishery. There are a number of default settings in the `get_total_values()` function; see `?get_total_values` for a description of how the function handles `NULL` values for `start_time`, `end_time`, and `wait_time`. In the output, `start_time` and `wait_time` are the times that the surveyor started and waited at the access point. `total_catch` and `true_effort` are the total (or *real*) values for catch and effort. `mean_lambda` is the mean catch rate for all anglers. Even though we assigned `mean_catch_rate` to `get_total_values()`, individual mean catch rates are simulated by `rgamma()` with shape equal to `mean_catch_rate` and rate equal to `1`.
For this walk-through, we'll schedule the surveyor to work for a total of eight hours at the sole access point in our fishery:
```{r}
anglers %>%
get_total_values(start_time = 0, wait_time = 8, sampling_prob = 8/12, mean_catch_rate = 2.5)
```
`get_total_values()` returns a single-row data frame with several columns. The output of `get_total_values()` is the catch and effort data observed by the surveyor during their wait at the access point along with the "true" values for catch and effort. (Obviously, we can't simulate biological data but, if an agency's protocol directed the surveyor to collect biological data, that could be analyzed with other `R` functions.)
In the output from `get_total_values()`, `n_observed_trips` is the number of trips that the surveyor observed, including anglers that arrived after she started her day and anglers that were there for the duration of her time at the access point. `total_observed_trip_effort` is the effort expended by those anglers; because the observed trips were not complete, she did not count their catch. `n_completed_trips` is the number of anglers that completed their trips while she was onsite, `total_completed_trip_effort` is the effort expended by those anglers, and `total_completed_trip_catch` is the number of fish caught by those anglers. Catch includes both the number of fish harvested and those caught and released.
### Estimating catch and effort
Effort and catch are estimated from the Bus Route Estimator:
$$
\widehat{E} = T\sum\limits_{i=1}^n{\frac{1}{w_{i}}}\sum\limits_{j=1}^m{\frac{e_{ij}}{\pi_{j}}}
$$
where
* *E* = estimated total party-hours of effort;
* *T* = total time to complete a full circuit of the route, including travelling and waiting;
* *w~i~* = waiting time at the *i^th^* site (where *i* = 1, ..., *n* sites);
and
* *e~ij~* = total time that the *j^th^* car (or trailer) is parked at the *i^th^* site while the agent is at that site (where *j* = 1, ..., *m* cars).
Catch rate is calculated from the Ratio of Means equation:
$$
\widehat{R_1} = \frac{\sum\limits_{i=1}^n{c_i/n}}{\sum\limits_{i=1}^n{L_i/n}}
$$
where
* *c~i~* is the catch for the *i^th^* sampling unit
and
* *L~i~* is the length of the fishing trip at the time of the interview.
For incomplete surveys, *L~i~* represents an incomplete trip.
`simulate_bus_route()` calculates effort and catch based upon these equations. See `?simulate_bus_route` for references that include a more detailed discussion of these equations.
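To make the arithmetic concrete, here is a minimal sketch that applies both equations by hand to the single-site survey above. The object names are illustrative, and the ratio-of-means value may be `NaN` if no trips happened to be completed during the wait:
```{r}
survey <- anglers %>% 
  get_total_values(start_time = 0, wait_time = 8, sampling_prob = 8/12, 
                   mean_catch_rate = 2.5)
big_t <- 8  # total circuit time: one site, visited for the full 8-hour shift
e_hat <- big_t * (1/survey$wait_time) * survey$total_observed_trip_effort
rom <- survey$total_completed_trip_catch / survey$total_completed_trip_effort
e_hat * rom  # estimated total catch
```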
`simulate_bus_route()` calls `make_anglers()` and `get_total_values()` so many of the same arguments we passed in the previous functions will need to be passed to `simulate_bus_route()`. The new argument, `n_sites`, is the number of sites visited by the surveyor. In more advanced simulations (see the examples in `?simulate_bus_route`), you can pass vectors of values for `start_time`, `wait_time`, `n_sites`, and `n_anglers` to simulate a bus route-type survey rather than just a single access-point survey.
```{r}
sim <- simulate_bus_route(start_time = 0, wait_time = 8, n_sites = 1, n_anglers = 100,
sampling_prob = 8/12, mean_catch_rate = 2.5, fishing_day_length = 12)
sim
```
The output from `simulate_bus_route()` is a dataframe with values for `Ehat`, `catch_rate_ROM` (the ratio-of-means catch rate), `true_catch`, `true_effort`, and `mean_lambda`. `Ehat` is the estimated total effort from the Bus Route Estimator above and `catch_rate_ROM` is the catch rate estimated from the Ratio of Means equation. `true_catch`, `true_effort`, and `mean_lambda` are the same as before. Multiplying `Ehat` by `catch_rate_ROM` gives an estimate of total catch: `r sim$Ehat * sim$catch_rate_ROM`.
### Conducting multiple simulations
With information about the fishery, the start and wait times of the surveyor, the sampling probability, mean catch rate, and fishing day length, we can run multiple simulations with `conduct_multiple_surveys()`. `conduct_multiple_surveys()` is a wrapper that calls the other three functions in turn and compiles the values into a data frame for easy plotting or analysis. The only additional argument needed is the `n_sims` value, which tells the function how many simulations to conduct. For the sake of this simple simulation, let's assume that the creel surveyor works five days a week for four weeks (i.e., 20 days):
```{r}
sim <- conduct_multiple_surveys(n_sims = 20, start_time = 0, wait_time = 8, n_sites = 1,
n_anglers = 100, sampling_prob = 8/12,
mean_catch_rate = 2.5, fishing_day_length = 12)
sim
```
With the output from multiple simulations, an analyst can evaluate how closely the creel survey they've designed mirrors reality. A `lm()` of estimated catch as a function of `true_catch` can tell us if the survey will overestimate or underestimate reality:
```{r}
mod <-
sim %>%
lm((Ehat * catch_rate_ROM) ~ true_catch, data = .)
summary(mod)
```
Plotting the data and the model provide a good visual means of evaluating how close our estimates are to reality:
```{r}
#Create a new vector of the estimated effort multiplied by estimated catch rate
sim <-
sim %>%
mutate(est_catch = Ehat * catch_rate_ROM)
sim %>%
ggplot(aes(x = true_catch, y = est_catch)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
The closer the slope parameter estimate is to 1 and the intercept parameter estimate is to 0, the closer our estimate of catch is to reality.
We can create a model and plot of our effort estimates, too:
```{r}
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
#Create a new vector of the estimated effort multiplied by estimated catch rate
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
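Because the simulations give us a distribution of estimates, we can also summarize their variability with the package's `calculate_rse()` (a quick sketch; relative standard error is returned as a proportion):
```{r}
calculate_rse(sim$Ehat)
```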
### Can we observe ALL trips?
If the start and wait time equal 0 and the length of the fishing day, respectively, the creel surveyor can observe all completed trips, though she'd likely be unhappy having to work 12 hours. The inputs have to be adjusted to allow her to arrive at time 0, stay for all 12 hours, and have a sampling probability of 1.0 so that she observes everyone:
```{r}
start_time <- 0
wait_time <- 12
sampling_prob <- 1
sim <- conduct_multiple_surveys(n_sims = 20, start_time = start_time, wait_time = wait_time,
n_sites = 1, n_anglers = 100, sampling_prob = 1,
mean_catch_rate = 2.5, fishing_day_length = 12)
sim
```
```{r echo = FALSE}
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Another simulation
If our hypothetical fishery suddenly gained another access point and the original 100 anglers were split between the two access points equally, what kind of information would a creel survey capture? We could ask our surveyor to split her eight-hour work day between both access points, but she'll have to drive for 0.5 hours to get from one to another. Of course, that 0.5 hour of drive time will be a part of her work day so she'll effectively have 7.5 hours to spend at access points counting anglers and collecting data.
```{r}
start_time <- c(0, 4.5)
wait_time <- c(4, 3.5)
n_sites = 2
n_anglers <- c(50, 50)
fishing_day_length <- 12
sampling_prob <- sum(wait_time)/fishing_day_length
sim <- conduct_multiple_surveys(n_sims = 20, start_time = start_time, wait_time = wait_time,
n_sites = n_sites, n_anglers = n_anglers,
sampling_prob = sampling_prob, mean_catch_rate = 2.5,
fishing_day_length = fishing_day_length)
sim
```
```{r echo = FALSE}
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Even more simulations
Ultimately, the creel survey simulation can be as complicated as a creel survey. If a survey requires multiple clerks, several simulations can be coupled together to act as multiple surveyors. To accommodate weekends or holidays (i.e., increased angler pressure), additional simulations with different wait times and more anglers (to simulate higher pressure) can be built into the simulation. For example, if we know that angler pressure is 50% higher at the two access points on weekends, we can hire a second clerk to sample 8 hours a day on the weekends--one day at each access point--and add the weekend data to the weekday data.
```{r}
#Weekend clerks
start_time_w <- 2
wait_time_w <- 10
n_sites <- 1
n_anglers_w <- 75
fishing_day_length <- 12
sampling_prob <- 8/12
sim_w <- conduct_multiple_surveys(n_sims = 8, start_time = start_time_w,
wait_time = wait_time_w, n_sites = n_sites,
n_anglers = n_anglers_w, sampling_prob = sampling_prob,
mean_catch_rate = 2.5, fishing_day_length = fishing_day_length)
sim_w
#Add the weekday survey and weekend surveys to the same data frame
mon_survey <-
sim_w %>%
bind_rows(sim)
mod <-
mon_survey %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
```
```{r echo = FALSE}
mon_survey %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Choose your own adventure
Hopefully, this vignette has shown you how to build and simulate your own creel survey. It's flexible enough to estimate monthly or seasonal changes in fishing day length, changes in the mean catch rate, increased angler pressure on weekends, and any number of access sites, start times, wait times, and sampling probabilities. The output from `conduct_multiple_surveys()` allows the user to estimate variability in the catch and effort estimates (e.g., relative standard error) to evaluate the most efficient creel survey for *their* fishery. | /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/inst/doc/creel_survey_simulation.Rmd
---
title: "Simulating Creel Surveys"
author: "Steven H. Ranney"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Simulating Creel Surveys}
%\VignetteEngine{knitr::rmarkdown}
\VignetteEncoding[utf8]{inputenc}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
#Introduction
Creel surveys allow fisheries scientists and managers to collect data on catch and harvest, an angler popuation (including effort expended), and, depending on survey design, biological data on fish populations. Though important methods of collecting data on the user base of the fishery, creel surveys are difficult to implement and, in graduate fisheries programs, creel surveys are paid little attention. As a result, fisheries managers--the first job for many fisheries-program graduates--often inherit old surveys or are told to institute new surveys with little knowledge of how to do so.
Fisheries can cover large spatial extents: large reservoirs, coast-lines, and river systems. A creel survey has to be statistically valid, adaptable to the geographic challenges of the fishery, and cost efficient. Limited budgets can prevent agencies from implementing creel surveys; the [AnglerCreelSurveySimulation](https://CRAN.R-project.org/package=AnglerCreelSurveySimulation) was designed to help managers explore the type of creel survey that is most appropriate for their fishery, including fisheries with multiple access points, access points that are more popular than others, variation in catch rate, the number of surveyors, and seasonal variation in day-lengths.
The `AnglerCreelSurveySimulation` package does require that users know something about their fishery and the human dimensions of that fishery. *A prior* knowledge includes mean trip length for a party (or individual), the mean catch rate of the
The `AnglerCreelSurveySimulation` package is simple, but powerful. Four functions provide the means for users to create a population of anglers, limit the length of the fishing day to any value, and provide a mean trip length for the population. Ultimately, the user only needs to know the final function `ConductMultipleSurveys` but because I'd rather this *not* be a *black box* of functions, this brief introduction will be a step-by-step process through the package.
##A walk-through of the package
This tutorial assumes that we have a very simple, small fishery with only one access point that, on any given day, is visited by 100 anglers. The fishing day length for our theoretical fishery is 12 hours (say, from 6 am to 6pm) and all anglers are required to have completed their trip by 6pm. Lastly, the mean trip length is known to be 3.5 hours.
*For the purposes of this package, all times are functions of the fishing day. In other words, if a fishing day length is 12 hours (e.g., from 6 am to 6pm) and an angler starts their trip at `2` and ends at `4` that means that they started their trip at 8 am and ended at 10 am.*
The `make_anglers()` function builds a population of anglers:
```{r}
library(AnglerCreelSurveySimulation)
anglers <- make_anglers(n_anglers = 100, mean_trip_length = 3.5, fishing_day_length = 12)
```
`make_anglers()` returns a dataframe with `start_time`, `trip_length`, and `departure_time` for all anglers.
```{r}
head(anglers)
```
In the `head(anglers)` statement, you can see that `starttime`, `triplength`, and `departureTime` are all available for each angler. The first angler started their trip roughly `r round(anglers[[1]][1], 2)` hours into the fishing day, continued to fish for `r round(anglers[[2]][1], 2)` hours, and left the access point at `r round(anglers[[3]][1], 2)` hours into the fishing day. Angler start times are assigned by the `uniform` distribution and trip lengths are assigned by the `gamma` distribution. To get true effort of all the anglers for this angler population, summing `trip_length` is all that's needed: `r sum(anglers$triplength)`.
The distribution of angler trip lengths can be easily visualized:
```{r warning = FALSE}
library(dplyr)
library(ggplot2)
# Histogram overlaid with kernel density curve
anglers %>%
ggplot(aes(x=trip_length)) +
geom_histogram(aes(y=..density..),
binwidth=.1,
colour="black", fill="white") +
geom_density(alpha=.2, fill="#FF6666")
```
Once the population of anglers has been created, the next function to apply is the `get_total_values()` function. In `get_total_values()`, the user specifies the start time of the creel surveyor, the end time of the surveyor, and the wait time of the surveyor. Here is where the user also specifies the sampling probability of the anglers (in most cases, equal to $\frac{waitTime}{fishingDayLength}$) and the mean catch rate of the fishery. There are a number of a default settings in the `get_total_values()` function; see `?get_total_values` for a description of how the function handles `NULL` values for `startTime`, `endTime`, and `waitTime`. `startTime` and `waitTime` are the times that the surveyor started and waited at the access point. `totalCatch` and `trueEffort` are the total (or *real*) values for catch and effort. `meanLambda` is the mean catch rate for all anglers. Even though we assigned `meanCatchRate` to `get_total_values()`, individual mean catch rates are simulated by `rgamma()` with shape equal to `meanCatchRate` and rate equal to `1`.
For this walk through, we'll schedule the surveyor to work for a total of eight hours at the sole access point in our fishery:
```{r}
anglers %>%
get_total_values(start_time = 0, wait_time = 8, sampling_prob = 8/12, mean_catch_rate = 2.5)
```
`get_total_values()` returns a single row data frame with several columns. The output of `get_total_values()` is the catch and effort data observed by the surveyor during their wait at the accss point along with the "true" values for catch and effort. (Obviously, we can't simulate biological data but, if an agency's protocol directed the surveyor to collect biological data, that could be analyzed with other `R` functions.)
In the output from `get_total_values()`, `n_observed_trips` is the number of trips that the surveyor observed, including anglers that arrived after she started her day and anglers that were there for the duration of her time at the access point. `total_observed_trip_effort` is the effort expended by those parties; because the observed trips were not complete, she did not count their catch. `n_completed_trips` is the number of anglers that completed their trips while she was onsite, `total_completed_trip_effort` is the effort expended by those anglers, and `total_completed_trip_catch` is the number of fish caught by those parties. Catch is both the number of fish harvested and those caught and released.
### Estimating catch and effort
Effort and catch are estimated from the Bus Route Estimator:
$$
\widehat{E} = T\sum\limits_{i=1}^n{\frac{1}{w_{i}}}\sum\limits_{j=1}^m{\frac{e_{ij}}{\pi_{j}}}
$$
where
* *E* = estimated total party-hours of effort;
* *T* = total time to complete a full circuit of the route, including travelling and waiting;
* *w~i~* = waiting time at the *i^th^* site (where *i* = 1, ..., *n* sites);
and
* *e~ij~* = total time that the *j^th^* car (or trailer) is parked at the *i^th^* site while the agent is at that shite (where *j* = 1, ..., *n* sites).
Catch rate is calculated from the Ratio of Means equation:
$$
\widehat{R_1} = \frac{\sum\limits_{i=1}^n{c_i/n}}{\sum\limits_{i=1}^n{L_i/n}}
$$
where
* *c~i~* is the catch for the *i^th^* sampling unit
and
* *L~i~* is the length of the fishing trip at the time of the interview.
For incomplete surveys, *L~i~* represents an incomplete trip.
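A toy version of the ratio-of-means calculation, assuming interviews with five anglers (the values are invented for illustration):
```{r}
c_i <- c(2, 0, 5, 1, 3)            # catch reported by each interviewed angler
L_i <- c(1.5, 0.5, 4.0, 2.0, 3.5)  # trip length at the time of the interview
mean(c_i) / mean(L_i)              # ratio-of-means catch rate (fish per hour)
```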
`simulate_bus_route()` calculates effort and catch based upon these equations. See `?simulate_bus_route` for references that include a more detailed discussion of these equations.
`simulate_bus_route()` calls `make_anglers()` and `get_total_values()`, so many of the same arguments we passed to the previous functions will need to be passed to `simulate_bus_route()`. The new argument, `n_sites`, is the number of sites visited by the surveyor. In more advanced simulations (see the examples in `?simulate_bus_route`), you can pass vectors of values for `start_time`, `wait_time`, `n_sites`, and `n_anglers` to simulate a bus route-type survey rather than just a single access-point survey.
```{r}
sim <- simulate_bus_route(start_time = 0, wait_time = 8, n_sites = 1, n_anglers = 100,
sampling_prob = 8/12, mean_catch_rate = 2.5, fishing_day_length = 12)
sim
```
The output from `simulate_bus_route()` is a data frame with values for `Ehat`, `catch_rate_ROM` (the ratio-of-means catch rate), `true_catch`, `true_effort`, and `mean_lambda`. `Ehat` is the estimated total effort from the Bus Route Estimator above and `catch_rate_ROM` is the catch rate estimated from the Ratio of Means equation. `true_catch`, `true_effort`, and `mean_lambda` are the same as before. Multiplying `Ehat` by `catch_rate_ROM` gives an estimate of total catch: `r sim[1]*sim[2]`.
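The same product can be computed from the named columns (using the snake_case column names that the later code in this vignette relies on):
```{r}
# Estimated total catch: estimated effort times the estimated catch rate
sim$Ehat * sim$catch_rate_ROM
```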
### Conducting multiple simulations
With information about the fishery, the start and wait times of the surveyor, the sampling probability, mean catch rate, and fishing day length, we can run multiple simulations with `conduct_multiple_surveys()`. `conduct_multiple_surveys()` is a wrapper that calls the other three functions in turn and compiles the values into a data frame for easy plotting or analysis. The only additional argument needed is `n_sims`, which tells the function how many simulations to conduct. For the sake of this simple simulation, let's assume that the creel surveyor works five days a week for four weeks (i.e., 20 days):
```{r}
sim <- conduct_multiple_surveys(n_sims = 20, start_time = 0, wait_time = 8, n_sites = 1,
n_anglers = 100, sampling_prob = 8/12,
mean_catch_rate = 2.5, fishing_day_length = 12)
sim
```
With the output from multiple simulations, an analyst can evaluate how closely the creel survey they've designed mirrors reality. A `lm()` of estimated catch as a function of `true_catch` can tell us if the survey will over- or underestimate reality:
```{r}
mod <-
sim %>%
lm((Ehat * catch_rate_ROM) ~ true_catch, data = .)
summary(mod)
```
Plotting the data and the model provide a good visual means of evaluating how close our estimates are to reality:
```{r}
#Create a new vector of the estimated effort multiplied by estimated catch rate
sim <-
sim %>%
mutate(est_catch = Ehat * catch_rate_ROM)
sim %>%
ggplot(aes(x = true_catch, y = est_catch)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
The closer the slope parameter estimate is to 1 and the intercept parameter estimate is to 0, the closer our estimate of catch is to reality.
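A quick way to make that check is to look at the fitted coefficients directly:
```{r}
# The intercept should be near 0 and the slope near 1
coef(mod)
```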
We can create a model and plot of our effort estimates, too:
```{r}
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
#Create a new vector of the estimated effort multiplied by estimated catch rate
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Can we observe ALL trips?
If the start time equals 0 and the wait time equals the length of the fishing day, the creel surveyor can observe all completed trips, though she'd likely be unhappy having to work 12 hours. The inputs have to be adjusted to allow her to arrive at time 0, stay for all 12 hours, and have a probability of 1.0 of observing every angler:
```{r}
start_time <- 0
wait_time <- 12
sampling_prob <- 1
sim <- conduct_multiple_surveys(n_sims = 20, start_time = start_time, wait_time = wait_time,
n_sites = 1, n_anglers = 100, sampling_prob = 1,
mean_catch_rate = 2.5, fishing_day_length = 12)
sim
```
```{r echo = FALSE}
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Another simulation
If our hypothetical fishery suddenly gained another access point and the original 100 anglers were split between the two access points equally, what kind of information would a creel survey capture? We could ask our surveyor to split her eight-hour work day between both access points, but she'll have to drive for 0.5 hours to get from one to another. Of course, that 0.5 hour of drive time will be a part of her work day so she'll effectively have 7.5 hours to spend at access points counting anglers and collecting data.
```{r}
start_time <- c(0, 4.5)
wait_time <- c(4, 3.5)
n_sites = 2
n_anglers <- c(50, 50)
fishing_day_length <- 12
sampling_prob <- sum(wait_time)/fishing_day_length
sim <- conduct_multiple_surveys(n_sims = 20, start_time = start_time, wait_time = wait_time,
n_sites = n_sites, n_anglers = n_anglers,
sampling_prob = sampling_prob, mean_catch_rate = 2.5,
fishing_day_length = fishing_day_length)
sim
```
```{r echo = FALSE}
mod <-
sim %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
sim %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Even more simulations
Ultimately, the creel survey simulation can be as complicated as a creel survey. If a survey requires multiple clerks, several simulations can be coupled together to act as multiple surveyors. To accommodate weekends or holidays (i.e., increased angler pressure), additional simulations with different wait times and more anglers (to simulate higher pressure) can be built into the simulation. For example, if we know that angler pressure is 50% higher at the two access points on weekends, we can hire a second clerk to sample 8 hours a day on the weekends (one day at each access point) and add the weekend data to the weekday data.
```{r}
#Weekend clerks
start_time_w <- 2
wait_time_w <- 8
n_sites <- 1
n_anglers_w <- 75
fishing_day_length <- 12
sampling_prob <- 8/12
sim_w <- conduct_multiple_surveys(n_sims = 8, start_time = start_time_w,
wait_time = wait_time_w, n_sites = n_sites,
n_anglers = n_anglers_w, sampling_prob = sampling_prob,
mean_catch_rate = 2.5, fishing_day_length = fishing_day_length)
sim_w
#Add the weekday survey and weekend surveys to the same data frame
mon_survey <-
sim_w %>%
bind_rows(sim)
mod <-
mon_survey %>%
lm(Ehat ~ true_effort, data = .)
summary(mod)
```
```{r echo = FALSE}
mon_survey %>%
ggplot(aes(x = true_effort, y = Ehat)) +
geom_point() +
geom_abline(intercept = mod$coefficients[1], slope = mod$coefficients[2],
colour = "red", size = 1.01)
```
### Choose your own adventure
Hopefully, this vignette has shown you how to build and simulate your own creel survey. It's flexible enough to accommodate monthly or seasonal changes in fishing day length, changes in the mean catch rate, increased angler pressure on weekends, and any number of access sites, start times, wait times, and sampling probabilities. The output from `conduct_multiple_surveys()` allows the user to estimate variability in the catch and effort estimates (e.g., relative standard error) to evaluate the most efficient creel survey for *their* fishery. | /scratch/gouwar.j/cran-all/cranData/AnglerCreelSurveySimulation/vignettes/creel_survey_simulation.Rmd
#' @title Generate networks characterising habitat physical configurations
#' @description Generate undirected networks (weighted or unweighted, connected or disconnected) characterising the physical attributes and spatial organizations (or distributions) of habitat components (i.e. habitat configurations).
#'
#' @param N The number of nodes
#' @param L A side length of the rectangle landscape within which nodes are anchored
#' @param mu the critical \code{Dij} (i.e. Euclidean distance between node \code{i} and \code{j}) at which the link removing probability curve \code{P(Dij, mu, lamda)} transits from concave to convex (see \code{\link{ahn_prob}})
#' @param lamda the steepness of the link removing probability curve \code{P(Dij, mu, lamda)}, see \code{\link{ahn_prob}}
#' @param Connected \code{TRUE} for connected while \code{FALSE} ignores whether the networks are connected or not
#' @param Weighted \code{TRUE} for weighted while \code{FALSE} for unweighted networks
#' @param eta mediates the weight, i.e. \code{(Dij)^-eta}, of the link rewiring node \code{i} from one network component and node \code{j} from another network component (\code{i} and \code{j} are with an Euclidean distance of \code{Dij}) when the network becomes disconnected after removing links from the initial complete network with the probability \code{P(Dij, mu, lamda) = [1 + exp(-lamda(Dij - mu))]^-1} when both \code{Connected = TRUE} and \code{Weighted = TRUE}
#' @param A The area of the rectangle landscape within which the network is defined
#' @param X A vector of \code{X} coordinates for the \code{N} nodes (sampled from \code{[0, L]} uniformly at random if \code{NULL})
#' @param Y A vector of \code{Y} coordinates for the \code{N} nodes (sampled from \code{[0, A/L]} uniformly at random if \code{NULL})
#' @param U A vector with \code{N} elements specifying node attributes (qualitative or quantitive), by default \code{NULL}
#' @param V A vector with \code{N} elements specifying node attributes (qualitative or quantitive), by default \code{NULL}
#'
#' @importFrom stats runif dist rnorm
#' @import igraph
#' @export
#' @return Return an animal habitat network (an \code{igraph} object)
#' @examples
#' # generate a connected and weighted network
#' ahn_gen(N = 10, L = 5, mu = 1, lamda = 5)
#'
#'\donttest{
#'
#' N <- 10
#' x <- runif(N, 0, 5)
#' ql <- sample(LETTERS, N, replace = TRUE)
#' qn <- sample(1:20, N, replace = TRUE)
#'
#' # specify the X coordinates, node attributes U and V for a connected and unweighted network
#' ahn_gen(N, L = 5, mu = 1, lamda = 5, Weighted = FALSE, X = x, U = ql, V = qn)
#'
#' # specify the Y coordinates, node attributes U and V for a weighted network, no matter if the
#' # network will be connected or not
#' ahn_gen(N, L = 5, mu = 1, lamda = 5, Weighted = TRUE, Connected = FALSE, Y = x, U = ql, V = qn)
#'
#'}
#'
ahn_gen <- function(N, L, mu, lamda, Connected = TRUE, Weighted = TRUE, eta = 1, A = 25, X = NULL, Y = NULL, U = NULL, V = NULL){
  # sample coordinates uniformly at random unless valid coordinates are supplied
  if(is.null(X)) x <- runif(N, 0, L) else if(max(X) > L || min(X) < 0 || length(X) != N) stop('Wrong X coordinate(s)!') else x <- X
  if(is.null(Y)) y <- runif(N, 0, A/L) else if(max(Y) > A/L || min(Y) < 0 || length(Y) != N) stop('Wrong Y coordinate(s)!') else y <- Y
xy_coords <- data.frame(x = x, y = y)
  dm <- as.matrix(dist(xy_coords, method = 'euclidean', diag = FALSE))  # pairwise Euclidean distances
  dm_0 <- 1/dm                   # inverse distances serve as candidate link weights
  dm_0[is.infinite(dm_0)] <- 0   # zero out the diagonal (self-distances)
ahn_wei_matrix <- dm_0
ahn_wei_matrix[lower.tri(ahn_wei_matrix, diag = TRUE)] <- NA
tr <- which(!is.na(ahn_wei_matrix))
  # probability of removing the link between i and j increases with distance Dij
  prob <- 1/(1 + exp(-lamda*(dm[tr] - mu)))
for(u in 1:length(tr)){
if(sample(c(1, 0), size = 1, prob = c(prob[u], 1 - prob[u]))){
ahn_wei_matrix[tr][u] <- 0
}
}
ahn_wei_matrix[lower.tri(ahn_wei_matrix)] <- t(ahn_wei_matrix)[lower.tri(ahn_wei_matrix)]
if(Weighted){
ahn <- graph_from_adjacency_matrix(ahn_wei_matrix, mode = 'undirected', diag = FALSE, weighted = TRUE)
} else{
ahn_wei_matrix[ahn_wei_matrix > 0] <- 1
ahn <- graph_from_adjacency_matrix(ahn_wei_matrix, mode = 'undirected', diag = FALSE, weighted = NULL)
}
  if(!is.connected(ahn) && Connected){
    # reconnect: repeatedly link the closest pair of nodes across two components
    memb <- unname(components(ahn)$membership)
    ncomp <- max(memb)
    while(ncomp > 1){
r_memb <- sample(memb, 1)
temp <- dm_0[which(memb == r_memb), which(memb != r_memb), drop = FALSE]
rn <- as.numeric(rownames(temp)[which(temp == max(temp), arr.ind = T)[1]])
cn <- as.numeric(colnames(temp)[which(temp == max(temp), arr.ind = T)[2]])
if(Weighted){
ahn_wei_matrix[rn, cn] <- (dm_0[rn, cn])^eta
ahn_wei_matrix[cn, rn] <- (dm_0[rn, cn])^eta
ahn <- graph_from_adjacency_matrix(ahn_wei_matrix, mode = 'undirected', diag = FALSE, weighted = TRUE)
} else{
ahn_wei_matrix[rn, cn] <- 1
ahn_wei_matrix[cn, rn] <- 1
ahn_wei_matrix[ahn_wei_matrix > 0] <- 1
ahn <- graph_from_adjacency_matrix(ahn_wei_matrix, mode = 'undirected', diag = FALSE, weighted = NULL)
}
memb <- unname(components(ahn)$membership)
ncomp <- max(memb)
}
}
if(!is.null(U)){vertex_attr(ahn, name = 'U') <- U}
if(!is.null(V)){vertex_attr(ahn, name = 'V') <- V}
vertex_attr(ahn, name = 'X') <- xy_coords$x
vertex_attr(ahn, name = 'Y') <- xy_coords$y
return(ahn)
}
| /scratch/gouwar.j/cran-all/cranData/AnimalHabitatNetwork/R/ahn_gen.R |
#' @title Plot networks
#' @description Visualise networks generated by the function \code{\link{ahn_gen}}.
#' @param ahn Networks returned by \code{\link{ahn_gen}}
#' @param NodeLabels The labels of nodes in \code{ahn} (node IDs by default)
#' @param NodeColors The colors of nodes in \code{ahn} (each node has a unique color by default)
#' @param NodeSizes The sizes of nodes in \code{ahn} (nodes are with the identical size of 3 by default)
#'
#' @import ggplot2
#' @export
#' @return Return a plot of the network
#' @examples
#' # generate a weighted and connected network and plot it by default
#' N <- 10
#' x <- runif(N, 0, 5)
#' ahn <- ahn_gen(N, L = 5, mu = 1, lamda = 5, X = x)
#' ahn_plot(ahn)
#'
#' \donttest{
#'
#' # plot the network with specified colors, labels and sizes for nodes
#' ahn_plot(
#' ahn,
#' NodeColors = sample(4, N, replace = TRUE),
#' NodeLabels = letters[1:N],
#' NodeSizes = seq(1, 5, length.out = N))
#'
#' }
#'
ahn_plot <- function(ahn,
NodeLabels = unname(V(ahn)),
NodeColors = unname(V(ahn)),
NodeSizes = rep(3, length(V(ahn)))){
if(is.weighted(ahn)){
dm <- as_adjacency_matrix(ahn, attr = "weight", sparse = F)
} else{
dm <- as_adjacency_matrix(ahn, sparse = F)
dm[dm > 0] <- 1
}
xy <- data.frame(x = vertex_attr(ahn, name = 'X'), y = vertex_attr(ahn, name = 'Y'))
el_temp <- which(dm != 0, arr.ind = T)
coor_dis <- data.frame()
for (u in 1:nrow(el_temp)){
x0 = xy$x[el_temp[u, 1]]
y0 = xy$y[el_temp[u, 1]]
x1 = xy$x[el_temp[u, 2]]
y1 = xy$y[el_temp[u, 2]]
coor_dis <- rbind(coor_dis, data.frame(x0, y0, x1, y1, dis = dm[el_temp[u, ][1], el_temp[u, ][2]]))
}
return(ggplot() +
geom_segment(aes(x = coor_dis[, 1], y = coor_dis[, 2], xend = coor_dis[, 3], yend = coor_dis[, 4], alpha = 0.999),
colour = "black", lineend = "round", linejoin = "round", size = sqrt(coor_dis[, 5])) +
geom_point(aes(x = xy$x, y = xy$y, colour = as.factor(NodeColors), alpha = 1), size = NodeSizes, shape = 16, data = xy) +
geom_point(aes(x = xy$x, y = xy$y, colour = as.factor(NodeColors), alpha = 1), size = NodeSizes + 1, shape = 1, data = xy) +
geom_text(aes(x = xy$x, y = xy$y, label = as.character(NodeLabels)), size = 1.8, hjust = 0.5, vjust = 0.5, data = xy) +
ylab("Y") +
xlab("X") +
coord_fixed() +
scale_x_continuous(breaks = seq(0, round(max(xy$x), digits = 1), length.out = 3)) +
scale_y_continuous(breaks = seq(0, round(max(xy$y), digits = 1), length.out = 3)) +
theme(legend.position = "none",
panel.border = element_rect(fill = NA, color = "black", size = 0.5),
panel.grid.major = element_line(color = "black", size = .015),
panel.grid.minor = element_line(color = "black", size = .015),
panel.background = element_rect(fill = "transparent",colour = NA),
plot.background = element_rect(fill = "transparent",colour = NA),
axis.title.x = element_text(color = "black", size = 10),
axis.title.y = element_text(color = "black", size = 10),
axis.text.x = element_text(color = "black", size = 10),
axis.text.y = element_text(color = "black", size = 10),
axis.ticks = element_line(color = "black", size = .2)))
}
| /scratch/gouwar.j/cran-all/cranData/AnimalHabitatNetwork/R/ahn_plot.R |
#' @title Plot probability curves
#' @description Plot the probability curve \code{P(Dij, mu, lamda)} for removing links from the initial complete network
#' @param Dij A vector of Euclidean distances between node \code{i} and \code{j}
#' @param mu The concave-to-convex transition point of the probability curves \code{P(Dij, mu, lamda) = [1 + exp(-lamda(Dij - mu))]^-1}, where \code{Dij} is the Euclidean distance between node \code{i} and \code{j}
#' @param lamda The steepness of the probability curves
#'
#' @export
#' @return Return a plot with probability curves
#' @examples
#' # plot the probabilities for removing network links between node i and j with
#' # Euclidean distances Dij
#'
#' dis <- seq(.05, 10, length.out = 20)
#' m <- c(.1, 2, 5, 10)
#' l <- c(.0001, .15, .35, .75, 1.25, 5, 30)
#' ahn_prob(dis, m, l)
#'
ahn_prob <- function(Dij = seq(.05, 10, length.out = 30), mu = c(.1, 2, 5, 10), lamda = c(.0001, .15, .35, .75, 1.25, 5, 30)){
df <- data.frame()
for(u in 1:length(Dij)){
for(v in 1:length(mu)){
for(w in 1:length(lamda)){
t <- data.frame(Dij = Dij[u],
mu = paste('mu = ', as.character(mu[v]), sep = ''),
lamda = as.character(lamda[w]),
Prob = 1/(1 + exp(-lamda[w]*(Dij[u] - mu[v]))))
df <- rbind(df, t)
}
}
}
  return(ggplot(data = df, mapping = aes(x = Dij, y = Prob, color = lamda)) +
geom_line() +
geom_point(size = 0.5) +
facet_wrap(facets = vars(mu)) +
ylab('Probability of removing the link between node i and j') +
xlab('Euclidean distance between node i and j') +
theme(panel.border = element_rect(fill = NA, color = "black", size = 0.5),
panel.grid.major = element_line(color = "black", size = .015),
panel.grid.minor = element_line(color = "black", size = .015),
panel.background = element_rect(fill = "transparent",colour = NA),
plot.background = element_rect(fill = "transparent",colour = NA),
axis.title.x = element_text(color = "black", size = 10),
axis.title.y = element_text(color = "black", size = 10),
axis.text.x = element_text(color = "black", size = 10),
axis.text.y = element_text(color = "black", size = 10),
axis.ticks = element_line(color = "black", size = .2)))
}
| /scratch/gouwar.j/cran-all/cranData/AnimalHabitatNetwork/R/ahn_prob.R |
##' Annotate gene IDs according to GTF files in gencode
##'
##' annoGene will return a data.frame of gene information or write them to a file (csv or html format).
##' The user should set a list of genes to be annotated, with "ENSEMBL" or "SYMBOL" style.
##'
##' @param IDs a list of genes
##' @param ID_type the type of input IDs, should be "ENSEMBL" or "SYMBOL"
##' @param species choose human or mouse, or rat, default: human
##' @param out_file the filename, should be ".csv" or ".html".
##' @importFrom DT datatable saveWidget
##' @importFrom methods hasArg
##' @importFrom utils write.csv
##' @return a data.frame whose columns contain gene symbols, biotypes, Ensembl IDs and the positions of genes
##' @examples
##' IDs <- c("DDX11L1", "MIR6859-1", "OR4G4P", "OR4F5")
##' ID_type = "SYMBOL"
##' annoGene(IDs, ID_type)
##' \donttest{
##' annoGene(IDs, ID_type,out_file = tempfile(fileext = ".html"))
##' annoGene(IDs, ID_type,out_file = tempfile(fileext = ".csv"))
##' }
##' @export
annoGene <- function(IDs,ID_type,species='human',out_file){
if(length(unique(IDs))<1){
stop("You should give me some genes to be annotated!!!")
}
if(!ID_type %in% c("ENSEMBL" ,"SYMBOL")){
stop("We only accept ENSEMBL or SYMBOL !!!")
}
if(species=='human'){
GTF <- humanGTF
}else if(species=='mouse'){
GTF <- mouseGTF
}else if(species=='rat'){
GTF <- ratGTF
}else{
stop("We only accept human or mouse, or rat, ")
}
res <- GTF[eval(parse(text=paste0("GTF$",ID_type))) %in% IDs, ]
missIds <- IDs[!(IDs %in% eval(parse(text=paste0("res$",ID_type))))]
missIdsPercentage = round((length(missIds)/length(IDs))*100,2)
if(length(missIds)!=0){
warning(
      paste0(missIdsPercentage, "% of input IDs failed to annotate... ")
      # example: 5.29% of input gene IDs failed to map...
)
}
if (hasArg(out_file)) {
results=res
if(grepl('.html$',out_file)){
Ensembl_prefix <- "https://asia.ensembl.org/Homo_sapiens/Gene/Summary?g="
href = paste0(Ensembl_prefix, results$ENSEMBL)
results$ENSEMBL = paste0("<b><a target=\"_black\" href=", shQuote(href), ">", results$ENSEMBL, "</a></b>")
symbol_prefix <- "http://www.ncbi.nlm.nih.gov/gene?term="
href = paste0(symbol_prefix, results$SYMBOL)
results$SYMBOL = paste0("<b><a target=\"_black\" href=", shQuote(href), ">", results$SYMBOL, "</a></b>")
y <- DT::datatable(results, escape = F, rownames = F)
DT::saveWidget(y,file = out_file)
}else if(grepl('.csv$',out_file)){
write.csv(results,file =out_file )
}else{
stop("We only accept csv or html format !!!")
}
}
return(res)
}
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/annoGene.R |
#' @title Check how a gene or a list of genes differs between two groups.
#'
#' @description Show how a gene or a list of genes differs between two groups.
#' A boxplot (for a single gene) or a heatmap (for a list of genes) will be drawn;
#' this is just a wrapper around ggpubr and pheatmap.
#' @param gene A vector contains all gene ids of interest. Gene ids should
#' be gene symbol.
#' @param genes_expr An expression matrix, the rownames should be gene symbol.
#' @param group_list A vector contains the group information of each samples in expression matrix
#' @export
#' @importFrom ggpubr ggboxplot
#' @importFrom pheatmap pheatmap
#' @return A figure : boxplot or heatmap
#' @examples
#' attach(GSE95166)
#' check_diff_genes('LRCH3',genes_expr,group_list )
#' \donttest{
#' x=DEG$logFC
#' names(x)=rownames(DEG)
#' cg=c(names(head(sort(x),100)), names(tail(sort(x),100)))
#' check_diff_genes(cg,genes_expr,group_list )
#' }
check_diff_genes <- function(gene,genes_expr,group_list ){
if(length(gene)==1){
if(! gene %in% rownames(genes_expr)){
stop(paste0(gene,' in not in your expression matrix'))
}
df=data.frame(value=as.numeric(genes_expr[gene,]),
group=group_list)
ggpubr::ggboxplot(df, "group", "value",
color = "group", palette =c("#00AFBB", "#E7B800"),
add = "jitter", shape = "group")
}else{
cg=gene
cg=cg[cg %in% rownames(genes_expr) ]
warning(paste0('Only ',length(cg),' in ',length(gene),' genes are in your expression matrix'))
if(length(cg)<1){
stop('None of the gene in your expression matrix')
}
n=t(scale(t(genes_expr[cg,])))
n[n>2]=2
n[n< -2]= -2
n[1:4,1:4]
ac=data.frame(group_list=group_list)
rownames(ac)=colnames(n)
pheatmap::pheatmap(n,show_colnames =F,show_rownames = F,
annotation_col=ac)
}
}
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/check_diff_genes.R |
#' An example dataset
#'
#' A dataset containing probes_expr, probe2gene, genes_expr, group_list, DEG
#'
#' @format A list with 5 elements:
#' \describe{
#' \item{probes_expr}{probes_expr, probes_expr}
#' \item{probe2gene}{probe2gene, probe2gene}
#' \item{genes_expr}{genes_expr, genes_expr}
#' \item{group_list}{group_list, group_list}
#' \item{DEG}{DEG, DEG}
#' ...
#' }
"GSE95166"
#' An example dataset
#'
#' A dataset containing genes_expr, group_list, DEG
#'
#' @format A list with 3 elements:
#' \describe{
#'   \item{genes_expr}{genes_expr, genes_expr}
#'   \item{group_list}{group_list, group_list}
#'   \item{DEG}{DEG, DEG}
#' ...
#' }
"GSE27533"
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/data.R |
##' draw a heatmap for DEG result
##'
##' \code{deg_heatmap} will draw a heatmap for you.
##'
##' @param deg the result from limma.
##' @param genes_expr the expression matrix
##' @param group_list, a vector
##' @param topn the number of genes in heatmap, default:20
##' @import ggplot2
##' @importFrom pheatmap pheatmap
##' @importFrom utils head tail
##' @return a ggplot2 style figure.
##' @examples
##' attach(GSE27533)
##' deg_heatmap(DEG,genes_expr,group_list)
##' @export
deg_heatmap <- function(deg,genes_expr,group_list,topn=20){
x=deg[,1]
names(x)=rownames(deg)
cg=c(names(head(sort(x),topn)),
names(tail(sort(x),topn)))
n=t(scale(t(genes_expr[cg,])))
n[n>2]=2
n[n< -2]= -2
n[1:4,1:4]
ac=data.frame(group_list=group_list)
rownames(ac)=colnames(n)
pheatmap(n,show_colnames =F,show_rownames = T,
annotation_col=ac)
}
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/deg_heatmap.R |
##' draw a volcano for DEG result
##'
##' \code{deg_volcano} will draw a volcano for you.
##'
##' @param need_deg should be 3 columns : gene, logFC, p.value(or p.adjust
##' @param style you can try 1 or 2, default: 1
##' @param p_thred default:0.05
##' @param logFC_thred default:1
##' @importFrom ggplot2 ggplot aes geom_point theme_set theme_bw xlab ylab ggtitle theme element_text scale_colour_manual
##' @importFrom ggpubr ggscatter
##' @importFrom utils head
##' @export
##' @return a ggplot2 style figure.
##' @examples
##' deg=GSE27533$DEG
##' need_deg=data.frame(symbols=rownames(deg), logFC=deg$logFC, p=deg$P.Value)
##' deg_volcano(need_deg,2)
##' \donttest{
##' deg_volcano(need_deg,1)
##' }
deg_volcano <- function(need_deg,style=1,p_thred=0.05,logFC_thred=1){
# need_deg should be 3 columns : gene, logFC, p.value(or p.adjust)
colnames(need_deg)=c('gene','logFC','p')
if(!(is.numeric(need_deg$logFC) & is.numeric(need_deg$p))){
stop('we only need a data.frame which should be 3 columns : gene, logFC, p.value(or p.adjust)')
}
if(style==1){
if(! logFC_thred){
logFC_thred <- with(need_deg,mean(abs( logFC)) + 2*sd(abs( logFC)) )
}
# logFC_thred=1
need_deg$change = as.factor(ifelse(need_deg$p < p_thred & abs(need_deg$logFC) > logFC_thred,
ifelse(need_deg$logFC > logFC_thred ,'UP','DOWN'),'NOT')
)
this_tile <- paste0('Cutoff for logFC is ',round(logFC_thred,3),
'\nThe number of up gene is ',nrow(need_deg[need_deg$change =='UP',]) ,
'\nThe number of down gene is ',nrow(need_deg[need_deg$change =='DOWN',])
)
# message(this_tile)
g = ggplot(data=need_deg,
aes(x=logFC, y=-log10(p),
color=change)) +
geom_point(alpha=0.4, size=1.75) +
theme_set(theme_set(theme_bw(base_size=20)))+
xlab("log2 fold change") + ylab("-log10 p-value") +
ggtitle( this_tile ) + theme(plot.title = element_text(size=15,hjust = 0.5))+
      scale_colour_manual(values = c('blue','black','red')) ## corresponding to the levels of need_deg$change
return(g)
}
if(style==2){
# p_thred=0.05;logFC_thred=1
need_deg$g=ifelse(need_deg$p > p_thred,'stable',
ifelse( need_deg$logFC > logFC_thred,'up',
ifelse( need_deg$logFC < -logFC_thred,'down','stable') )
)
need_deg$p = -log10( need_deg$p)
# message(table(need_deg$g))
p=ggscatter(need_deg, x = "logFC", y = "p", color = "g",size = 0.5,
label = "gene", repel = T,
label.select =head(need_deg$gene),
palette = c("#00AFBB", "#E7B800", "#FC4E07") )
return(p)
}
## TODO:
}
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/deg_volcano.R |
##' Filter expression matrix based on annotation
##'
##' \code{filterEM} will annotate the probes in an expression matrix and remove duplicated gene symbols.
##' Because many probes can map to the same gene, only the probe with the highest median expression is kept for each gene.
##' @param probes_expr is an expression matrix which rownames are probes of probe2gene and each column is a sample
##' @param probe2gene the first column is probes and the second column is corresponding gene symbols
##' @return an expression matrix with duplicated gene symbols removed
##' @importFrom utils head
##' @importFrom stats na.omit median
##' @examples
##' attach(GSE95166)
##' # head(probes_expr)
##' # head(probe2gene)
##' genes_expr <- filterEM(probes_expr,probe2gene)
##' # head(genes_expr)
##' @export
filterEM <- function(probes_expr,probe2gene){
colnames(probe2gene) <- c("probeid","symbol")
probe2gene$probeid=as.character(probe2gene$probeid)
probe2gene$symbol=trimws(probe2gene$symbol)
# head(probe2gene)
message(paste0('input expression matrix is ',nrow(probes_expr),' rows(genes or probes) and ',ncol(probes_expr),' columns(samples).\n'))
message(paste0('input probe2gene is ',nrow(probe2gene),' rows(genes or probes)\n'))
probe2gene=na.omit(probe2gene)
  # if one probe maps to many genes, we only keep the first occurrence;
  # note: this choice is essentially arbitrary, a known limitation carried into the downstream analysis
  probe2gene=probe2gene[!duplicated(probe2gene$probeid),]
  probe2gene = probe2gene[probe2gene$probeid %in% rownames(probes_expr),]
message(paste0('after remove NA or useless probes for probe2gene, ',nrow(probe2gene),' rows(genes or probes) left\n'))
#probes_expr <- exprs(eSet);dim(probes_expr)
probes_expr <- as.data.frame(probes_expr)
message(paste0('There are ',
sum(rownames(probes_expr) %in% probe2gene$probeid),
' of ',nrow(probes_expr),' probes can be annotated.\n'))
probes_expr=probes_expr[as.character(probe2gene$probeid),]
# probes_expr[1:4,1:4]
probe2gene$median=apply(probes_expr,1,median)
probe2gene=probe2gene[order(probe2gene$symbol,probe2gene$median,decreasing = T),]
probe2gene=probe2gene[!duplicated(probe2gene$symbol),]
genes_expr=probes_expr[as.character(probe2gene$probeid),]
rownames(genes_expr)=probe2gene$symbol
# genes_expr[1:4,1:4]
message(paste0('output expression matrix is ',nrow(genes_expr),' rows(genes or probes) and ',ncol(genes_expr),' columns(samples).'))
# probes_expr['AGAP6',]
return(genes_expr)
}
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/filterEM.R |
##' Download expression dataset by GSE id
##'
##' \code{geoChina} will download the expression matrix and phenotype data as ExpressionSet format
##' from cloud in mainland China,
##' it's a alternative method for getGEO function from GEOquery package.
##' geoChina('gse1009') is the same as eSet=getGEO('gse1009', getGPL = F)
##'
##' @param gse input GSE id, such as GSE1009, GSE2546, gse1009.
##' @param mirror "tencent" only for now.
##' @param destdir The destination directory for data downloads.
##' @return a list of ExpressionSet, which contains the expression matrix and phenotype data
##' @importFrom utils download.file
##' @importClassesFrom Biobase ExpressionSet
##' @examples
##' \dontrun{
##' geoChina('GSE1009',destdir=tempdir())
##' }
##' @export geoChina
geoChina <- function(gse='GSE2546',mirror='tencent',destdir=getwd()){
# eSet=getGEO('GSE2546', destdir=".", AnnotGPL = F, getGPL = F)
# http://49.235.27.111/GEOmirror/GSE2nnn/GSE2546_eSet.Rdata
# gse='GSE2546';mirror='tencent'
gse=toupper(gse)
if(!gse %in% series.accession){
    stop('Your GSE may not be an array-based expression dataset, or may not be a valid GSE accession')
}
  # probe the mirror and warn if it is unreachable (gracefully_fail is defined below)
  if (is.null(gracefully_fail('http://49.235.27.111'))) {
    message("Data source broken.")
  }
down=ifelse(as.numeric(gsub('GSE','',gse))<1000,
paste0('/GEOmirror/GSEnnn/',gse,
'_eSet.Rdata'),
paste0('/GEOmirror/',
gsub('[0-9][0-9][0-9]$','nnn',gse),'/',gse,
'_eSet.Rdata'))
if(mirror=='tencent'){
up='http://49.235.27.111'
}
OS <- .Platform$OS.type
if (OS == "unix"){
tpf=paste0(destdir,"/", gse, '_eSet.Rdata') # MAC file path
} else if (OS == "windows"){
tpf=paste0(destdir,"\\", gse, '_eSet.Rdata') # windows file path
} else {
stop("ERROR: OS could not be identified")
}
download.file(paste0(up,down),tpf,mode = "wb")
suppressWarnings(load(tpf))
# getGEO('GSE2546', destdir=".", AnnotGPL = F, getGPL = F)
message(paste0("file downloaded in ",destdir,'\nyou can also use getGEO from GEOquery, by \ngetGEO(',
shQuote(gse),
', destdir=".", AnnotGPL = F, getGPL = F)'
))
return(gset)
}
## https://community.rstudio.com/t/internet-resources-should-fail-gracefully/49199/11
gracefully_fail <- function(remote_file) {
try_GET <- function(x, ...) {
tryCatch(
httr::GET(url = x, httr::timeout(1), ...),
error = function(e) conditionMessage(e),
warning = function(w) conditionMessage(w)
)
}
is_response <- function(x) {
class(x) == "response"
}
# First check internet connection
if (!curl::has_internet()) {
message("No internet connection.")
return(invisible(NULL))
}
# Then try for timeout problems
resp <- try_GET(remote_file)
if (!is_response(resp)) {
message(resp)
return(invisible(NULL))
}
# Then stop if status > 400
if (httr::http_error(resp)) {
httr::message_for_status(resp)
return(invisible(NULL))
}
# If you are using rvest as I do you can easily read_html in the response
xml2::read_html(resp)
} | /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/geoChina.R |
utils::globalVariables(c("humanGTF", "mouseGTF", "ratGTF", "logFC", "gset", "change", "GSE95166"))
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/globals.R |
##' Get Probe Annotation
##'
##' \code{idmap} returns probe annotations for input gpl
##' @param gpl GPL(GEO platform) number, eg: GPL570
##' @param type source of probe anntation stored, one of "pipe", "bioc", "soft", default:"pipe"
##' @param mirror "tencent" only for now
##' @param destdir The destination directory for data downloads.
##' @return probe annotaions
##' @importFrom utils download.file data
##' @examples
##' ids=idmap('GPL570',destdir=tempdir())
##' \donttest{
##' ids=idmap('GPL570',type='soft',destdir=tempdir())
##' ids=idmap('GPL18084',type='pipe',destdir=tempdir())
##' }
##' @export
##'
idmap <- function(gpl='GPL570',type='bioc',mirror='tencent',destdir=getwd()){
gpl=toupper(gpl)
gpl_anno=paste(gpl,c('bioc','soft','pipe'),sep='_')
if(mirror=='tencent'){
up='http://49.235.27.111'
}
if(!checkGPL(gpl)){
stop("This platform is not in our list, please use our shinyAPP to custom annotate your probe sequences, or ask us to process and then update the R package!")
}else{
tryCatch("exists_anno_list")
gpl_anno=gpl_anno [gpl_anno %in% exists_anno_list]
# if( paste(gpl, type,sep='_') %in% exists_anno_list){
if(T){
tpf=paste0( paste(gpl, type,sep='_'),'.rda')
OS <- .Platform$OS.type
if (OS == "unix"){
dt = paste0( destdir,"/", tpf) # MAC file path
} else if (OS == "windows"){
dt = paste0( destdir,"\\", tpf) # windows file path
} else {
stop("ERROR: OS could not be identified")
}
down=paste0('/GEOmirror/GPL/',tpf)
download.file(paste0(up,down),dt,mode = "wb")
message(paste0("file downloaded in ",destdir))
load(dt)
return(get(paste(gpl, type,sep='_')))
}else{
      stop('We have that platform, but only offer other types of annotation.')
}
}
}
##' Check whether the input gpl in our platform list or not
##' @param GPL GPL(GEO platform) number, eg: GPL570
##' @return returns a boolean value
##' @examples
##' checkGPL('GPL570')
##' checkGPL('GPL15314')
##' checkGPL('GPL10558')
##' @export
checkGPL <- function(GPL=NULL){
if(length(GPL)==0){
stop("please input GPL number")
}
GPLList <- getGPLList()
flag = (GPL %in% GPLList[,1])
return(flag)
}
##' Print GPL information
##' @param GPL GPL(GEO platform) number, eg: GPL570
##' @return print detail information of the input GEO platform
##' @examples
##' printGPLInfo('GPL93')
##' @export
printGPLInfo <- function(GPL=NULL){
if(length(GPL)!=0){
flag=checkGPL(GPL)
if(!flag){
stop("This platform is not in our list, please use our shinyAPP to custom annotate your probe sequences, or ask us to process and then update the R package!")
}
tryCatch("gpl_list")
gpl_list <- gpl_list[gpl_list[,1]==GPL,]
}else{
gpl_list <- t(getGPLList())
}
return(t(gpl_list))
}
##' Get all GPL list in our package
##' \code{getGPLList} returns all the GPL number checklist stored in package
##' @return a data.frame which contains the gpl and name of array.
##' @export
getGPLList <- function(){
tryCatch("gpl_list")
GPLList <- get("gpl_list")
return(GPLList[,1:2])
}
| /scratch/gouwar.j/cran-all/cranData/AnnoProbe/R/idmap.R |