#' Estimate mode (most frequent value)
#'
#' Estimate the mode, ie the most frequent value. In case of continuous numeric data, the most frequent value may not be the most frequently repeated exact value.
#' This function offers various approaches to estimate the mode of a numeric vector.
#' Besides, it can also be used to identify the most frequent exact term (in this case also from character vectors).
#'
#' @details
#' The argument \code{method} allows choosing among (so far) 4 different methods.
#' If "density" is chosen, the most dense region of sqrt(n) values will be chosen;
#' if "binning", the data will be binned (like in histograms) via rounding to a user-defined number of significant values ("rangeSign").
#' If \code{method} is set to "BBmisc", the function \code{computeMode()} from package \href{https://CRAN.R-project.org/package=BBmisc}{BBmisc} will be used.
#' If "mode" is chosen, the first most frequently occuring (exact) value will be returned, if "allModes", all ties will be returned. This last mode also works with character input.
#'
#' @param x (numeric, or character if \code{method="mode"}) data to find/estimate the most frequent value of
#' @param method (character) one of 'density' (default), 'binning', 'BBmisc', 'mode' or 'allModes'. With "binning" the function will search context-dependent, ie like the most frequent class of a histogram.
#' In "binning" mode the search will be refined if either >80 percent of the values fall in a single class, or >50 percent fall in a single class while most other classes are almost empty.
#' @param finiteOnly (logical) suppress non-finite values; allows avoiding \code{NULL} as result in presence of some \code{Inf} values; \code{NA} will be ignored in any case
#' @param bandw (integer) only used when \code{method="binning"} or \code{method="density"} : defines the number of points to look for density, or the number of classes used;
#' this parameter is quite critical and may strongly change results. Note: with \code{method="binning"}, at higher values of "bandw" you will finally lose the advantage of the histogram-like search of the mode !
#' @param rangeSign (integer) only used when \code{method="binning"}: range of numbers used as number of significant values
#' @param silent (logical) suppress messages
#' @param callFrom (character) allows easier tracking of messages produced
#' @param debug (logical) additional messages for debugging
#' @return This function returns a numeric vector with the value of the mode; the name of the value indicates its position
#' @seealso \code{computeMode()} in package \href{https://CRAN.R-project.org/package=BBmisc}{BBmisc}
#' @examples
#' set.seed(2012); dat <- round(c(rnorm(50), runif(100)),3)
#' stableMode(dat)
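#' ## illustrative use of further documented methods (see Details)
#' stableMode(dat, method="binning")
#' stableMode(round(dat,1), method="allModes")   # all tied exact values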
#' @export
stableMode <- function(x, method="density", finiteOnly=TRUE, bandw=NULL, rangeSign=1:6, silent=FALSE, callFrom=NULL, debug=FALSE) {
## stable mode
fxNa <- .composeCallName(callFrom, newNa="stableMode")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
## prepare data: treat NA or non-finite values
if(finiteOnly) { chFin <- is.finite(x)
if(all(!chFin)) x <- NULL else if(any(!chFin)) {x <- x[which(chFin)]
if(!silent) message(fxNa,"Removing ",sum(!chFin)," (out of ",length(chFin),") non-finite values") }
} else x <- naOmit(x)
  if(length(unique(x)) < 1) {
    if(!silent) message(fxNa, "NO (finite) values found (nothing to do)")
    return(NULL)
  } else if(length(unique(x)) == 1) {
    if(!silent && length(x) >1) message(fxNa, "All values are the same (= mode)")
    return(x[1]) }
if(identical(method, "dens")) method <- "density"
if(identical(method, "bin")) method <- "binning"
if(identical(method, "histLike")) {
method <- "binning"
if(!silent) message(fxNa, "Note: argument option 'histLike' has been depreciated and replaced by 'binning'") }
out <- NULL
isNum <- is.numeric(x)
## check type of input
if(any(sapply(c("BBmisc","density","binning"), identical, method)) && !isNum) {
chNum <- try(as.numeric(if(is.factor(x)) as.character(x) else x), silent=TRUE)
if(inherits(chNum, "try-error")) {
if(!silent) message(fxNa,"Note : Input is NOT numeric, not compatible with method chosen, thus setting method='mode' !")
method <- "mode"
}}
## find simply most frequent exact value(s)
if(any(sapply(c("allModes","mode","asIs"), identical, method))) {
if(!is.factor(x)) x <- factor(as.character(x))
tabX <- tabulate(x)
out <- if(identical(method, "allModes")) {
levels(x)[which(tabX == max(tabX))]
} else levels(x)[which.max(tabX)]
if(isNum) out <- as.numeric(out)
if(debug) {message(fxNa," stM1"); stM1 <- list(method=method,bandw=bandw,x=x,tabX=tabX) }
} else chDu <- sum(duplicated(x))
## BBmisc
if(identical(method, "BBmisc")) {
if(!requireNamespace("BBmisc")) { method <- "density"
message(fxNa,"Package 'BBmisc' not found ! Please install first from CRAN \n setting 'method' to 'density'") }
}
if(identical(method, "BBmisc")) {
mo <- try(sapply(rangeSign, function(y) BBmisc::computeMode(signif(x, y))), silent=TRUE)
if(inherits(mo, "try-error")) { method <- "density"
warning(fxNa,"UNABLE to calulate BBmisc::computeMode(), setting 'method' to 'density'")
} else { posi <- .firstMin(diff(mo)/mo[-length(mo)], positionOnly = TRUE)
out <- mo[posi] }
if(debug) {message(fxNa," stM2"); stM2 <- list(method=method,bandw=bandw,x=x,mo=mo) }
}
## density
if(identical(method, "density")) {
if(length(bandw) <1) {bandw <- round(1.4* sqrt(length(x)))
if(!silent) message(fxNa,"Method='density', length of x =",length(x),", 'bandw' has been set to ",bandw)}
x <- sort(x)
raX <- max(x) - min(x)
nExt <- bandw - 1
x <- c(min(x) - raX * (nExt:1)/nExt, x, max(x) + raX * (1:nExt)/nExt)
dif <- x[(bandw + 1):length(x)] - x[1:(length(x) - bandw)]
maxDi <- which(dif == min(dif) & is.finite(dif))
if(length(maxDi) > 1) maxDi <- maxDi[round(length(maxDi)/2)]
out <- x[maxDi +nExt]
names(out) <- maxDi
if(debug) {message(fxNa," stM3"); stM3 <- list(method=method,bandw=bandw,x=x,raX=raX,nExt=nExt,maxDi=maxDi,dif=dif) }
}
## binning
if(identical(method, "binning")) {
if(!all(length(bandw) >0, is.numeric(bandw))) bandw <- ceiling(sqrt(length(x)))
if(70 * bandw > length(x) && !silent) message(fxNa,"Method='binning', value of 'bandw'=", bandw, " may be too high for good functioning !")
xRa <- range(x[which(is.finite(x))])
frq <- table(cut(x, breaks = seq(xRa[1], xRa[2], length.out = bandw)))
che <- max(frq, na.rm=TRUE) > c(0.5, 0.8) * length(x)
if (che[2]) {
if (!silent) message(fxNa, ">80% of values in class no ", which.max(frq), ", refining mode estimation")
mxF <- which.max(frq)
mxF <- signif(seq(xRa[1], xRa[2], length.out = bandw)[c(max(mxF -3, 1), min(mxF +3, bandw))], 4)
frq <- table(cut(x, breaks = seq(mxF[1], mxF[2], length.out = bandw)))
} else {
if(che[1] && sum(frq < length(x)/5000) > 0.5 * bandw) {
if(!silent) message(fxNa, ">50% of values in class no ",
which.max(frq), " & >50% of other classes almost empty, refining result")
useBr <- range(which(frq > 0.05 * length(x))) +c(-1, 1)
useBr <- c(max(useBr[1], 1), min(useBr[2], bandw))
mxF <- signif(seq(xRa[1], xRa[2], length.out = bandw)[useBr], 4)
frq <- table(cut(x, breaks = seq(mxF[1], mxF[2], length.out = bandw)))
}
}
    out <- names(frq)[which.max(frq)]
out <- as.numeric(unlist(strsplit(substr(out, 2, nchar(out) -1), ",")))
out <- sum(out)/length(out)
}
out
}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/stableMode.R
#' Standardize (scale) data
#'
#' This function works similarly to \code{\link[base]{scale}}; however, it evaluates the entire input, and not each column independently as \code{scale} does.
#' By standardizing we mean transforming the data to end up with mean=0 and sd=1.
#' Furthermore, in case of 3-dim arrays, this function also returns an object with the same dimensions as the input.
#'
#' @param mat (matrix, data.frame or array) data that need to get standardized.
#' @param byColumn (logical) if \code{TRUE} the function will be run independently over all columns, as in \code{apply(mat,2,standardW)}
#' @param na.rm (logical) if \code{NA}s in the data don't get ignored via this argument, the output will be all \code{NA}
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a vector of rescaled data (in dimensions as input)
#' @seealso \code{\link[base]{scale}}
#' @examples
#' dat <- matrix(2*round(runif(100),2), ncol=4)
#' mean(dat); sd(dat)
#'
#' dat2 <- standardW(dat)
#' apply(dat2, 2, sd)
#' summary(dat2)
#'
#' dat3 <- standardW(dat, byColumn=TRUE)
#' apply(dat3, 2, sd)
#' summary(dat3)
#' mean(dat3); sd(dat3)
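#'
#' ## illustrative sketch : per the description, a 3-dim array keeps its dimensions
#' arr1 <- array(round(runif(24),2), dim=c(4,3,2))
#' dim(standardW(arr1))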
#'
#' @export
standardW <- function(mat, byColumn=FALSE, na.rm=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## standardize as entire matrix (ie not column-wise, relative differences in row get thus conserved), specific cols may be selected
## used to be standEntMatr
fxNa <- .composeCallName(callFrom, newNa="standardW")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(debug) message(fxNa,"sW1")
if(is.data.frame(mat)) mat <- as.matrix(mat)
  if(byColumn && length(dim(mat)) >1) { std <- function(x) (x -mean(x, na.rm=na.rm))/stats::sd(x, na.rm=na.rm)
    if(length(dim(mat)) ==2) apply(mat, 2, std) else {if(length(dim(mat)) ==3) apply(mat, 2:3, std) }
  } else (mat -mean(mat, na.rm=na.rm)) /stats::sd(mat, na.rm=na.rm)
}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/standardW.R
#' Standard error of median by bootstrap
#'
#' \code{stdErrMedBoot} estimates the standard error of the median by a bootstrap approach.
#' Note: requires package \href{https://CRAN.R-project.org/package=boot}{boot}
#'
#' @param x (numeric) vector for which the median and its standard error should be estimated
#' @param nBoot (integer) number of bootstrap iterations
#' @return This function returns a (numeric) vector with estimated standard error
#' @seealso \code{\link[boot]{boot}}
#' @examples
#' set.seed(2014); ra1 <- c(rnorm(9,2,1),runif(8,1,2))
#' rat1 <- ratioAllComb(ra1[1:9],ra1[10:17])
#' median(rat1); stdErrMedBoot(rat1)
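#' stdErrMedBoot(rat1, nBoot=999)   # illustrative: more iterations typically stabilize the estimate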
#' @export
stdErrMedBoot <- function(x, nBoot=99) {
## uses package boot
chPa <- requireNamespace("boot", quietly=TRUE)
if(!chPa) { warning("package 'boot' not found ! Please install first from CRAN")
NULL
} else {
median.fun <- function(dat, indices) stats::median(dat[indices], na.rm=TRUE)
out <- try(stats::sd(boot::boot(data=x, statistic=median.fun, R=nBoot)$t))
if(inherits(out, "try-error")) stop("Unable to run bootstrap, check format of data and argument 'nBoot'")
out }}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/stdErrMedBoot.R
#' Count number of NAs per sub-set of columns
#'
#' This function will count the number of \code{NA}s per group (defined by argument \code{grp}) while summing over all lines of a matrix or data.frame.
#' The row-position has no influence on the counting.
#' Using the argument \code{asRelative=TRUE} the result will be given as (average) number of \code{NA}s per row and group.
#'
#' @param x matrix or data.frame which may contain \code{NA}s
#' @param grp factor describing which column of 'dat' belongs to which group
#' @param asRelative (logical) return as count of \code{NA}s per row and group
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns an integer vector with count of \code{NA}s per group
#' @seealso \code{\link[base]{NA}}, filter \code{NA}s by line \code{\link{presenceFilt}}
#' @examples
#' mat <- matrix(1:25, ncol=5)
#' mat[lower.tri(mat)] <- NA
#' sumNAperGroup(mat, rep(1:2,c(3,2)))
#' sumNAperGroup(mat, rep(1:2,c(3,2)), asRelative=TRUE)
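#' ## illustrative: character group-labels work the same way and name the output
#' sumNAperGroup(mat, rep(c("a","b"), c(3,2)))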
#'
#' @export
sumNAperGroup <- function(x, grp, asRelative=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## count number of NAs per set of columns defined by grp
fxNa <- .composeCallName(callFrom, newNa="sumNAperGroup")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(length(dim(x)) <2) stop("Argument 'x' should be matrix or data.frame")
if(length(grp) != ncol(x)) stop("Length of argument 'x' should match number of columns in 'x'")
if(is.data.frame(x)) x <- as.matrix(x)
if(debug) message(fxNa,"sNAG1")
grpLev <- unique(naOmit(grp))
out <- as.integer(by(t(x), grp, function(y) sum(is.na(y))))[rank(grpLev)]
names(out) <- grpLev
if(isTRUE(asRelative)) {
nGrp <- table(grp)[rank(grpLev)]
out <- out/(nrow(x)*nGrp) }
out }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/sumNAperGroup.R
#' Summarize columns (as median,mean,min,last or other methods)
#'
#' \code{summarizeCols} summarizes all columns of matrix (or data.frame).
#' In case of text-columns the sorted middle (~median) will be given, unless 'maxLast', 'minLast' or 'maxAbsLast' is chosen :
#' these methods consider only the last column of 'matr' and choose, among all rows, the line where the last column is at its max (or min);
#' 'medianComplete' or 'meanComplete' considers only lines/rows where no NA occurs (since NAs would otherwise influence the other columns !)
#' @param matr data.frame or matrix of data to be summarized by column (may use a different method for text and numeric columns)
#' @param meth (character) summarization method (eg 'maxLast','minLast','maxAbsLast', 'medianComplete' or 'meanComplete')
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return vector with summary for each column
#' @seealso \code{rowMeans} in \code{\link[base]{colSums}}
#' @examples
#' t1 <- matrix(round(runif(30,1,9)), nc=3); rownames(t1) <- letters[c(1:5,3:4,6:4)]
#' summarizeCols(t1, me="median")
#' t(sapply(by(t1,rownames(t1), function(x) x), summarizeCols,me="maxLast"))
#' t3 <- data.frame(ref=rep(11:15,3), tx=letters[1:15],
#' matrix(round(runif(30,-3,2),1), ncol=2), stringsAsFactors=FALSE)
#' by(t3,t3[,1], function(x) x)
#' t(sapply(by(t3,t3[,1], function(x) x), summarizeCols,me="maxAbsLast"))
#' @export
summarizeCols <- function(matr, meth="median", silent=FALSE, debug=FALSE, callFrom=NULL) {
## summarize all columns of matrix (or data.frame) 'x' (most methods will call apply)
## in case of text-columns the sorted middle (~median) will be given, unless 'maxLast' or 'minLast'
## 'maxLast','maxAbsLast' or 'minLast' .. consider only last column of 'matr' : choose from all columns the line where (max of) last col is at min,max...
## 'medianComplete' or 'meanComplete' consideres only lines/rows where no NA occur (NA have influence other columns !)
## return vector with summary for each column
fxNa <- .composeCallName(callFrom, newNa="summarizeCols")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
argOpt <- c("median","mean","aver","average","min","max","maxOfRef","minOfRef","maxAbsOfRef","firstLi","lastLi","first","last")
argOpt <- c(argOpt,paste(argOpt,"Complete",sep=""),"Null")
txt <- c("Argument '","' should be "," seeting to first/default (meth='median')")
if(length(dim(matr)) <2) {if(!silent) message(fxNa,txt[1],"matr",txt[2],"matrix or data.frame")
if(length(dim(matr)) <1) {meth <- "Null"; matr=matrix(NA)} else matr <- as.matrix(matr)}
if(length(meth) <1 & !silent) {message(fxNa,txt[3]); meth="median"}
if(length(meth) !=1) {if(!silent) message(fxNa,txt[1],"meth",txt[2]," of length=1, ",txt[3]); meth <- meth[1]}
  maxLaArg2 <- cbind(old=c("maxLast","minLast","maxAbsLast"), new=c("maxOfRef","minOfRef","maxAbsOfRef"))
if(meth %in% maxLaArg2[,1]) { tmp <- names(meth)
meth <- maxLaArg2[which(meth %in% maxLaArg2[,1]),2]
if(!silent) message(fxNa,"Argument 'meth' renamed to '",meth,"'") }
if(!meth %in% argOpt) stop(fxNa,txt[1],"meth",txt[2],"either one of :",pasteC(argOpt,quoteC="'"))
colMod <- rep(NA,ncol(matr))
for(i in 1:ncol(matr)) colMod[i] <- mode(matr[,i])
colMod <- colMod != "numeric" # check which col is not numeric
if(meth=="Null") out <- NULL else { if(any(colMod)) { # has character columns ..
if(debug) message(fxNa," any(maxLast,minLast) %in% meth ",any(c("maxLast","minLast") %in% meth))
if(any(c("maxLast","maxAbsLast","minLast") %in% meth)) out <- .summarizeCols(matr,meth) else {
out <- rep(NA, ncol(matr))
out[which(colMod)] <- if(sum(colMod)==1) .sortMid(matr[,which(colMod)]) else apply(matr[,which(colMod)], 2, .sortMid)
if(any(!colMod)) out[which(!colMod)] <- .summarizeCols(matr[,which(!colMod)],meth) # and put in initial order
}
} else out <- .summarizeCols(matr,meth)}
out }
#' Summarize columns of matrix (or data.frame) 'x' using apply (main)
#'
#' This function summarizes columns of matrix (or data.frame) 'x' using apply
#' Note, it cannot handle character entries ! (all results will be NA)
#'
#'
#' @param x data.frame or matrix of data to be summarized by column
#' @param me (character, length=1) summarization method (eg 'maxOfRef','minOfRef','maxAbsOfRef', 'medianComplete' or 'meanComplete')
#' @param vectAs1row (logical) if TRUE will interpret non-matrix 'x' as matrix with 1 row (corrects the effect of automatic conversion when extracting 1 line)
#' @return vector with summary for each column
#' @seealso \code{summarizeCols}
#' @examples
#' t1 <- matrix(round(runif(30,1,9)), nc=3); rownames(t1) <- letters[c(1:5,3:4,6:4)]
#' @export
.summarizeCols <- function(x, me=c("median","medianComplete","mean","meanComplete","aver","average","min","max","maxOfRef","minOfRef",
"maxAbsOfRef","lastLi","last","firstComplete","first","firstLi","summary"),vectAs1row=TRUE) {
## summarize columns of matrix (or data.frame) 'x' using apply
## CANNOT handle character entries ! (all results will be NA)
  ## 'vectAs1row' .. if TRUE will interpret non-matrix 'x' as matrix with 1 row (corrects the effect of automatic conversion when extracting 1 line)
## me='maxOfRef','maxAbsOfRef' or 'minOfRef': return line where last col of 'x' is at (first) max (or min) ...
## me='lastLi' .. return last line of 'x'
## any term of me containing 'Complete' (eg 'firstComplete' ).. first filter to lines of 'x' wo any NA
## me='medianComplete' .. median only of 'x' where no NA per line
  ## me='summary' will return a matrix instead of vector !! (each col for init cols of 'x')
if(me=="med") me <- "median"
if(me %in% c("av","aver","average")) me <- "mean" # synonyms ..
if(length(dim(x)) <2) x <- if(vectAs1row) matrix(x, nrow=1, dimnames=list(NULL,names(x))) else as.matrix(x)
if(length(grep("Complete", me)) >0) { # reduce x to complete rows only
compl <- which(rowSums(is.na(x)) <1)
me <- sub("Complete","",me) # term 'Complete' disappears from me ...
if(length(compl) <1) me <- "Null" # this way output will be NULL if 0 lines wo NAs
x <- if(length(compl) >1) x[compl,] else if(length(compl) ==1) matrix(x[compl,], nrow=1, dimnames=list(rownames(x)[compl],colnames(x)))}
switch(me, maxOfRef=x[which.max(x[,ncol(x)]),], minOfRef=x[which.min(x[,ncol(x)]),],
maxAbsOfRef=x[which.max(abs(x[,ncol(x)])),], Null=NULL,
median=apply(x, 2, stats::median, na.rm=TRUE), mean=colMeans(x, na.rm=TRUE),
max=apply(x, 2, max,na.rm=TRUE), min=apply(x, 2, min, na.rm=TRUE),
summary=apply(x, 2, summary),
lastLi= x[nrow(x),], last= x[nrow(x),], firstLi= x[1,], first=x[1,])}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/summarizeCols.R
#' System-date (compressed format)
#'
#' This function returns current date (based on Sys.Date) in different format options.
#'
#' @details
#' Multiple options for formatting exist :
#' 'univ1' or 'wr' ... (default) compact style using day, first 3 letters of English name of month (lowercaps) and last 2 letters of year as ddmmmyy, eg 14jun21
#'
#' 'univ2' ... as ddMmmyy, eg 14Jun21
#'
#' 'univ3' ... as ddMonthyyyy, eg 14June2021
#'
#' 'univ4' ... as ddmonthyyyy, eg 14june2021
#'
#' 'univ5' ... as yyyy-mm-dd (output of \code{Sys.Date()}), eg 2021-06-14
#'
#' 'univ6' ... as yyyy-number of day (in year), eg 2021-165
#'
#' 'local1' ... compact style using day, first 3 letters of current locale name of month (not necessarily unique !) and last 2 letters of year as ddmmmyy, eg 14jui21
#'
#' 'local2' ... as ddMmmyy, month based on current locale (not necessarily unique !), eg 14Jui21
#'
#' 'local3' ... as ddMonthyyyy, month based on current locale , eg 14Juin2021
#'
#' 'local4' ... as ddmonthyyyy, month based on current locale , eg 14juin2021
#'
#' 'local5' ... as dd-month-yyyy, month based on current locale , eg 14-juin-2021
#'
#' 'local6' ... as yyyymonthddd, month based on current locale , eg 2021juin14
#'
#'
#'
#' @param style (character) choose style (default 'univ1' for very compact style)
#' @return character vector with formatted date
#'
#' @seealso \code{\link[base]{date}}, \code{Sys.Date} and \code{\link[base]{Sys.time}}
#' @examples
#' sysDate()
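#' ## illustrative: a few of the other documented styles
#' sysDate(style="univ2")
#' sysDate(style="local1")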
#'
#' @export
sysDate <- function(style="univ1") {
## return date in compressed format ('wr-style') day/month/year eg 15sep20
if(length(style) <1) style <- "univ1" else if(any(is.na(style))) style <- "univ1"
if("wr" %in% style) style <- "univ1"
  out <- switch(style,
local1=paste0(format(Sys.Date(),"%d"), substr(format(Sys.Date(),"%b"),1,3), substr(format(Sys.Date(),"%Y"),3,4)), # ddmmmyy
local2=paste0(format(Sys.Date(),"%d"), gsub("(^[[:alpha:]])", "\\U\\1", substr(format(Sys.Date(),"%b"),1,3), perl=TRUE), substr(format(Sys.Date(),"%Y"),3,4)), # ddMmmyy
    local3=paste0(format(Sys.Date(),"%d"), gsub("(^[[:alpha:]])", "\\U\\1", format(Sys.Date(),"%b"), perl=TRUE), format(Sys.Date(),"%Y")), # ddMonthyyyy
local4=format(Sys.Date(), "%d%b%Y"), # ddmonthyyyy
local5=format(Sys.Date(),"%v"), # separated by -, dd-month-yyyy
local6=format(Sys.Date(), "%Y%b%d"), # yyyymonthdd
univ1=paste0(format(Sys.Date(),"%d"), tolower(month.abb[as.integer(format(Sys.Date(),"%m"))]), substr(format(Sys.Date(),"%Y"),3,4)), # ddmmmyy
univ2=paste0(format(Sys.Date(),"%d"), month.abb[as.integer(format(Sys.Date(),"%m"))], substr(format(Sys.Date(),"%Y"),3,4)), # ddMmmyy
    univ3=paste0(format(Sys.Date(),"%d"), month.name[as.integer(format(Sys.Date(),"%m"))], format(Sys.Date(),"%Y")), # ddMonthyyyy
univ4=paste0(format(Sys.Date(),"%d"), tolower(month.name[as.integer(format(Sys.Date(),"%m"))]), format(Sys.Date(),"%Y")), # ddmonthyyyy
univ5=Sys.Date(), #yyyy-mm-dd
univ6=format(Sys.Date(), "%Y-%j") )
out }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/sysDate.R
#' t.test on all individual values against all other values
#'
#' Runs a t.test on each individual value of x against all its neighbours (ie the remaining values of the same vector), in order to test if this value is likely to belong to the vector x.
#' This represents a repeated leave-one-out testing. Multiple choices for multiple-testing correction are available.
#'
#' @param x (numeric) vector of values to be tested
#' @param alph (numeric) threshold alpha (passed to \code{t.test})
#' @param alternative (character) will be passed to \code{t.test} as argument 'alternative', may be "two.sided",..
#' @param p.adj (character) multiple test correction : may be NULL (no correction), "BH","BY","holm","hochberg" or "bonferroni" (but not 'fdr' since this may be confounded with local false discovery rate), see \code{\link[stats]{p.adjust}}
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a numeric vector with p-values or FDR (depending on argument \code{p.adj})
#' @seealso \code{\link[stats]{t.test}}, \code{\link[stats]{p.adjust}}
#' @examples
#' set.seed(2016); x1 <- rnorm(100)
#' allTests1 <- tTestAllVal(x1)
#' hist(allTests1,breaks="FD")
#' @export
tTestAllVal <- function(x, alph=0.05, alternative="two.sided", p.adj=NULL, silent=FALSE, debug=FALSE, callFrom=NULL){
fxNa <- .composeCallName(callFrom, newNa="tTestAllVal")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(debug) message(fxNa,"tTA1")
  x <- as.numeric(x)
  pVal <- sapply(1:length(x), function(z) stats::t.test(x[-z], x[z], conf.level=alph, alternative=alternative, var.equal=TRUE)$p.value )
if(!is.null(p.adj)) if(any(p.adj %in% c("BH","BY","holm","hochberg","bonferroni"))) pVal <- stats::p.adjust(pVal,method=p.adj)
pVal }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/t.testAllInd.R
#' Print matrix-content as plot
#'
#' This function prints all columns of matrix in plotting region for easier inclusion to reports (default values are set to work for output as A4-sized pdf).
#' It was made for integrating listings of text to graphical output to devices like \code{png}, \code{jpeg} or \code{pdf}.
#'
#' @details
#' This function was initially designed for listings with a small or medium 1st column (eg counter or index), small 2nd & 3rd columns and a long last column (like file paths).
#' Obviously, the final number of lines one can pack and still read correctly into the graphical output depends on the size of the device
#' (on a pdf of size A4 one can pack up to approx. 110 lines).
#' Of course, \code{\link[utils]{Sweave}}, combined with LaTeX, provides a powerful alternative for wrapping text to pdf-output (and further combining text and graphics).
#' Note: The final result on pdf devices may vary depending on screen-size (ie width of the current device); the parameters 'colPos' and 'titOffS' may need some refinement.
#' Note: In view of typical page/figure layouts like A4, the plotting region will be split to avoid too wide spacing between rows when there are less than 30 rows.
#'
#' @param matr (matrix) main (character) matrix to display
#' @param colPos (numeric) position of columns on x-scale (from 0 to 1)
#' @param useCex (numeric) cex expansion factor for size of text (may be different for each column)
#' @param useAdj (numeric) left/center/right alignment for text (may be different for each column)
#' @param useCol color specification for text (may be different for each column)
#' @param titOffS (numeric) offset for title line (relative to 'colPos')
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns NULL (no R-object returned), print 'plot' in current device only
#' @seealso \code{\link[utils]{Sweave}} for more flexible framework
#' @importFrom graphics par
#' @examples
#' ## as example let's make a listing of file-names and associated parameters in current directory
#' mat <- dir()
#' mat <- cbind(no=1:length(mat),fileName=mat,mode=file.mode(mat),
#' si=round(file.size(mat)/1024),path=getwd())
#' ## Now, we wrap all text into a figure (which could be saved as jpg, pdf etc)
#' tableToPlot(mat[,-1],colPos=c(0.01,0.4,0.46,0.6),titOffS=c(0.05,-0.03,-0.01,0.06))
#' tableToPlot(mat,colPos=c(0,0.16,0.36,0.42,0.75),useAdj=0.5,titOffS=c(-0.01,0,-0.01,0,-0.1))
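#' ## illustrative: 'useCex' may differ per column (as documented above)
#' tableToPlot(mat, colPos=c(0,0.16,0.36,0.42,0.75), useCex=c(0.5,0.7,0.7,0.7,0.6),
#'   useAdj=0.5, titOffS=c(-0.01,0,-0.01,0,-0.1))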
#' @export
tableToPlot <- function(matr, colPos=c(0.05,0.35,0.41,0.56), useCex=0.7, useAdj=c(0,1,1,0), titOffS=0, useCol=1, silent=FALSE, callFrom=NULL){
fxNa <- .composeCallName(callFrom, newNa="tableToPlot")
if(!isTRUE(silent)) silent <- FALSE
opar <- list(mar=graphics::par("mar"))
on.exit(graphics::par(opar))
msg <- " requires matrix (or data.frame) with >0 rows and 2-7 columns"
msg2 <- " number of columns of 'matr' doesn't match with number of elements in 'colPos'"
if(length(dim(matr)) <2) stop(msg)
if(is.data.frame(matr)) matr <- as.matrix(matr)
if(nrow(matr) <1) stop("nothing to display", msg)
  if(ncol(matr) >7 || ncol(matr) <2) stop(msg)
if(length(colPos) != ncol(matr)) stop(msg2)
if(length(titOffS) < ncol(matr)) titOffS <- c(titOffS,rep(0,ncol(matr)-length(titOffS)))
if(length(useAdj) < ncol(matr)) useAdj <- c(useAdj,rep(0.5,ncol(matr)-length(useAdj)))
if(length(useCex) < ncol(matr)) useCex <- rep(useCex,ceiling(ncol(matr)/length(useCex)))
if(length(useCol) < ncol(matr)) useCol <- rep(useCol,ceiling(ncol(matr)/length(useCol)))
matr <- matr[nrow(matr):1,] # reverse order since plot starts at bottom
graphics::par(mar=c(1,1,1,1))
graphics::plot.new()
if(nrow(matr) <30) graphics::layout(matrix(1:2), heights=c(nrow(matr)/2,1))
j <- 1:nrow(matr) +0.1 # y-position in plot
graphics::plot.window(xlim=c(0,1), ylim=c(1,nrow(matr) +1.7))
titPos <- colPos + titOffS
## add col-heads
graphics::text(titPos, nrow(matr)+1, colnames(matr), cex=sort(c(0.5,max(useCex,na.rm=TRUE) +0.1,1.2))[2])
for(i in 1:ncol(matr)) graphics::text(colPos[i], j, matr[,i], cex=useCex[i], col=useCol[i], adj=useAdj[i])
}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/tableToPlot.R
#' 2-factorial limma-style t-test
#'
#' The aim of this function is to provide convenient access to two-factorial (linear) testing within the framework of \code{\link{makeMAList}}, including the empirical Bayes shrinkage.
#' The input data 'datMatr' should already be organized as limma-type MAList, eg using \code{\link{makeMAList}}.
#' Note: This function uses the Bioconductor package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}.
#'
#' @param datMatr matrix or data.frame with lines as independent series of measures (eg different genes)
#' @param fac1 (character or factor) vector describing grouping elements of each line of 'datMatr' for first factor, must be of same length as fac2
#' @param fac2 (character or factor) vector describing grouping elements of each line of 'datMatr' for second factor, must be of same length as fac1
#' @param testSynerg (logical) decide if factor-interactions (eg synergy) should be included to model
#' @param testOrientation (character) default (or any non-recognized input) '=', otherwise either '>','greater','sup','upper' or '<','less','inf','lower'
#' @param addResults (character) vector defining which types of information should be included to output, may be 'lfdr','FDR' (for BY correction), 'Mval' (M values), 'means' (matrix with mean values for each group of replicates)
#' @param addGenes (matrix or data.frame) additional information to add to output
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @param debug (logical) additional messages for debugging
#' @return This function returns an object of class "MArrayLM" (from limma) containing/enriched by the testing results
#' @seealso \code{\link{makeMAList}}, single line testing \code{\link[limma]{lmFit}} and the \code{eBayes}-family of functions in package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}
#' @examples
#' set.seed(2014)
#' dat0 <- rnorm(30) + rep(c(10,15,19,20),c(9,8,7,6))
#' fa <- factor(rep(letters[1:4],c(9,8,7,6)))
#' dat2 <- data.frame(facA=rep(c("-","A","-","A"), c(9,8,7,6)),
#' facB= rep(c("-","-","B","B"), c(9,8,7,6)), dat1=dat0, dat2=runif(30))
#' grpNa <- sub("-","",sub("\\.","", apply(dat2[,1:2], 1, paste, collapse="")))
#' test2f <- test2factLimma(t(dat2[,3:4]), dat2$facA, dat2$facB, testS=FALSE)
#' test2f
#' # Now you can easily summarize results using topTable from limma
#' if(requireNamespace("limma", quietly=TRUE)) {
#' library(limma)
#' topTable(test2f, coef=1, n=5)
#' topTable(test2f, coef=2, n=5) }
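#' ## illustrative: with addResults=NULL only the p-values (for the first factor) are returned
#' pVal2f <- test2factLimma(t(dat2[,3:4]), dat2$facA, dat2$facB, testS=FALSE, addResults=NULL)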
#' @export
test2factLimma <- function(datMatr, fac1, fac2, testSynerg=TRUE, testOrientation="=", addResults=c("lfdr","FDR","Mval","means"), addGenes=NULL, silent=FALSE, callFrom=NULL, debug=FALSE){
fxNa <- .composeCallName(callFrom, newNa="test2factLimma")
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!isTRUE(silent)) silent <- FALSE
doTest <- TRUE
if(!requireNamespace("limma", quietly=TRUE)) { doTest <- FALSE
warning(fxNa,"You need to install package 'limma' first from Bioconductor")}
if(doTest) {
msg1 <- " 'datMatr' should have the same number of cols as length of fac1 & fac2 !"
if(ncol(datMatr) != length(fac1) || ncol(datMatr) != length(fac2)) stop(msg1)
datDesign <- if(isTRUE(testSynerg)) try(stats::model.matrix(~ fac1 * fac2),silent=TRUE) else try(stats::model.matrix(~ fac1 + fac2), silent=TRUE)
if(inherits(datDesign, "try-error")) { doTest <- FALSE; message(fxNa," Problem with model.matrix(), please check your factors !")
} else if(debug) message(fxNa,"design matrix has ",nrow(datDesign)," rows and ",ncol(datDesign)," cols")
}
if(doTest) {
datFit <- try(limma::lmFit(datMatr, design=datDesign), silent=TRUE) ## Fitting linear models
if(inherits(datFit, "try-error")) {warning(fxNa," PROBLEM with lmFit(); check if package 'limma' is correctly installed, it seems not to be working properly !?!")
doTest <- FALSE
} else if(debug) message(fxNa,"Sucessfully run lmFit()") }
if(doTest) {
datFit <- limma::eBayes(datFit) ## Adjusting using empirical Bayes
altHyp <- "two.sided" # default, change only if explicit sign recognized
if(length(testOrientation) <1) testOrientation <- altHyp
if(testOrientation %in% c("<","less","inf","lower")) altHyp <- "less"
if(testOrientation %in% c(">","greater","sup","upper")) altHyp <- "greater"
chNAp <- colSums(!is.na(datFit$p.value))
    if(any(chNAp <1)) {message(fxNa,"Problem with redundant factors ? Some cols of p.values are all NA !! (removing)")
datFit$p.value <- datFit$p.value[,which(chNAp >0)]}
chFdr <- requireNamespace("fdrtool", quietly=TRUE) # try(find.package("fdrtool"), silent=TRUE)
if(length(addResults) >0) if("lfdr" %in% tolower(addResults) & !chFdr) {
if(!silent) message(fxNa,"package 'fdrtool' not found ! Please install package fdrtool from CRAN for enabeling 'lfdr' estimations")
addResults <- addResults[which(!"lfdr" %in% tolower(addResults))] }
tx <- c("testing alternative hypothesis: true difference in means is "," than 0 (ie focus on "," results with A ",altHyp," than B)")
if(debug) message(fxNa,"altHyp = ",altHyp)
if(identical(altHyp,"greater")){
ch <- datFit$means[,1] > datFit$means[,2]
if(!silent) message(fxNa,tx[1],altHyp,tx[2],sum(ch),tx[3:5])
if(any(ch)) datFit$p.value[which(ch),] <- datFit$p.value[which(ch),]/2
if(any(!ch)) datFit$p.value[which(!ch),] <- 1- datFit$p.value[which(!ch),]/2 # !(A > B) .. A <= B
}
if(identical(altHyp,"less")){
ch <- datFit$means[,2] > datFit$means[,1]
if(!silent) message(fxNa,tx[1],altHyp,tx[2],sum(ch),tx[3:5])
if(any(ch)) datFit$p.value[which(ch),] <- datFit$p.value[which(ch),]/2
if(any(!ch)) datFit$p.value[which(!ch),] <- 1- datFit$p.value[which(!ch),]/2 # !(A > B) .. A <= B
}
if(length(addResults) <1) out <- datFit$p.value[,2] else { out <- datFit
if(debug) message(fxNa," addResults",addResults)
## further inspect & correct values of 'addResults' ?
if("Mval" %in% addResults) out$Mval <- (out$means[,1] - out$means[,2])
if("FDR" %in% toupper(addResults)) out$FDR <- if(length(dim(out$p.value)) >1) {
apply(out$p.value,2,stats::p.adjust,meth="BH")} else stats::p.adjust(out$p.value, meth="BH")
if("lfdr" %in% tolower(addResults)) {
chPa <- try(find.package("fdrtool"), silent=TRUE)
if(inherits(chPa, "try-error")) message("package 'fdrtool' not found ! Please install first .. running so far without 'lfdr'")
addResults <- addResults[which(!tolower(addResults) %in% "lfdr")] }
if("lfdr" %in% tolower(addResults)) {out$lfdr <- if(is.matrix(out$p.value)) {
apply(out$p.value,2,pVal2lfdr)} else pVal2lfdr(out$p.value)
}
      if(inherits(datFit$lfdr, "try-error")) {message(fxNa,"PROBLEM with calculating lfdr !")
      } else if(debug) message(fxNa,"Successfully calculated lfdr ...")
if("BY" %in% toupper(addResults)) {datFit$BY <- if(length(dim(out$p.value)) >1) {
apply(datFit$p.value,2,stats::p.adjust,meth="BY")} else stats::p.adjust(out$p.value, meth="BY")
}
for(i in c("FDR","lfdr","BY")) {if(length(dim(out[[i]])) >1) rownames(out[[i]]) <- rownames(datMatr)}}
out
} else if(debug) message(fxNa,"ATTENTION, no two-factorial was calculated, returning NULL")
}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/test2factLimma.R
#' Make single vector gray-gradient
#'
#' This function helps making gray-gradients.
#' Note : The resulting color gradient does not seem linear to the human eye; you may try \code{\link[grDevices]{gray.colors}} instead
#' @param startGray (numeric) gray shade at start
#' @param endGrey (numeric) gray shade at end
#' @param nSteps (integer) number of levels
#' @param transp (numeric) transparency alpha
#' @return character vector (of same length as x) with color encoding
#' @seealso \code{\link[grDevices]{gray.colors}}
#' @examples
#' layout(1:2)
#' col1 <- transpGraySca(0.8,0.3,7,0.9)
#' pie(rep(1,length(col1)), col=col1, main="from transpGraySca")
#' col2 <- gray.colors(7,0.9,0.3,alph=0.9)
#' pie(rep(1,length(col2)), col=col2, main="from gray.colors")
#' @export
transpGraySca <- function(startGray=0.2, endGrey=0.8, nSteps=5, transp=0.3){
## make single vector gray-gradient
## 'transp' should be numeric (betw 0 and 1) or NULL for no transparency (high val .. no transparency)
## default startGray (low) -> endGrey (high) goes from dense to light
## NOTE : color gradient NOT LINEAR to EYE !! use rather grey.colors
grSeq <- seq(startGray, endGrey, len=nSteps)
useCol <- grDevices::rgb(r=grSeq, g=grSeq, b=grSeq, alpha=if(is.null(transp)) 1 else transp)
useCol }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/transpGraySca.R
#' Locate duplicates in text and make non-redundant
#'
#' \code{treatTxtDuplicates} locates duplicates in character-vector 'x' and returns a list (length=3) : with $init (initial),
#' $nRed .. non-redundant text made by adding a counter at the end or beginning, and $nrLst .. list-version with indexes per unique entry.
#' Note : NAs (if multiple) will be renamed to NA_1, NA_2
#' @param x (character) vector with character-entries to identify (and remove) duplicates
#' @param atEnd (logical) decide location of placing the counter (at end or at beginning of ID) (see \code{\link{correctToUnique}})
#' @param sep (character) separator to add before counter when making non-redundant version
#' @param onlyCorrectToUnique (logical) if TRUE, return only vector of non-redundant
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return list with $init, $nRed, $nrLst
#' @seealso For simple correction use \code{\link{correctToUnique}}
#' @examples
#' treatTxtDuplicates(c("li0",NA,rep(c("li2","li3"),2)))
#' correctToUnique(c("li0",NA,rep(c("li2","li3"),2)))
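#' treatTxtDuplicates(c("li0",NA,rep(c("li2","li3"),2)), atEnd=FALSE)   # illustrative: counter at beginning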
#' @export
treatTxtDuplicates <- function(x, atEnd=TRUE, sep="_", onlyCorrectToUnique=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
fxNa <- .composeCallName(callFrom,newNa="treatTxtDuplicates")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(debug) message(fxNa, "tTDA1")
if(length(dim(x)) >1) { if(!silent) message(fxNa," expecting simple (text) vector as 'x' but got class ",class(x))
x <- if(is.list(x)) unlist(x) else as.character(x) }
xIni <- x
if(any(is.na(x))) x[which(is.na(x))] <- "NA"
out <- correctToUnique(x, sep=sep, atEnd=atEnd)
if(!onlyCorrectToUnique) {
anyDu <- duplicated(x, fromL=FALSE) | duplicated(x, fromL=TRUE)
if(any(anyDu)) { nrLst <- sapply(unique(x),function(z) which(x %in% z))
if(!is.list(nrLst)) nrLst <- as.list(as.data.frame(nrLst))
} else {nrLst <- as.list(1:length(x)); names(nrLst) <- x}
list(init=xIni, nRed=out, nrLst=nrLst)} else out }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/treatTxtDuplicates.R
#' Pairwise x,y combinations
#'
#' \code{triCoord} gets pairwise combinations for 'n' elements; returns matrix with x & y coordinates to form all pairwise groups for 1:n elements
#' @param n (integer) number of elements for making all pair-wise combinations
#' @param side (character) "upper" or "lower"
#' @return 2-column matrix with indexes for all pairwise combinations of 1:n
#' @seealso \code{\link[base]{lower.tri}} or \code{upper.tri}, simpler version \code{\link{upperMaCoord}}
#' @examples
#' triCoord(4)
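#' triCoord(4, side="lower")   # illustrative: lower-triangle variant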
#' @export
triCoord <- function(n,side="upper") {
ma <- matrix(1:n,ncol=n,nrow=n)
out <- if(identical(side,"upper")) cbind(x=ma[upper.tri(ma)],y=t(ma)[upper.tri(ma)]) else {
cbind(x=ma[lower.tri(ma)],y=t(ma)[lower.tri(ma)])}
out }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/triCoord.R
#' Trim redundant text
#'
#' This function allows trimming/removing redundant text-fragments (redundant from head or tail) out of character vector 'txt'.
#'
#'
#'
#' @param txt character vector to be treated
#' @param minNchar (integer) minimum number of characters that must remain
#' @param side (character) may be either 'both', 'left' or 'right'
#' @param spaceElim (logical) optional removal of any heading or tailing white space
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @param debug (logical) display additional messages for debugging
#' @return This function returns a modified character vector
#' @seealso Inverse : Find/keep common text \code{\link{keepCommonText}}; you may also look for related functions in package \href{https://CRAN.R-project.org/package=stringr}{stringr}
#' @examples
#' txt1 <- c("abcd_ccc","bcd_ccc","cde_ccc")
#' trimRedundText(txt1, side="right") # trim from right
#'
#' txt2 <- c("ddd_ab","ddd_bcd","ddd_cde")
#' trimRedundText(txt2, side="left") # trim from left
#' @export
trimRedundText <- function(txt, minNchar=1, side="both", spaceElim=FALSE, silent=TRUE, callFrom=NULL, debug=FALSE) {
##
fxNa <- .composeCallName(callFrom, newNa="trimRedundText")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
doTrim <- TRUE
if(length(txt) <1) message(fxNa,"Problem : 'txt' appears empty, nothing to do") else {
if(!is.character(txt)) { txt <- try(as.character(txt))
if(inherits(txt, "try-error")) {txt <- NULL; doTrim <- FALSE
warning(fxNa,"Unable to convert 'txt' into text; nothing to do")}}
}
## trimming of heading and tailing white space
if(length(txt) >0 && spaceElim) {
txt <- gsub(" $","",gsub("^ ","",txt))
}
if(doTrim) { chLe <- nchar(txt)
if(any(naOmit(chLe) ==0)) { doTrim <- FALSE
if(sum(chLe ==0, na.rm=TRUE) + sum(is.na(chLe))==length(txt)) { message(fxNa,"NOTE : all elements appear empty ! Nothing to do ..")
} else if(!silent) message(fxNa,"NOTE : ",sum(chLe==0)," elements appear empty ! Nothing to do ..") }
}
## finish checking arguments
if(doTrim) {
msg2 <- "Argument 'minNchar' should be positive integer; setting to default=1"
    if(length(minNchar) !=1 || !is.numeric(minNchar)) { minNchar <- 1
message(fxNa,msg2)} else minNchar <- abs(as.integer(minNchar))
msg1 <- "Argument 'side' should be either 'both', 'left' or 'right'; setting to default='both'"
if(length(side) <1) {side <- "both"; message(fxNa, msg1)}
if(!is.character(side)) {side <- "both"; message(fxNa, msg1)}
## main
nChar <- nchar(txt)
if(debug) {message(fxNa,"Ready tm trim ",pasteC(nChar)," characters"); trL <- list(txt=txt,minNchar=minNchar,nChar=nChar)}
if(any(c("any","both","left") %in% side)) {
txt <- .trimLeft(txt, minNchar=minNchar, silent=TRUE, callFrom=fxNa)
if(debug) {message(fxNa," .trimLeft reduced to ",pasteC(nchar(txt))," characters")}
}
if(any(c("any","both","right") %in% side)) {
txt <- .trimRight(txt, minNchar=minNchar, silent=TRUE, callFrom=fxNa)
if(debug){ message(fxNa," .trimRight reduced to ",pasteC(nchar(txt))," characters")}
}
txt } }
#' Trim from Left
#'
#' This function allows trimming/removing redundant text-fragments from left side
#'
#' @param x character vector to be treated
#' @param minNchar (integer) minimum number of characters that must remain
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a modified character vector
#' @seealso \code{\link{trimRedundText}}; Inverse : Find/keep common text \code{\link{keepCommonText}}; you may also look for related functions in package \href{https://CRAN.R-project.org/package=stringr}{stringr}
#' @examples
#' txt1 <- c("abcd_ccc","bcd_ccc","cde_ccc")
#' .trimLeft(txt1)
#' @export
.trimLeft <- function(x, minNchar=1, silent=TRUE, debug=FALSE, callFrom=NULL) {
## trim redundant starting from left side
fxNa <- .composeCallName(callFrom, newNa=".trimLeft")
nChar <- nchar(x)
msg <- c(fxNa,"Some entries are too short for trimming to min ",minNchar," characters, nothing to do")
if(all(naOmit(nChar > minNchar))) {
ch1 <- min(nChar, na.rm=TRUE)
if(ch1 -1 > minNchar) {
ch1 <- (ch1 -minNchar) :1
ch1 <- paste0("^",substr(rep(x[which.min(nChar)], length(ch1)), 1, ch1))
ch3 <- lapply(gsub("\\.","\\\\.",ch1), grep, x)
ch3 <- sapply(ch3, length) ==length(x)
if(any(ch3)) x <- substring(x, nchar(ch1[min(which(ch3))]))
} else {
if(!silent) message(msg) }
} else if(!silent) message(msg)
x
}
#' Trim from right
#'
#' This function allows trimming/removing redundant text-fragments from right side
#'
#' @param x character vector to be treated
#' @param minNchar (integer) minimum number of characters that must remain
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a modified character vector
#' @seealso \code{\link{trimRedundText}}; Inverse : Find/keep common text \code{\link{keepCommonText}}; you may also look for related functions in package \href{https://CRAN.R-project.org/package=stringr}{stringr}
#' @examples
#' txt1 <- c("abcd_ccc","bcd_ccc","cde_ccc")
#' .trimRight(txt1)
#' @export
.trimRight <- function(x, minNchar=1, silent=TRUE, debug=FALSE, callFrom=NULL) {
fxNa <- .composeCallName(callFrom, newNa=".trimRight")
nChar <- nchar(x)
msg <- c(fxNa,"Some entries are too short for trimming to min ",minNchar," characters, nothing to do")
if(all(naOmit(nChar > minNchar))) {
ch1 <- min(nChar, na.rm=TRUE)
if(ch1 -1 > minNchar) {
ch1 <- 1: (ch1 -minNchar)
ch2 <- x[which.min(nChar)]
ch1 <- paste0(substr(paste0(rep(ch2, length(ch1))), nchar(ch2)- ch1 +1, nchar(ch2)),"$")
ch3 <- lapply(gsub("\\.","\\\\.",ch1), grep, x)
ch3 <- sapply(ch3, length) ==length(x)
if(any(ch3)) x <- substring(x, 1, nchar(x) - nchar(ch1[which.max(which(ch3))]) +1)
} else {
if(!silent) message(msg) }
} else if(!silent) message(msg)
x
}
#' Trim from start
#'
#' This function allows trimming/removing redundant text-fragments from start
#'
#' @param x character vector to be treated
#' @param ... more vectors to be treated
#' @param minNchar (integer) minimum number of characters that must remain
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a modified character vector
#' @seealso \code{\link{trimRedundText}}; Inverse : Find/keep common text \code{\link{keepCommonText}}; you may also look for related functions in package \href{https://CRAN.R-project.org/package=stringr}{stringr}
#' @examples
#' txt1 <- c("abcd_ccc","bcd_ccc","cde_ccc")
#' .trimFromStart(txt1)
#' @export
.trimFromStart <- function(x,..., minNchar=1, silent=TRUE, debug=FALSE, callFrom=NULL) {
## trim, ie remove redundant characters from beginning
## 'minNchar' min number of characters that should remain
y <- list(...)
fxNa <- .composeCallName(callFrom, newNa=".trimFromStart")
if(length(x) < 1) message(fxNa," Problem : 'x' appears empty") else {
exclLiNa <- c("minNchar","silent","callFrom")
exclLiNa2 <- c(sapply(nchar(exclLiNa[1]):2, function(z) substr(exclLiNa[1],1,z)),
sapply(nchar(exclLiNa[2]):2, function(z) substr(exclLiNa[2],1,z)))
te <- which(names(y) %in% exclLiNa2)
if(length(naOmit(te)) > 0) y <- y[-1*which(names(y) %in% exclLiNa2)]
if(sum(sapply(y, length) >0)) {
x <- c(x, unlist(y)) }
if(!silent) message(fxNa," initial no of characters ", paste(nchar(x), collapse=" "))
while(length(unique(substr(x, 1, 1))) <2 & min(nchar(x),na.rm=TRUE) > minNchar) x <- substr(x, 2, nchar(x)) }
x }
#' Trim from end
#'
#' This function allows trimming/removing redundant text-fragments from end
#'
#' @param x character vector to be treated
#' @param ... more vectors to be treated
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a modified character vector
#' @seealso \code{\link{trimRedundText}}; Inverse : Find/keep common text \code{\link{keepCommonText}}; you may also look for related functions in package \href{https://CRAN.R-project.org/package=stringr}{stringr}
#' @examples
#' txt1 <- c("abcd_ccc","bcd_ccc","cde_ccc")
#' .trimFromEnd(txt1)
#' @export
.trimFromEnd <- function(x,..., callFrom=NULL, debug=FALSE, silent=TRUE) {
## trim, ie remove redundant characters from beginning
  ## note: arguments passed via '...' are collected and appended to 'x'
  ## less elaborated than .trimFromStart()
fxNa <- .composeCallName(callFrom,newNa=".trimFromEnd")
y <- list(...)
if(length(y) >0) {if(any(c("callFrom","silent") %in% names(y))) {
y <- y[-1*which(names(y) %in% c("callFrom","callFr","sil","silent"))]}}
if(sum(sapply(y, length) >0)) { # '...' argument will be added to x
x <- c(x, unlist(y)) }
if(!silent) message(fxNa,"Initial no of characters ", paste(nchar(x), collapse=" "))
while(length(unique(substr(x,nchar(x),nchar(x)))) <2) x <- substr(x, 1, nchar(x)-1)
x }
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/trimRedundText.R
#' Unify Enumerators
#'
#' The aim of this function is to provide help in automatically harmonizing enumerators at the end of sample-names.
#' When data have same grouped setup/design, many times this is reflected in their names, eg 'A_sample1', 'A_sample2' and 'B_sample1'.
#' However, human operators may use multiple similar (but not identical) ways of expressing the same meaning, eg writing 'A_Samp_1'.
#' This function allows testing a panel of different extensions of enumerators and (if recognized) to replace them by a user-defined standard text/enumerator.
#' Please note that the more recent function \code{\link{rmEnumeratorName}} offers better/more flexible options.
#'
#' @details
#' This function has been developed for matching series of the same samples passing in parallel through different evaluation software (see R package wrProteo).
#' The way human operators may name things may easily leave room for surprises and this function allows testing only a limited number of common ways of writing.
#' Thus, in any case, the user is advised to inspect the results by eye and - if needed- to adjust the parameters.
#'
#' Basically enumerator separators can be constructed by combing a base-separator \code{baseSep} (like '-', '_' etc) and an enumerator-abbreviation \code{suplEnu}.
#' Then, all possible combinations will be tested if they occur in the text \code{x}.
#' Furthermore, the text searched has to be followed by one or multiple digits at the end of the text-entry (decimal comma-separators etc are not allowed).
#' Thus, if there is other 'free text' following to the right after the enumerator-text this function will not find any enumerators to replace.
#'
#' The argument \code{stringentMatch} allows defining if this text has to be found in all text-entries of \code{x} or just one of them.
#' When using \code{stringentMatch=FALSE} there is a risk that other text not meant to designate enumerators may be picked up and modified.
#'
#' Please note, that with large data-sets (ie many columns) testing/checking a larger panel of enumerator-abbreviations may result in slower performance.
#' In cases of larger data-sets it may be more effective to first study the data and then run simple substitutions using \code{sub()} targeted for this very case.
#'
#' @param x (character) main input
#' @param refSep (character) separator for output
#' @param baseSep (character) basic separators to test (you have to protect special characters)
#' @param suplEnu (character) additional text
#' @param stringentMatch (logical) decide if enumerator text has to be found in all instances or only once
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @param debug (logical) display additional messages for debugging
#' @return This function returns a character vector of same length as input \code{x}, with it's content as adjusted enumerators
#' @seealso \code{\link{rmEnumeratorName}} for better/more flexible options; \code{\link[base]{grep}} or \code{sub()}, etc if exact and consistent patterns are known
#' @examples
#' unifyEnumerator(c("ab-1","ab-2","c-3"))
#' unifyEnumerator(c("ab-R1","ab-R2","c-R3"))
#' unifyEnumerator(c("ab-1","c3-2","dR3"), strin=FALSE);
#'
#' @export
unifyEnumerator <- function(x, refSep="_", baseSep=c("\\-","\\ ","\\."), suplEnu=c("Repl","Rep","R","Number","No","Sample","Samp"), stringentMatch=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
  ## unify Enumerators (towards end, must be followed by terminal digit) to refSep
  ## refSep (character, length=1)
fxNa <- .composeCallName(callFrom, newNa="unifyEnumerator")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(debug) {message(fxNa,"uE1")}
  ## need to filter as much as possible to keep combinatorics to check low (init check without considering combination of baseSep, suplEnu and digits)
chBa <- sapply(baseSep, function(y) length(grep(y, x))) # check if they occur at all, no matter if with digit or not..
chBa <- if(stringentMatch) chBa==length(x) else chBa >0
chRe <- length(grep(refSep, x)) # check if ref occur at all, no matter if with digit or not..
chRe <- if(stringentMatch) chRe==length(x) else chRe >0
if(debug) message(fxNa,"uE1b ",pasteC(chBa), " chRe ",pasteC(chRe))
if(any(chBa, chRe)) { baseSep <- if(any(chBa)) baseSep[which(chBa)] else NULL
if(debug) {message(fxNa,"uE2") ; uE2 <- list(x=x,refSep=refSep,baseSep=baseSep,suplEnu=suplEnu,chBa=chBa,chRe=chRe)}
## test enumerators
if(length(suplEnu) >0) {
suplEnu <- union(suplEnu, tolower(suplEnu))
chSup <- sapply(suplEnu, function(y) length(grep(y, x))) # check if they occur at all, no matter if with digit or not..
chSup <- if(stringentMatch) chSup==length(x) else chSup >0
suplEnu <- if(any(chSup)) suplEnu[which(chSup)] else NULL }
if(debug) {message(fxNa,"uE3") ; uE3 <- list(x=x,refSep=refSep,baseSep=baseSep,suplEnu=suplEnu,chBa=chBa,chRe=chRe)}
if(length(suplEnu) >0) {
## construct 'complex' enumerator pattern (all combin of separator & enumerator)
sep <- paste0(c( paste0(rep(baseSep, each=length(suplEnu)), suplEnu), paste0( suplEnu, rep(baseSep, each=length(suplEnu))),
paste0(rep(baseSep, each=length(suplEnu)*length(baseSep)), rep(suplEnu,each=length(baseSep)), baseSep), suplEnu, baseSep ), "[[:digit:]]+$")
      chS <- sapply(sep, function(y) length(grep(y, x)))     # test which actually occur
chS <- if(stringentMatch) chS==length(x) else chS >0
      if(!any(chS)) warning(fxNa,".. no pattern found, this should not happen here")
if(debug) {message(fxNa,"uE3c") }
sep <- sep[which(chS)]
} else { ## only basic enumerators occur, test which ones occur with digits
baseSep <- paste0(baseSep,"[[:digit:]]+$")
chBa <- sapply(baseSep, function(y) if(stringentMatch) length(grep(y, x)) == length(x) else length(grep(y, x)) >0)
sep <- if(any(chBa)) baseSep[chBa] else NULL }
} else sep <- NULL
if(length(sep) >0) {
if(debug) {message(fxNa,"uE4"); uE4 <- list(x=x,sep=sep,refSep=refSep,baseSep=baseSep,suplEnu=suplEnu,chBa=chBa,chRe=chRe)}
    ## replace the recognized separator/enumerator-text by 'refSep' while keeping the terminal digits
    for(i in sep) x <- sub(paste0(substr(i, 1, nchar(i) -13), "([[:digit:]]+)$"), paste0(refSep, "\\1"), x)
x
} else {if(debug) message(fxNa,"uE4b"); x} # no need to change, baseSep doesn't occur
}
/scratch/gouwar.j/cran-all/cranData/wrMisc/R/unifyEnumerator.R
#' Report number of unique and redundant elements (optional figure)
#'
#' Make report about number of unique and redundant elements of vector 'dat'.
#' Note : fairly slow for long vectors !!
#' @param dat (character or numeric vector) main input for which the number of unique (and redundant) elements should be determined
#' @param frL (logical) optional (re-)introducing results from \code{duplicated} to shorten time of execution
#' @param plotDispl (logical) decide if pie-type plot should be produced
#' @param tit (character) optional title in plot
#' @param col (character) custom colors in pie
#' @param radius (numeric) radius passed to \code{pie}
#' @param sizeTo (numeric or character) optional reference group for size-proportional adjusting of the overall surface of the pie
#' @param clockwise (logical) argument passed to pie
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return vector with counts of n (total), nUnique (without any repeated), nHasRepeated (first of repeated) and nRedundant; optional figure
#' @seealso \code{\link{correctToUnique}}, \code{\link[base]{unique}}
#' @examples
#' layout(1:2)
#' uniqCountReport(rep(1:7,1:7),plot=TRUE)
#' uniqCountReport(rep(1:3,1:3),plot=TRUE,sizeTo=rep(1:7,1:7))
#' @export
uniqCountReport <- function(dat, frL=NULL, plotDispl=FALSE, tit=NULL, col=NULL, radius=0.9, sizeTo=NULL, clockwise=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
fxNa <- .composeCallName(callFrom, newNa="uniqCountReport")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(is.null(frL)) frL <- duplicated(dat,fromLast=TRUE)
nUniQ <- length(unique(dat[which(frL)]))
out <- c(n=length(dat), nUnique=length(unique(dat)) -nUniQ, nHasRepeated=nUniQ, nRedundant=NA )
out[4] <- out[1] -sum(out[2:3],na.rm=TRUE)
if(plotDispl) .plotCountPie(out,tit=tit,col=col,radius=radius,sizeTo=sizeTo,clockwise=clockwise,silent=silent,callFrom=fxNa)
out }
#' Pie plot for counting results
#'
#' This function allows to inspect results of \code{table} or \code{uniqCountReport} on a pie-plot.
#' @param count (integer vector) counting result
#' @param tit (character) optional title in plot
#' @param col (character) custom colors in pie
#' @param radius (numeric) radius passed to \code{pie}
#' @param sizeTo (numeric or character) optional reference population for adjusting the overall surface of the pie relative to its size
#' @param clockwise (logical) argument passed to pie
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function produces a pie-plot (graphical output only)
#' @seealso \code{\link{uniqCountReport}}, \code{\link{correctToUnique}}, \code{\link[base]{unique}}
#' @examples
#' .plotCountPie(table(c(1:5,4:2)))
#' @export
.plotCountPie <- function(count, tit=NULL, col=NULL, radius=0.9, sizeTo=NULL, clockwise=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
  ## pie-plot of counting results (helper used by uniqCountReport)
fxNa <- .composeCallName(callFrom, newNa=".plotCountPie")
if(is.null(col)) col <- c(3,4,2)
if(length(sizeTo) >0) {if(is.character(sizeTo) || length(sizeTo) >1) sizeTo <- length(sizeTo)
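    ## scale radius by the square root of the count ratio, so the pie's surface stays proportional to total n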
radius <- radius*sqrt(count[1]/sizeTo)
msg <- "Note: reference is soo high that size-adoped pie won't fit into plotting region, consider lowering 'radius' for all plots"
if(radius > 1.09 && !silent) message(fxNa,msg)}
graphics::pie(count[c(2,3,4)],col=col,init.angle=90,radius=radius,main=tit,labels=c("unique","hasRepeated","redundant"),cex=sort(c(0.55,0.3+radius,1))[2],clockwise=clockwise)
graphics::mtext(paste("n=",count[1]),cex=0.8,adj=0,line=-0.3)
}
## ---- end of R/uniqCountReport.R ----
#' (upper) pairwise x,y combinations
#'
#' \code{upperMaCoord} gets pairwise combinations for 'n' elements; it returns a matrix with x & y coordinates forming all pairwise groups for n elements.
#' In contrast to \code{\link{triCoord}}, no distinction between 'upper' and 'lower' is made.
#' @param n (integer) number of elements for making all pair-wise combinations
#' @return 2-column matrix with indexes for all pairwise combinations of 1:n
#' @seealso \code{\link[base]{lower.tri}}, more evolved version \code{\link{triCoord}}
#' @examples
#' upperMaCoord(4)
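#' ## cross-check : these are simply the row/column indexes of the upper triangle
#' which(upper.tri(matrix(NA, 4, 4)), arr.ind=TRUE)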
#' @export
upperMaCoord <- function(n) {
ma <- matrix(1:n,ncol=n,nrow=n)
cbind(x=ma[upper.tri(ma)],y=t(ma)[upper.tri(ma)]) }
## ---- end of R/upperMaCoord.R ----
#' Check for values within range of reference
#'
#' \code{withinRefRange} checks which values of numeric vector 'x' are within range +/- 'fa' x 'ref' (ie within range of reference).
#' @param x (numeric) vector to be checked for values within the given range
#' @param fa (numeric) absolute or relative tolerance value (numeric, length=1), interpreted according to 'absRef' as absolute or relative to 'x'(ie fa*ref)
#' @param ref (numeric) (center) reference value for comparison (numeric, length=1), if not given mean of 'x' (excluding NA or non-finite values) will be used
#' @param absRef (logical) if \code{TRUE} interpret 'fa' as absolute tolerance, otherwise as relative to 'ref' (ie fa*ref)
#' @param asInd (logical) if TRUE return index of which values of 'x' are within range, otherwise return values if 'x' within range
#' @return numeric vector with the values within range of reference (or their index positions if \code{asInd=TRUE})
#' @examples
#' ## within 2.5 +/- 0.7
#' withinRefRange(-5:6,fa=0.7,ref=2.5)
#' ## within 2.5 +/- (0.7*2.5)
#' withinRefRange(-5:6,fa=0.7,ref=2.5,absRef=FALSE)
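#' ## return index positions instead of the values themselves
#' withinRefRange(-5:6, fa=0.7, ref=2.5, asInd=TRUE)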
#' @export
withinRefRange <- function(x,fa,ref=NULL,absRef=TRUE,asInd=FALSE) {
xIni <- x
if(any(length(fa) !=1,!is.finite(fa),!is.finite(ref))) stop(" 'fa' and 'ref' must be finite !")
chFin <- is.finite(as.numeric(x))
if(sum(chFin) <1) stop(" no finite values found in 'x' !")
if(sum(!chFin) >0) x[which(!chFin)] <- NA
x <- as.numeric(x)
if(is.null(ref)) ref <- sum(x[which(chFin)],na.rm=TRUE)/sum(chFin)
## main
out <- if(absRef) which(abs(x -ref) < fa) else which(abs(x/ref -1) < fa)
if(asInd) out else xIni[out] }
## ---- end of R/withinRefRange.R ----
#' Write (and convert) csv files
#'
#' This function is based on \code{write.csv} and allows for more options when writing data into csv-files.
#' The main input may be given as R-object or read from file 'inPutFi'. Then, one can (re-)write using specified conversions.
#' An optional filter to select columns (column-name specified via 'filterCol') is available.
#' The output may be simultaneously written to multiple formats, as specified in 'expTy',
#' tabulation characters may be converted to avoid accidentally splitting/shifting text to multiple columns.
#' Note: Mixing '.' and ',' as decimal separators via text-columns or fused text&data may cause problems later on, though.
#'
#' @param input either matrix or data.frame
#' @param inPutFi (character or \code{NULL}) file-name to be read (format as US or Euro-type may be specified via argument \code{imporTy})
#' @param expTy (character) 'US' and/or 'Eur' for separator and decimal type in output
#' @param imporTy (character) default 'Eur' (otherwise set to 'US')
#' @param filename (character) optional new file name(s)
#' @param quote (logical) will be passed to function \code{write.csv}
#' @param filterCol (integer or character) optionally, to export only the columns specified here
#' @param replMatr optional matrix (1st line: characters to search for, 2nd line: their replacements) indicating which characters need to be replaced
#' @param returnOut (logical) return output as object
#' @param SYLKprevent (logical) prevent difficulty when opening file via Excel. In some cases Excel presumes (by error) the SYLK format and produces an error when trying to open files :
#' To prevent this, if necessary, the 1st column-name will be changed from 'ID' to 'Id'.
#' @param digits (integer) limit number of significant digits in output (ie file)
#' @param silent (logical) suppress messages
#' @param debug (logical) for bug-tracking: more/enhanced messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function writes a file to disk and returns \code{NULL} unless \code{returnOut=TRUE}
#' @seealso \code{write.csv} in \code{\link[utils]{write.table}}, batch reading using this package \code{\link{readCsvBatch}}
#' @examples
#' dat1 <- data.frame(ini=letters[1:5],x1=1:5,x2=11:15,t1=c("10,10","20.20","11,11","21,21","33.33"),
#' t2=c("10,11","20.21","kl;kl","az,az","ze.ze"))
#' fiNa <- file.path(tempdir(), paste("test",1:2,".csv",sep=""))
#' writeCsv(dat1, filename=fiNa[1])
#' dir(path=tempdir(), pattern="cs")
#'
#' (writeCsv(dat1, replM=rbind(bad=c(";",","), replBy="__"), expTy=c("Eur"),
#' returnOut=TRUE, filename=fiNa[2]))
#'
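#' ## export simultaneously to US- and Euro-type formats (illustrative; two files will be written)
#' writeCsv(dat1, expTy=c("US","Eur"), filename=file.path(tempdir(), "test3"))
#'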
#' @export
writeCsv <- function(input, inPutFi=NULL, expTy=c("Eur","US"), imporTy="Eur", filename=NULL, quote=FALSE, filterCol=NULL, replMatr=NULL, returnOut=FALSE, SYLKprevent=TRUE, digits=22, silent=FALSE,debug=FALSE,callFrom=NULL) {
fxNa <- .composeCallName(callFrom, newNa="writeCsv")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
argN <- deparse(substitute(input))
doWrite <- TRUE
if(!requireNamespace("utils", quietly=TRUE)) { doWrite <- FALSE
warning(fxNa,"package 'utils' not found ! Please install first")
}
if(length(input) <1) { doWrite <- FALSE; warning(fxNa," 'input' should be data or filename")}
if(doWrite) {
    if(is.character(input)) { if(file.exists(input)) { inPutFi <- as.character(input)[1]; dat <- NULL
      if(!silent) message(fxNa,"Trying to read ",inPutFi," as format: ",imporTy)
dat <- if(imporTy=="Eur") try(utils::read.csv2(inPutFi,stringsAsFactors=FALSE),silent=TRUE) else {
if(imporTy=="US") try(utils::read.csv(inPutFi, stringsAsFactors=FALSE),silent=TRUE) else try(utils::read.table(inPutFi,stringsAsFactors=FALSE),silent=TRUE) }
if("try-error" %in% class(dat)) {message(fxNa,"PROBLEM when trying tp open file '",inPutFi,"' - abandon"); doWrite <- FALSE }
    } else { dat <- input
      if(!silent) message(fxNa," 'input' is character but does not correspond to an existing filename; trying to interpret it as data to be written to file")}
} else dat <- input
if(doWrite) {
if(!is.null(dat)) { if(length(inPutFi) >1) message(fxNa," ignoring content of 'inPutFi'")
inPutFi <- NULL; imporTy <- ""}
if(length(dim(dat)) <2) dat <- as.matrix(dat) # fx created for typical case of data.frame or matrix
datColCl <- rep(NA,ncol(dat))
for(i in 1:ncol(dat)) datColCl[i] <- class(dat[,i]) # document class for each column (won't work using apply)
if(length(expTy) <1) {expTy <- "Eur"; if(!silent) message(fxNa,"unkown format for 'expTy', setting to 'Eur'")}
expTy <- sort(stats::na.omit(expTy))
if(length(expTy) <1) {expTy <- "Eur"; if(!silent) message(fxNa,"unkown format for 'expTy', setting to 'Eur'")}
## FILTERING (independent to expTy)
## check if 'filterCol' in dat
if(is.list(filterCol) && length(filterCol) >0) {
useCol <- sapply(filterCol,function(x) x[1])
useCol <- which(useCol %in% colnames(dat))
if(!silent && length(useCol)<1) message(fxNa," none of the columns from 'filterCol' found in ",inPutFi)
      filtThr <- sapply(filterCol[useCol], function(x) if(length(x) >1) x[2] else NA)
filterCol <- sapply(filterCol[useCol],function(x) x[1])
}
if(debug) {message(fxNa,"..xxWriteC0")}
if(length(filterCol) >0) {
for(i in 1:length(filterCol)) {
chLogi <- TRUE
        ## this may be further developed: check if column has usable logical content
chLi <- if(is.na(filtThr[i]) && chLogi) which(dat[,filterCol[i]]) else which(dat[,filterCol[i]] < filtThr[i])
        if(length(chLi) < nrow(dat) && length(chLi) >0) dat <- dat[chLi,] else if(length(chLi) <1) {
          message(fxNa," filtering for ",filterCol[i]," left nothing !!")
dat <- NULL; return(NULL) }}
} else if(!is.character(inPutFi)) expTy <- expTy[which(!expTy %in% imporTy)] # no need to re-write same file if no filtering (unless inPutFi is filename)
##
## treat non-conform characters (only in non-numeric cols, dependent on expTy)
      ## locate non-conform characters, then substitute in non-numeric part of dat
datExp <- list()
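      ## 'datExp' will hold one character-substituted copy of 'dat' per export type when several types are requested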
if(!all(datColCl %in% c("numeric","integer"))){ # nothing to substitute if only numeric data, otherwise :
chCols <- which(!datColCl %in% c("numeric","integer"))
dat0 <- as.matrix(dat[,chCols])
## first try to locate numeric cols with bad separator (digit+comma+digit)
zz <- sub("^[[:digit:]]+,[[:digit:]]+$|^[[:digit:]]+\\.[[:digit:]]+$|^,[[:digit:]]+$|^\\.[[:digit:]]+$","", dat0) # no need to test for digits only wo separators
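      ## 'zz' becomes empty ("") where a cell was purely numeric (with ',' or '.' as decimal separator); columns where all cells end up empty may safely be converted to numeric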
chNA <- is.na(zz)
if(any(chNA)) zz[which(chNA)] <- 0
toNum <- colSums(nchar(zz)) <1
if(debug) {message(fxNa,"..xxWriteC1b")}
if(any(toNum)) {
if(!silent) message(fxNa," adjusting ",sum(toNum)," column(s) with Euro comma-separator")
dat[,chCols[which(toNum)]] <- as.numeric(sub(",",".", dat[,chCols[which(toNum)]]))
datColCl[chCols[which(toNum)]] <- "numeric"
chCols <- which(!datColCl %in% c("numeric","integer")) } # refresh
## locate & replace 'bad' characters interfering with tabular separation -> need multiple versions
if(debug) {message(fxNa,"..xxWriteC2")}
if(length(chCols) >0) for(ty in expTy) {
dat0 <- as.matrix(dat[,chCols]) # refresh
replMat <- if(is.null(replMatr)) array(c(";"," ", ","," ", "\t"," "),
dim=c(2,1,3),dimnames=list(c("bad","subst"),c("sep1"),c("Eur","US","txt"))) else replMatr
if(length(dim(replMat)) >2) {
chTy <- ty %in% dimnames(replMat)[[3]]
replMat <- as.matrix(if(!chTy) replMat[,,1] else replMat[,,which(ty==dimnames(replMat)[[3]])])} # matrix with characters to test for (1st line) & 2nd line for replacing
locCh <- lapply(replMat[1,], grep, dat0)
locCh <- locCh[which(sapply(locCh, length) >0)] # indexes where substitution should take place
if(length(locCh) >0) for(i in 1:length(locCh)) dat0[locCh[[i]]] <- gsub(replMat[1,i],replMat[2,i],dat0[locCh[[i]]])
if(debug) {message(fxNa,"..xxWriteC3")}
if(length(expTy) >1) {datExp[[ty]] <- dat; datExp[[ty]][,chCols] <- dat0} else dat[,chCols] <- dat0 }}
## idea (future) -make optional non-redundant version ? allowing to replace completely .exportFilteredCSV()
## prevent Excel trying SYLK format : replace 1st col from 'ID' to 'Id'
    if(SYLKprevent) { if(is.list(datExp) && length(datExp) >0) { chID <- grepl("^ID", sapply(datExp, function(x) colnames(x)[1]))
        if(any(chID)) for(i in which(chID)) colnames(datExp[[i]])[1] <- sub("^ID","Id", colnames(datExp[[i]])[1])
      } else { chID <- grepl("^ID", colnames(dat)[1])
        if(isTRUE(chID)) colnames(dat)[1] <- sub("^ID","Id", colnames(dat)[1]) }}
## write to file
      if(length(filename) <1) filename <- paste0(if(is.character(inPutFi)) sub("\\.csv$","",inPutFi) else argN, ".", expTy, ".csv")
      if(length(filename) < length(expTy)) {
        if(!silent) message(fxNa," adding type to name(s) of file(s) to be written")
        filename <- paste0(sub("\\.csv$","",filename), ".", expTy, ".csv")
        if("txt" %in% expTy) filename <- sub("\\.txt\\.csv$",".txt", filename) }
      if(length(names(filename)) <1 && length(filename)==length(expTy)) names(filename) <- expTy
if(debug) {message(fxNa,"..xxWriteC4")}
      if(!silent && any(file.exists(filename))) message(fxNa,"file(s) ",pasteC(filename[which(file.exists(filename))],quo="'")," will be overwritten !")
if( "US" %in% expTy) tryW <- try(utils::write.csv(as.matrix(format(if(length(datExp)>0) datExp$US else dat,digits=digits)), filename["US"], row.names=FALSE, quote=quote),silent=silent)
if("txt" %in% expTy) tryW <- try(utils::write.table(as.matrix(format(if(length(datExp)>0) datExp$txt else dat,digits=digits)), filename["txt"], row.names=FALSE, quote=quote),silent=silent)
      ## idea (related to problem when input is fused numeric&text): in case of Eur test all cols if factor/text and then (optionally?) convert '.' to ','
if("Eur" %in% expTy) tryW <- try(utils::write.csv2(as.matrix(format(if(length(datExp)>0) datExp$Eur else dat,digits=digits)), filename[1], row.names=FALSE, quote=quote),silent=silent)
## possibility to return values :
if(returnOut) {if(length(expTy) <2) dat else datExp} } } }
## ---- end of R/writeCsv.R ----
## ----setup0, include=FALSE, echo=FALSE, messages=FALSE, warnings=FALSE--------
suppressPackageStartupMessages({
library(wrMisc)
})
## ----install, echo=TRUE, eval=FALSE-------------------------------------------
# ## If not already installed, you'll have to install the package first.
# ## This is the basic installation command in R
# install.packages("wrMisc")
## ----install2, echo=TRUE, eval=FALSE------------------------------------------
# packages <- c("knitr", "rmarkdown", "BiocManager", "kableExtra", "boot", "data.tree", "data.table",
# "fdrtool", "RColorBrewer", "Rcpp", "wrMisc", "wrGraph", "wrProteo")
# checkInstallPkg <- function(pkg) { # install function
# if(!requireNamespace(pkg, quietly=TRUE)) install.packages(pkg) }
#
# ## install if not yet present
# sapply(packages, checkInstallPkg)
## ----install3, echo=TRUE, eval=FALSE------------------------------------------
# ## Installation of limma
# BiocManager::install("limma")
## ----install4, echo=TRUE, eval=FALSE------------------------------------------
# ## Now you can open this vignette out of R:
# vignette("wrMiscVignette1", package="wrMisc")
## ----setup1-------------------------------------------------------------------
library("wrMisc")
library("knitr")
## This is 'wrMisc' version number :
packageVersion("wrMisc")
## ----basicVariability, echo=TRUE----------------------------------------------
grp1 <- rep(LETTERS[1:3], c(3,4,3))
sampNa1 <- paste0(grp1, c(1:3,1:4,1:3))
set.seed(2016); dat1 <- matrix(round(c(runif(50000) +rep(1:1000,50)),3),
ncol=10, dimnames=list(NULL,sampNa1))
dim(dat1)
head(dat1)
## ----sdForEachRow, echo=TRUE--------------------------------------------------
head(rowSds(dat1))
system.time(sd1 <- rowSds(dat1))
system.time(sd2 <- apply(dat1, 1, sd))
## ----usingApply, echo=TRUE----------------------------------------------------
table(round(sd1, 13)==round(sd2, 13))
## ----calculateRowCV, echo=TRUE------------------------------------------------
system.time(cv1 <- rowCVs(dat1))
system.time(cv2 <- apply(dat1, 1, sd) / rowMeans(dat1))
# typically the calculation using rowCVs is much faster
head(cv1)
# results from the 'conventional' way
head(cv2)
## ----rowGrpMeans1, echo=TRUE--------------------------------------------------
# we already defined the grouping :
grp1
## the mean for each group and row
system.time(mean1Gr <- rowGrpMeans(dat1, grp1))
## ----sdOrCVbyGrp, echo=TRUE---------------------------------------------------
## Now the sd for each row and group
system.time(sd1Gr <- rowGrpSds(dat1, grp1))
# will give us a matrix with the sd for each group & line
head(sd1Gr)
# Let's check the results of the first line :
sd1Gr[1,] == c(sd(dat1[1,1:3]), sd(dat1[1,4:7]), sd(dat1[1,8:10]))
# The CV :
system.time(cv1Gr <- rowGrpCV(dat1, grp1))
head(cv1Gr)
## ----rowGrpNA1, echo=TRUE-----------------------------------------------------
mat2 <- c(22.2, 22.5, 22.2, 22.2, 21.5, 22.0, 22.1, 21.7, 21.5, 22, 22.2, 22.7,
NA, NA, NA, NA, NA, NA, NA, 21.2, NA, NA, NA, NA,
NA, 22.6, 23.2, 23.2, 22.4, 22.8, 22.8, NA, 23.3, 23.2, NA, 23.7,
NA, 23.0, 23.1, 23.0, 23.2, 23.2, NA, 23.3, NA, NA, 23.3, 23.8)
mat2 <- matrix(mat2, ncol=12, byrow=TRUE)
## The definition of the groups (ie replicates)
gr4 <- gl(3, 4, labels=LETTERS[1:3])
## ----rowGrpNA2, echo=TRUE-----------------------------------------------------
rowGrpNA(mat2,gr4)
## ----naOmit, echo=TRUE--------------------------------------------------------
aA <- c(11:13,NA,10,NA)
str(naOmit(aA))
# the 'classical' na.omit also stores which elements were NA
str(na.omit(aA))
## ----minDiff, echo=TRUE-------------------------------------------------------
set.seed(2017); aa <- 10 *c(0.1 +round(runif(20),2), 0.53, 0.53)
head(aa)
minDiff(aa,ppm=FALSE)
## ----partUnlist_1, echo=TRUE--------------------------------------------------
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
partUnlist(bb)
partUnlist(lapply(bb,.asDF2))
## ----unlist_1, echo=TRUE------------------------------------------------------
head(unlist(bb, recursive=FALSE))
## ----asSepList, echo=TRUE-----------------------------------------------------
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
asSepList(bb)
## ----lappend1, echo=TRUE------------------------------------------------------
li1 <- list(a=1, b=2, c=3)
li2 <- list(A=11, b=2, C=13)
append(li1, li2)
## ----lappend2, echo=TRUE------------------------------------------------------
appendNR(li1, li2)
## ----lrbind, echo=TRUE--------------------------------------------------------
dat2 <- matrix(11:34, ncol=3, dimnames=list(letters[1:8], colnames=LETTERS[1:3]))
lst2 <- by(dat2, rep(1:3,c(3,2,3)), as.matrix)
lst2
# join list-elements (back) into single matrix
lrbind(lst2)
## ----mergeMatrixList, echo=TRUE-----------------------------------------------
mat1 <- matrix(11:18, ncol=2, dimnames=list(letters[3:6],LETTERS[1:2]))
mat2 <- matrix(21:28, ncol=2, dimnames=list(letters[2:5],LETTERS[3:4]))
mat3 <- matrix(31:38, ncol=2, dimnames=list(letters[c(1,3:4,3)],LETTERS[4:5]))
#
mergeMatrixList(list(mat1, mat2), useColumn="all")
# with custom names for the individual matrices
mergeMatrixList(list(m1=mat1, m2=mat2, mat3), mode="union", useColumn=2)
## ----mergeMatrices, echo=TRUE-------------------------------------------------
mergeMatrices(mat1, mat2)
mergeMatrices(mat1, mat2, mat3, mode="union", useColumn=2)
## custom names for matrix-origin
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=2)
## flexible/custom selection of columns
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=list(1,1:2,2))
## ----fuseCommonListElem, echo=TRUE--------------------------------------------
val1 <- 10 +1:26
names(val1) <- letters
(lst1 <- list(c=val1[3:6], a=val1[1:3], b=val1[2:3] ,a=val1[12], c=val1[13]))
## here the names 'a' and 'c' appear twice :
names(lst1)
## now, let's fuse all 'a' and 'c'
fuseCommonListElem(lst1)
## ----listBatchReplace1, echo=TRUE---------------------------------------------
lst1 <- list(m1=matrix(11:18, ncol=2), m2=matrix(21:30, ncol=2), indR=31:34,
m3=matrix(c(21:23,NA,25:27,NA), ncol=2))
filterLiColDeList(lst1, useLines=2:3)
filterLiColDeList(lst1, useLines="allNA", ref=3)
## ----replInList1, echo=TRUE---------------------------------------------------
(lst1 <- list(aa=1:4, bb=c("abc","efg","abhh","effge"), cc=c("abdc","efg","efgh")))
listBatchReplace(lst1, search="efg", repl="EFG", silent=FALSE)
## ----listGroupsByNames, echo=TRUE---------------------------------------------
ser1 <- 1:7; names(ser1) <- c("AA","BB","AA.1","CC","AA.b","BB.e","A")
listGroupsByNames(ser1)
## ----listGroupsByNames2, echo=TRUE--------------------------------------------
listGroupsByNames((1:10)/5)
## ----filterList, echo=TRUE----------------------------------------------------
set.seed(2020); dat1 <- round(runif(80),2)
list1 <- list(m1=matrix(dat1[1:40], ncol=8), m2=matrix(dat1[41:80], ncol=8), other=letters[1:8])
rownames(list1$m1) <- rownames(list1$m2) <- paste0("line",1:5)
# Note: the list-element list1$other has a length different to that of filt. Thus, it won't get filtered.
filterList(list1, list1$m1[,1] >0.4) # filter according to 1st column of $m1 ...
filterList(list1, list1$m1 >0.4)
## ----matr2list, echo=TRUE-----------------------------------------------------
(mat1 <- matrix(1:12, ncol=3, dimnames=list(letters[1:4],LETTERS[1:3])))
str(matr2list(mat1))
## ----array0, echo=TRUE--------------------------------------------------------
(arr1 <- array(c(6:4,4:24), dim=c(4,3,2), dimnames=list(c(LETTERS[1:4]),
paste("col",1:3,sep=""),c("ch1","ch2"))))
## ----arrayCV1, echo=TRUE------------------------------------------------------
arrayCV(arr1)
# this is equivalent to
cbind(rowCVs(arr1[,,1]), rowCVs(arr1[,,2]))
## ----arrayCV2, echo=TRUE------------------------------------------------------
arrayCV(arr1, byDim=2)
## ----cutArrayInCluLike, echo=TRUE---------------------------------------------
cutArrayInCluLike(arr1, cluOrg=c(2,1,2,1))
## ----filt3dimArr, echo=TRUE---------------------------------------------------
filt3dimArr(arr1, displCrit=c("col1","col2"), filtCrit="col2", filtVal=7, filtTy=">")
## ----repeated1, echo=TRUE-----------------------------------------------------
## some text toy data
tr <- c("li0","n",NA,NA, rep(c("li2","li3"),2), rep("n",4))
## ----repeated2, echo=TRUE-----------------------------------------------------
table(tr)
unique(tr)
duplicated(tr, fromLast=FALSE)
## ----repeated3, echo=TRUE-----------------------------------------------------
aa <- c(11:16,NA,14:12,NA,14)
names(aa) <- letters[1:length(aa)]
aa
## ----findRepeated, echo=TRUE--------------------------------------------------
findRepeated(aa)
## ----firstOfRepeated, echo=TRUE-----------------------------------------------
firstOfRepeated(aa)
aa[firstOfRepeated(aa)$indUniq] # only unique with their names
unique(aa) # unique() does not return any names !
## ----correctToUnique1, echo=TRUE----------------------------------------------
correctToUnique(aa)
correctToUnique(aa, sep=".", NAenum=FALSE) # keep NAs (ie without transforming to character)
## ----nonAmbiguousNum, echo=TRUE-----------------------------------------------
unique(aa) # names are lost
nonAmbiguousNum(aa)
nonAmbiguousNum(aa, uniq=FALSE, asLi=TRUE) # separate in list unique and repeated
## ----sortByNRepeated, echo=TRUE-----------------------------------------------
cities <- c("Bangkok","London","Paris", "Singapore","New York City", "Istambul","Delhi","Rome","Dubai")
sortByNRepeated(x=cities[c(1:4)], y=cities[c(2:3,5:8)])
## or (unlimited) multiple inputs via list
choices1 <- list(Mary=cities[c(1:4)], Olivia=cities[c(2:3,5:8)], Paul=cities[c(5:3,9,5)]) # Note : Paul cited NYC twice !
table(unlist(choices1))
sortByNRepeated(choices1)
sortByNRepeated(choices1, filterIntraRep=FALSE) # without correcting multiple citation of NYC by Paul
## ----cbindNR, echo=TRUE-------------------------------------------------------
## First we'll make some toy data :
(ma1 <- matrix(1:6, ncol=3, dimnames=list(1:2,LETTERS[3:1])))
(ma2 <- matrix(11:16, ncol=3, dimnames=list(1:2,LETTERS[3:5])))
## now we can join 2 or more matrixes
cbindNR(ma1, ma2, summarizeAs="mean") # average of both columns 'C'
## ----firstLineOfDat, echo=TRUE------------------------------------------------
(mat1 <- matrix(c(1:6, rep(1:3,1:3)), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2])))
## ----firstLineOfDat2, echo=TRUE-----------------------------------------------
firstLineOfDat(mat1, refCol=2)
## ----firstOfRepLines, echo=TRUE-----------------------------------------------
mat2 <- matrix(c("e","n","a","n","z","z","n","z","z","b",
"","n","c","n","","","n","","","z"), ncol=2)
firstOfRepLines(mat2, out="conc")
# or as index :
firstOfRepLines(mat2)
## ----nonredDataFrame, echo=TRUE-----------------------------------------------
(df1 <- data.frame(cbind(xA=letters[1:5], xB=c("h","h","f","e","f"), xC=LETTERS[1:5])))
## ----nonredDataFrame2, echo=TRUE----------------------------------------------
nonredDataFrame(df1, useCol=c("xB","xC"))
# without counter or concatenating
df1[which(!duplicated(df1[,2])),]
# or
df1[firstOfRepLines(df1,useCol=2),]
## ----get1stOfRepeatedByCol, echo=TRUE-----------------------------------------
mat2 <- cbind(no=as.character(1:20), seq=sample(LETTERS[1:15], 20, repl=TRUE),
ty=sample(c("full","Nter","inter"),20,repl=TRUE), ambig=rep(NA,20), seqNa=1:20)
(mat2uniq <- get1stOfRepeatedByCol(mat2, sortBy="seq", sortSupl="ty"))
# the values from column 'seq' are indeed unique
table(mat2uniq[,"seq"])
# This will return all first repeated (may be >1) but without further sorting
# along column 'ty' nor marking in column 'ambig'.
mat2[which(duplicated(mat2[,2],fromLast=FALSE)),]
## ----nonAmbiguousMat, echo=TRUE-----------------------------------------------
nonAmbiguousMat(mat1,by=2)
## ----nonAmbiguousMat2, echo=TRUE----------------------------------------------
set.seed(2017); mat3 <- matrix(c(1:100,round(rnorm(200),2)), ncol=3,
dimnames=list(1:100,LETTERS[1:3]));
head(mat3U <- nonAmbiguousMat(mat3, by="B", na="_", uniqO=FALSE), n=15)
head(get1stOfRepeatedByCol(mat3, sortB="B", sortS="B"))
## ----combineReplFromListToMatr, echo=TRUE-------------------------------------
lst2 <- list(aa_1x=matrix(1:12, nrow=4, byrow=TRUE), ab_2x=matrix(24:13, nrow=4, byrow=TRUE))
combineReplFromListToMatr(lst2)
## ----nonRedundLines, echo=TRUE------------------------------------------------
mat4 <- matrix(rep(c(1,1:3,3,1),2), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2]))
nonRedundLines(mat4)
## ----filtSizeUniq, echo=TRUE--------------------------------------------------
# input: c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), filtUn=TRUE, minSi=NULL)
# here a,b,c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), ref=c(letters[c(1:26,1:3)],
"dd","dd","bb","ddd"), filtUn=TRUE, minSi=NULL)
## ----makeNRedMatr, echo=TRUE--------------------------------------------------
t3 <- data.frame(ref=rep(11:15,3), tx=letters[1:15],
matrix(round(runif(30,-3,2),1), nc=2), stringsAsFactors=FALSE)
# First we split the data.frame in list
by(t3,t3[,1],function(x) x)
t(sapply(by(t3,t3[,1],function(x) x), summarizeCols, me="maxAbsOfRef"))
(xt3 <- makeNRedMatr(t3, summ="mean", iniID="ref"))
(xt3 <- makeNRedMatr(t3, summ=unlist(list(X1="maxAbsOfRef")), iniID="ref"))
## ----combineRedBasedOnCol, echo=TRUE------------------------------------------
matr <- matrix(c(letters[1:6],"h","h","f","e",LETTERS[1:5]), ncol=3,
dimnames=list(letters[11:15],c("xA","xB","xC")))
combineRedBasedOnCol(matr, colN="xB")
combineRedBasedOnCol(rbind(matr[1,],matr), colN="xB")
## ----convMatr2df, echo=TRUE---------------------------------------------------
x <- 1
dat1 <- matrix(1:10, ncol=2)
rownames(dat1) <- letters[c(1:3,2,5)]
## as.data.frame(dat1) ... would result in an error
convMatr2df(dat1)
convMatr2df(data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3))
tmp <- data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
tmp <- data.frame(a=as.character((1:3)/2), b=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
## ----combineOverlapInfo, echo=TRUE--------------------------------------------
set.seed(2013)
datT2 <- matrix(round(rnorm(200)+3,1), ncol=2, dimnames=list(paste("li",1:100,sep=""),
letters[23:24]))
# (mimic) some short and longer names for each line
inf2 <- cbind(sh=paste(rep(letters[1:4],each=26), rep(letters,4),1:(26*4),sep=""),
lo=paste(rep(LETTERS[1:4],each=26), rep(LETTERS,4), 1:(26*4), ",",
rep(letters[sample.int(26)],4), rep(letters[sample.int(26)],4), sep=""))[1:100,]
## We'll use this to test :
head(datT2, n=10)
## let's assign to each pair of x & y values a 'cluster' (column _clu_, the column _combInf_ tells us which lines/indexes are in this cluster)
head(combineOverlapInfo(datT2, disThr=0.03), n=10)
## it is also possible to rather display names (eg gene or protein-names) instead of index values
head(combineOverlapInfo(datT2, suplI=inf2[,2], disThr=0.03), n=10)
## ----getValuesByUnique, echo=TRUE---------------------------------------------
dat <- 11:19
names(dat) <- letters[c(6:3,2:4,8,3)]
## Here the names are not unique.
## Thus, the values can be binned by their (non-unique) names and a representative values calculated.
## Let's make a 'datUniq' with the mean of each group of values :
datUniq <- round(tapply(dat, names(dat), mean),1)
## now we propagate the mean values to the full vector
getValuesByUnique(dat, datUniq)
cbind(ini=dat,firstOfRep=getValuesByUnique(dat, datUniq),
indexUniq=getValuesByUnique(dat, datUniq, asIn=TRUE))
## ----combineByEitherFactor, echo=TRUE-----------------------------------------
nn <- rep(c("a","e","b","c","d","g","f"),c(3,1,2,2,1,2,1))
qq <- rep(c("m","n","p","o","q"),c(2,1,1,4,4))
nq <- cbind(nn,qq)[c(4,2,9,11,6,10,7,3,5,1,12,8),]
## Here we consider 2 columns 'nn' and 'qq' when trying to regroup common values
## (eg value 'a' from column 'nn' and value 'o' from 'qq')
combineByEitherFactor(nq, 1, 2, nBy=FALSE)
## ----combineByEitherFactor2, echo=TRUE----------------------------------------
## the same, but including n by group/cluster
combineByEitherFactor(nq, 1, 2, nBy=TRUE)
## Not running further iterations works faster, but you may not reach 'convergence' immediately
combineByEitherFactor(nq,1, 2, nBy=FALSE)
## ----combineByEitherFactor3, echo=TRUE----------------------------------------
## another example
mm <- rep(c("a","b","c","d","e"), c(3,4,2,3,1))
pp <- rep(c("m","n","o","p","q"), c(2,2,2,2,5))
combineByEitherFactor(cbind(mm,pp), 1, 2, con=FALSE, nBy=TRUE)
## ----multiCharReplace1, echo=TRUE---------------------------------------------
# replace character content
x1 <- c("ab","bc","cd","efg","ghj")
multiCharReplace(x1, cbind(old=c("bc","efg"), new=c("BBCC","EF")))
# works also on matrix and/or to replace numeric content :
x3 <- matrix(11:16, ncol=2)
multiCharReplace(x3, cbind(12:13,112:113))
## ----multiCharReplace2, echo=TRUE---------------------------------------------
# replace and return logical vector
x2 <- c("High","n/a","High","High","Low")
multiCharReplace(x2,cbind(old=c("n/a","Low","High"), new=c(NA,FALSE,TRUE)), convTo="logical")
## ----multiMatch1, echo=TRUE---------------------------------------------------
aa <- c("m","k","j; aa","m; aa; bb; o","n; dd","aa","cc")
bb <- c("aa","dd","aa; bb; q","p; cc")
## result as list of indexes
(bOnA <- multiMatch(aa, bb, method="asIndex")) # match bb on aa
## more convenient to the human reader
(bOnA <- multiMatch(aa, bb)) # match bb on aa
(bOnA <- multiMatch(aa, bb, method="matchedL")) # match bb on aa
## ----compGlobPat1, echo=TRUE--------------------------------------------------
aa <- letters[rep(c(3:1,4), each=2)]
ab <- letters[rep(c(5,8:6), each=2)] # 'same general' pattern to aa
ac <- letters[c(1:2,1:3,3:4,4)] # NOT 'same general' pattern to any other
ad <- letters[c(6:8,8:6,7:6)] # NOT 'same general' pattern to any other
## ----compGlobPat2, echo=TRUE--------------------------------------------------
## get global patterns
cbind(aa= match(aa, unique(aa)),
ab= match(ab, unique(ab)),
ac= match(ac, unique(ac)),
ad= match(ad, unique(ad)) )
## ----compGlobPat3, echo=TRUE--------------------------------------------------
bb <- data.frame(ind=1:length(aa), a=aa, b=ab, c=ac, d=ad)
## ----compGlobPat4, echo=TRUE--------------------------------------------------
replicateStructure(bb)
## ----compGlobPat5, echo=TRUE--------------------------------------------------
replicateStructure(bb, method="combAll")
## ----compGlobPat6, echo=TRUE--------------------------------------------------
replicateStructure(bb, method="combNonOrth")
## ----checkSimValueInSer, echo=TRUE--------------------------------------------
va1 <- c(4:7,7,7,7,7,8:10) +(1:11)/28600
checkSimValueInSer(va1)
cbind(va=va1, simil=checkSimValueInSer(va1))
## ----findCloseMatch1, echo=TRUE-----------------------------------------------
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,15.9,13.5,15.7,14.1,5)
(cloMa <- findCloseMatch(x=aA, y=cC, com="diff", lim=0.5, sor=FALSE))
## ----closeMatchMatrix1, echo=TRUE---------------------------------------------
# all matches (of 2nd arg) to/within limit for each of 1st arg ('x'); 'y' ..to 2nd arg = cC
# first let's display only one single closest/best hit
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=TRUE)) #
## ----closeMatchMatrix2, echo=TRUE---------------------------------------------
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=FALSE,origN=TRUE)) #
(maAa <- closeMatchMatrix(cloMa, cbind(valA=81:87, aA), cbind(valC=91:99, cC), colM=2,
colP=2, lim=FALSE))
(maAa <- closeMatchMatrix(cloMa, cbind(aA,valA=81:87), cC, lim=FALSE, deb=TRUE)) #
a2 <- aA; names(a2) <- letters[1:length(a2)]; c2 <- cC; names(c2) <- letters[10 +1:length(c2)]
(cloM2 <- findCloseMatch(x=a2, y=c2, com="diff", lim=0.5, sor=FALSE))
(maA2 <- closeMatchMatrix(cloM2, predM=cbind(valA=81:87, a2),
measM=cbind(valC=91:99, c2), colM=2, colP=2, lim=FALSE, asData=TRUE))
(maA2 <- closeMatchMatrix(cloM2, cbind(id=names(a2), valA=81:87,a2), cbind(id=names(c2),
valC=91:99,c2), colM=3, colP=3, lim=FALSE, deb=FALSE))
## ----findSimilFrom2sets, echo=TRUE--------------------------------------------
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,12.6,15.9,14.1)
aZ <- matrix(c(aA,aA+20), ncol=2, dimnames=list(letters[1:length(aA)],c("aaA","aZ")))
cZ <- matrix(c(cC,cC+20), ncol=2, dimnames=list(letters[1:length(cC)],c("ccC","cZ")))
findCloseMatch(cC, aA, com="diff", lim=0.5, sor=FALSE)
findSimilFrom2sets(aA, cC)
findSimilFrom2sets(cC, aA)
findSimilFrom2sets(aA, cC, best=FALSE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=5e4, deb=TRUE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=FALSE)
# below: find fewer 'best matches' since search window larger (ie more good hits compete !)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=TRUE)
## ----fusePairs, echo=TRUE-----------------------------------------------------
(daPa <- matrix(c(1:5,8,2:6,9), ncol=2))
fusePairs(daPa, maxFuse=4)
## ----elimCloseCoord1, echo=TRUE-----------------------------------------------
da1 <- matrix(c(rep(0:4,5),0.01,1.1,2.04,3.07,4.5), ncol=2); da1[,1] <- da1[,1]*99; head(da1)
elimCloseCoord(da1)
## ----stableMode, echo=TRUE----------------------------------------------------
set.seed(2012); dat <- round(c(rnorm(120,0,1.2), rnorm(80,0.8,0.6), rnorm(25,-0.6,0.05), runif(200)),3)
dat <- dat[which(dat > -2 & dat <2)]
stableMode(dat)
## ----stableMode2, fig.height=8, fig.width=9, fig.align="center", echo=TRUE----
layout(1:2)
plot(1:length(dat), sort(dat), type="l", main="Sorted Values", xlab="rank", las=1)
abline(h=stableMode(dat, silent=TRUE), lty=2,col=2)
legend("topleft",c("stableMode"), text.col=2, col=2, lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
plot(density(dat, kernel="gaussian", adjust=0.7), xlab="Value of dat", main="Density Estimate Plot")
useCol <- c("red","green","blue","grey55")
legend("topleft",c("dens","binning","BBmisc","allModes"), text.col=useCol, col=useCol,
lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
abline(v=stableMode(dat, method="dens", silent=TRUE), lty=2, col="red", lwd=2)
abline(v=stableMode(dat, method="binning", silent=TRUE), lty=2, col="green")
abline(v=stableMode(dat, method="BBmisc", silent=TRUE), lty=2, col="blue")
abline(v=stableMode(dat, method="allModes"), lty=2, col="grey55")
## ----stableMode3, echo=TRUE---------------------------------------------------
set.seed(2021)
x <- sample(letters, 50000, replace=TRUE)
stableMode(dat, method="mode")
stableMode(dat, method="allModes")
## ----trimRedundText1, echo=TRUE-----------------------------------------------
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1)
## ----keepCommonText1, echo=TRUE-----------------------------------------------
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1, side="left") # remove redundant
keepCommonText(txt1, side="terminal") # keep redundant
keepCommonText(txt1, side="center") # computationally easier
## ----keepCommonText2, echo=TRUE-----------------------------------------------
txt2 <- c("abcd_abc_kjh", "bcd_abc123", "cd_abc_po")
keepCommonText(txt2, side="center")
## ----rmEnumeratorName1, echo=TRUE---------------------------------------------
xx <- c("hg_Re1","hjRe2_Re2","hk-Re3_Re33")
rmEnumeratorName(xx)
rmEnumeratorName(xx, newSep="--")
rmEnumeratorName(xx, incl="anyCase")
## ----rmEnumeratorName2, echo=TRUE---------------------------------------------
xy <- cbind(a=11:13, b=c("11#11","2_No2","333_samp333"), c=xx)
rmEnumeratorName(xy)
rmEnumeratorName(xy,incl=c("anyCase","trim2","rmEnumL"))
## ----rmEnumeratorName3, echo=TRUE---------------------------------------------
xz <- cbind(a=11:13, b=c("23#11","4#2","567#333"), c=xx)
apply(xz, 2, rmEnumeratorName, sepEnum=c("","_"), newSep="_", silent=TRUE)
## ----unifyEnumerator1, echo=TRUE----------------------------------------------
unifyEnumerator(c("ab-1","ab-2","c-3"))
unifyEnumerator(c("ab-R1","ab-R2","c-R3"))
unifyEnumerator(c("ab-1","c3-2","dR3"), stringentMatch=FALSE)
## ----adjustDecPrefix1, echo=TRUE----------------------------------------------
adjustDecPrefix(c("10.psec","2 fsec"), unit="sec")
adjustDecPrefix(c("10.psec abc","2 fsec etc"), unit="sec")
## ----mergeVectors1, echo=TRUE-------------------------------------------------
x1 <- c(a=1, b=11, c=21)
x2 <- c(b=12, c=22, a=2)
x3 <- c(a=3, d=43)
mergeVectors(vect1=x1, vect2=x2, vect3=x3)
mergeVectors(vect1=x1, vect2=x2, vect3=x3, inclInfo=TRUE) # return list with additional info
## ----mergeVectors2, echo=TRUE-------------------------------------------------
x11 <- c(Noa=1, Numberb=11, Samplec=21)
x12 <- c(Nob=12, Numberc=22, Samplea=2)
x13 <- c(Numbera=3, d=43)
mergeVectors(vect1=x11, vect2=x12, vect3=x13)
## ----mergeVectors3, echo=TRUE-------------------------------------------------
x4 <- 41:44 # no names - not conform for merging
mergeVectors(x1, x2, x3, x4)
## ----matchMatrixLinesToRef1, echo=TRUE----------------------------------------
## Note : columns b and e allow non-ambiguous match, not all elements of e are present in a
mat0 <- cbind(a=c("mvvk","axxd","bxxd","vv"),b=c("iwwy","iyyu","kvvh","gxx"), c=rep(9,4),
d=c("hgf","hgf","vxc","nvnn"), e=c("_vv_","_ww_","_xx_","_yy_"))
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[,5])
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[1:3,5], inclInfo=TRUE)
matchMatrixLinesToRef(mat0[,-2], ref=mat0[,2], inclInfo=TRUE) # needs 'reverse grep'
## ----orderMatrToRef1, echo=TRUE-----------------------------------------------
mat1 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], rep(1:5)), ncol=3)
orderMatrToRef(mat1, paste0(letters[c(3,4,5,3,4)],c(1,3,5,2,4)))
mat2 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3:5 )), ncol=3)
orderMatrToRef(mat2, paste0(letters[c(3,4,5,3,4)],c(1,3,5,1,4)))
mat3 <- matrix(paste0(letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3,3,5 )), ncol=3)
orderMatrToRef(mat3, paste0("__",letters[c(3,4,5,3,4)],c(1,3,5,1,3)))
## ----concatMatch1, echo=TRUE--------------------------------------------------
## simple example without concatenations or text-extensions
x0 <- c("ZZ","YY","AA","BB","DD","CC","D")
tab0 <- c("AA","BB,E","CC","FF,U")
match(x0, tab0)
concatMatch(x0, tab0) # same result as match(), but with names
## now let's construct something similar but with concatenations and text-extensions
x1 <- c("ZZ","YY","AA","BB-2","DD","CCdef","Dxy") # modif of single ID (no concat)
tab1 <- c("AA","WW,Vde,BB-5,E","CCab","FF,Uef")
match(x1, tab1) # match finds only the 'simplest' case (ie "AA")
concatMatch(x1, tab1) # finds all hits as in example above
x2 <- c("ZZ,Z","YY,Y","AA,Z,Y","BB-2","DD","X,CCdef","Dxy") # conatenated in 'x'
tab2 <- c("AA","WW,Vde,BB-5,E","CCab,WW","FF,UU")
concatMatch(x2, tab2) # concatenation in both 'x' and 'table'
## ----checkStrictOrder1, echo=TRUE---------------------------------------------
set.seed(2005); mat1 <- rbind(matrix(round(runif(40),1),nc=4), rep(1,4))
head(mat1)
checkStrictOrder(mat1); mat1[which(checkStrictOrder(mat1)[,2]==0),]
## ----checkGrpOrder1, echo=TRUE------------------------------------------------
head(mat1)
checkGrpOrder(mat1)
checkGrpOrder(mat1, revRank=FALSE) # only constant 'up' tested
## ----linModelSelect1, echo=TRUE-----------------------------------------------
li1 <- rep(c(4,3,3:6), each=3) + round(runif(18)/5,2)
names(li1) <- paste0(rep(letters[1:5], each=3), rep(1:3,6))
li2 <- rep(c(6,3:7), each=3) + round(runif(18)/5, 2)
dat2 <- rbind(P1=li1, P2=li2)
exp2 <- rep(c(11:16), each=3)
exp4 <- rep(c(3,10,30,100,300,1000), each=3)
## Check & plot for linear model
linModelSelect("P1", dat2, expect=exp2)
linModelSelect("P2", dat2, expect=exp2)
## ----plotLinModelCoef1, echo=TRUE---------------------------------------------
set.seed(2020)
x1 <- matrix(rep(c(2,2:5),each=20) + runif(100) +rep(c(0,0.5,2:3,5),20),
byrow=FALSE, ncol=10, dimnames=list(LETTERS[1:10],NULL))
## just the 1st regression :
summary(lm(b~a, data=data.frame(b=x1[,1], a=rep(1:5,each=2))))
## all regressions
x1.lmSum <- t(sapply(lapply(rownames(x1), linModelSelect, dat=x1,
expect=rep(1:5,each=2), silent=TRUE, plotGraph=FALSE),
function(x) c(x$coef[2,c(4,1)], startFr=x$startLev)))
x1.lmSum <- cbind(x1.lmSum, medQuantity=apply(x1,1,median))
x1.lmSum[,1] <- log10(x1.lmSum[,1])
head(x1.lmSum)
## ----plotLinModelCoef2, echo=TRUE---------------------------------------------
wrGraphOK <- requireNamespace("wrGraph", quietly=TRUE) # check if package is available
if(wrGraphOK) wrGraph::plotW2Leg(x1.lmSum, useCol=c("Pr(>|t|)","Estimate","medQuantity","startFr"),
legendloc="topleft", txtLegend="start at")
## ----ratioAllComb0, echo=TRUE-------------------------------------------------
set.seed(2014); ra1 <- c(rnorm(9,2,1), runif(8,1,2))
## ----ratioAllComb1, echo=TRUE-------------------------------------------------
median(ra1[1:9]) / median(ra1[10:17])
## ----ratioAllComb2, echo=TRUE-------------------------------------------------
summary( ratioAllComb(ra1[1:9], ra1[10:17]))
boxplot(list(norm=ra1[1:9], unif=ra1[10:17], rat=ratioAllComb(ra1[1:9],ra1[10:17])))
## ----combineAsN1, echo=TRUE---------------------------------------------------
tm1 <- list(a1=LETTERS[1:7], a2=LETTERS[3:9], a3=LETTERS[6:10], a4=LETTERS[8:12])
combineAsN(tm1, nCombin=3, lev=gl(1,4))[,1,]
## ----combineAsN2, echo=TRUE---------------------------------------------------
## different levels/groups in list-elements
tm4 <- list(a1=LETTERS[1:15], a2=LETTERS[3:16], a3=LETTERS[6:17], a4=LETTERS[8:19],
b1=LETTERS[5:19], b2=LETTERS[7:20], b3=LETTERS[11:24], b4=LETTERS[13:25], c1=LETTERS[17:26],
d1=LETTERS[4:12], d2=LETTERS[5:11], d3=LETTERS[6:12], e1=LETTERS[7:10])
te4 <- combineAsN(tm4, nCombin=4, lev=substr(names(tm4),1,1))
str(te4)
te4[,,1] # the counts part only
## ----readCsvBatch, echo=TRUE--------------------------------------------------
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("pl01_1.csv","pl01_2.csv","pl02_1.csv","pl02_2.csv")
datAll <- readCsvBatch(fiNa, path1, silent=TRUE)
str(datAll)
## ----readCsvBatch2, echo=TRUE-------------------------------------------------
## batch reading of all csv files in specified path :
datAll2 <- readCsvBatch(fileNames=NULL, path=path1, silent=TRUE)
str(datAll2)
## ----readTabulatedBatch1, echo=TRUE-------------------------------------------
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("a1.txt","a2.txt")
allTxt <- readTabulatedBatch(fiNa, path1)
str(allTxt)
## ----readVarColumns, echo=TRUE------------------------------------------------
path1 <- system.file("extdata", package="wrMisc")
fiNa <- "Names1.tsv"
datAll <- readVarColumns(fiName=file.path(path1,fiNa), sep="\t")
str(datAll)
## ----readGit1, echo=TRUE------------------------------------------------------
## An example url with tabulated data :
url1 <- "https://github.com/bigbio/proteomics-metadata-standard/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv"
gitDataUrl(url1)
## ----readGit2, echo=TRUE------------------------------------------------------
dataPxd <- try(read.delim(gitDataUrl(url1), sep='\t', header=TRUE))
str(dataPxd)
## ----presenceGrpFilt1, echo=TRUE----------------------------------------------
dat1 <- matrix(1:56,ncol=7)
dat1[c(2,3,4,5,6,10,12,18,19,20,22,23,26,27,28,30,31,34,38,39,50,54)] <- NA
grp1 <- gl(3,3)[-(3:4)]
dat1
## now let's filter
presenceGrpFilt(dat1, gr=grp1, presThr=0.75) # stringent
presenceGrpFilt(dat1, gr=grp1, presThr=0.25) # less stringent
## ----presenceFilt, echo=TRUE--------------------------------------------------
presenceFilt(dat1, gr=grp1, maxGr=1, ratM=0.1)
presenceFilt(dat1, gr=grp1, maxGr=2, rat=0.5)
## ----cleanReplicates, echo=TRUE-----------------------------------------------
(mat3 <- matrix(c(19,20,30,40, 18,19,28,39, 16,14,35,41, 17,20,30,40), ncol=4))
cleanReplicates(mat3, nOutl=1)
cleanReplicates(mat3, nOutl=3)
## ----normalizeThis0, echo=TRUE------------------------------------------------
set.seed(2015); rand1 <- round(runif(300) +rnorm(300,0,2),3)
dat1 <- cbind(ser1=round(100:1 +rand1[1:100]), ser2=round(1.2*(100:1 +rand1[101:200]) -2),
ser3=round((100:1 +rand1[201:300])^1.2-3))
dat1 <- cbind(dat1, ser4=round(dat1[,1]^seq(2,5,length.out=100) +rand1[11:110],1))
## Let's introduce some NAs
dat1[dat1 <1] <- NA
## Let's get a quick overview of the data
summary(dat1)
## some selected lines (indeed, the 4th column appears always much higher)
dat1[c(1:5,50:54,95:100),]
## ----normalizeThis1, echo=TRUE------------------------------------------------
no1 <- normalizeThis(dat1, refGrp=1:3, meth="mean")
no2 <- normalizeThis(dat1, refGrp=1:3, meth="trimMean", trim=0.4)
no3 <- normalizeThis(dat1, refGrp=1:3, meth="median")
no4 <- normalizeThis(dat1, refGrp=1:3, meth="slope", quantFa=c(0.2,0.8))
## ----normalizeThis_plot1, echo=FALSE,eval=TRUE--------------------------------
boxplot(dat1, main="raw data", las=1)
## ----normalizeThis_plot2, echo=FALSE,eval=TRUE--------------------------------
layout(matrix(1:4, ncol=2))
boxplot(no1, main="mean normalization", las=1)
boxplot(no2, main="trimMean normalization", las=1)
boxplot(no3, main="median normalization", las=1)
boxplot(no4, main="slope normalization", las=1)
## ----rowNormalize1, echo=TRUE-------------------------------------------------
set.seed(2); AA <- matrix(rbinom(110, 10, 0.05), nrow=10)
AA[,4:5] <- AA[,4:5] *rep(4:3, each=nrow(AA))
AA1 <- rowNormalize(AA)
round(AA1, 2)
## ----rowNormalize2, echo=TRUE-------------------------------------------------
AC <- AA
AC[which(AC <1)] <- NA
(AC1 <- rowNormalize(AC))
## ----rowNormalize3, echo=TRUE-------------------------------------------------
(AC3 <- rowNormalize(AC, refLines=1:5, omitNonAlignable=TRUE))
## ----coordOfFilt1, echo=TRUE--------------------------------------------------
set.seed(2021); ma1 <- matrix(sample.int(n=40, size=27, replace=TRUE), ncol=9)
## let's test which values are >37
which(ma1 >37) # doesn't tell which row & col
coordOfFilt(ma1, ma1 >37)
## ----rnormW1, echo=TRUE-------------------------------------------------------
## some sample data :
x1 <- (11:16)[-5]
mean(x1); sd(x1)
## ----rnormW2, echo=TRUE-------------------------------------------------------
## the standard way of generating normal random values
ra1 <- rnorm(n=length(x1), mean=mean(x1), sd=sd(x1))
## In particular with low n, the random values deviate somehow from expected mean and sd :
mean(ra1) -mean(x1)
sd(ra1) -sd(x1)
## ----rnormW3, echo=TRUE-------------------------------------------------------
## random numbers with close fit to expected mean and sd :
ra2 <- rnormW(length(x1), mean(x1), sd(x1))
mean(ra2) -mean(x1)
sd(ra2) -sd(x1) # much closer to expected value
## ----moderTest2grp, echo=TRUE-------------------------------------------------
set.seed(2017); t8 <- matrix(round(rnorm(1600,10,0.4),2), ncol=8,
dimnames=list(paste("l",1:200), c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2]+3 # augment lines 3:6 for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6]+3 # augment lines 5:8 for AA2&BB2 (c,d,g,h should be found)
t4 <- log2(t8[,1:4]/t8[,5:8])
fit4 <- moderTest2grp(t4, gl(2,2))
## now we'll use limma's topTable() function to look at the 'best' results
if("list" %in% mode(fit4)) { # if you have limma installed we can look further
library(limma)
topTable(fit4, coef=1,n=5) # effect for 3,4,7,8
fit4in <- moderTest2grp(t4, gl(2,2), testO="<")
if("list" %in% mode(fit4in)) topTable(fit4in, coef=1,n=5) }
## ----moderTestXgrp, echo=TRUE-------------------------------------------------
grp <- factor(rep(LETTERS[c(3,1,4)], c(2,3,3)))
set.seed(2017); t8 <- matrix(round(rnorm(208*8,10,0.4),2), ncol=8,
dimnames=list(paste(letters[], rep(1:8,each=26),sep=""), paste(grp,c(1:2,1:3,1:3),sep="")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f)
t8[5:8,c(1:2,6:8)] <- t8[5:8,c(1:2,6:8)] -1.5 # lower lines
t8[6:7,3:5] <- t8[6:7,3:5] +2.2 # augment lines
## expect to find C/A in c,d,g, (h)
## expect to find C/D in c,d,e,f
## expect to find A/D in f,g,(h)
test8 <- moderTestXgrp(t8, grp)
head(test8$p.value, n=8)
## ----pVal2lfdr, echo=TRUE-----------------------------------------------------
set.seed(2017); t8 <- matrix(round(rnorm(160,10,0.4),2), ncol=8, dimnames=list(letters[1:20],
c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f) for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6] +3 # augment lines 5:8 (e-h) for AA2&BB2 (c,d,g,h should be found)
head(pVal2lfdr(apply(t8, 1, function(x) t.test(x[1:4], x[5:8])$p.value)))
## ----fcCI, echo=TRUE----------------------------------------------------------
set.seed(2022); ran <- rnorm(50)
confInt(ran, alpha=0.05)
## plot points and confidence interval of mean
plot(ran, jitter(rep(1, length(ran))), ylim=c(0.95, 1.05), xlab="random variable 'ran'",main="Points and Confidence Interval of Mean (alpha=0.05)", ylab="", las=1)
points(mean(ran), 0.97, pch=3, col=4) # mean
lines(mean(ran) +c(-1, 1) *confInt(ran, 0.05), c(0.97, 0.97), lwd=4, col=4) # CI
legend("topleft","95% conficence interval of mean", text.col=4,col=4,lty=1,lwd=1,seg.len=1.2,cex=0.9,xjust=0,yjust=0.5)
## ----matchSampToPairw, echo=TRUE----------------------------------------------
## make example if limma is not installed
if(!requireNamespace("limma", quietly=TRUE)) test8 <- list(FDR=matrix(1, nrow=2, ncol=3, dimnames=list(NULL,c("A-C","A-D","C-D"))))
matchSampToPairw(unique(grp), colnames(test8$FDR))
## ----pairWiseConc1, echo=TRUE-------------------------------------------------
mat1 <- matrix(1:8, nrow=2, dimnames=list(NULL, paste0(1:4,"-",6:9)))
numPairDeColNames(mat1)
## ----replicateStructure1, echo=TRUE-------------------------------------------
## column a is all different, b is groups of 2,
## c & d are groups of 2 but NOT the 'same general' pattern as b
strX <- data.frame(a=letters[18:11], b=letters[rep(c(3:1,4), each=2)],
c=letters[rep(c(5,8:6), each=2)], d=letters[c(1:2,1:3,3:4,4)],
e=letters[rep(c(4,8,4,7),each=2)], f=rep("z",8) )
strX
replicateStructure(strX[,1:2])
replicateStructure(strX[,1:4], method="combAll")
replicateStructure(strX[,1:4], method="combAll", exclNoRepl=FALSE)
replicateStructure(strX[,1:4], method="combNonOrth", exclNoRepl=TRUE)
replicateStructure(strX, method="lowest")
## ----std1, echo=TRUE----------------------------------------------------------
dat <- matrix(2*round(runif(100),2), ncol=4)
mean(dat); sd(dat)
datS <- scale(dat)
apply(datS, 2, sd)
# each column was treated separately
mean(datS); sd(datS); range(datS)
# the mean is almost 0.0 and the sd almost 1.0
datB <- scale(dat, center=TRUE, scale=FALSE)
mean(datB); sd(datB); range(datB) # mean is almost 0
## ----std2, echo=TRUE----------------------------------------------------------
datS2 <- standardW(dat)
apply(datS2, 2, sd)
summary(datS2)
mean(datS2); sd(datS2)
datS3 <- standardW(dat, byColumn=TRUE)
apply(datS3, 2, sd)
summary(datS3)
mean(datS3); sd(datS3)
## ----scale1, echo=TRUE--------------------------------------------------------
datR2 <- apply(dat, 2, scaleXY, 1, 100)
summary(datR2); sd(datR2)
## ----clu01, echo=TRUE---------------------------------------------------------
nGr <- 3
irKm <- stats::kmeans(iris[,1:4], nGr, nstart=nGr*4) # no need to standardize
table(irKm$cluster, iris$Species)
#wrGraph::plotPCAw(t(as.matrix(iris[,1:4])), sampleGrp=irKm,colBase=irKm$cluster,useSymb=as.numeric(as.factor(iris$Species)))
## ----clu02, echo=TRUE---------------------------------------------------------
## sort results by cluster number
head(reorgByCluNo(iris[,-5], irKm$cluster))
tail(reorgByCluNo(iris[,-5], irKm$cluster))
## ----clu03, echo=TRUE---------------------------------------------------------
## median and CV
ir2 <- reorgByCluNo(iris[,-5], irKm$cluster, addInfo=FALSE, retList=TRUE)
## ----clu04, echo=TRUE---------------------------------------------------------
sapply(ir2, function(x) apply(x, 2, median))
## ----clu05, echo=TRUE---------------------------------------------------------
sapply(ir2, colSds)
## ----filterNetw0, echo=TRUE---------------------------------------------------
lst2 <- list('121'=data.frame(ID=as.character(c(141,221,228,229,449)),11:15),
'131'=data.frame(ID=as.character(c(228,331,332,333,339)),11:15),
'141'=data.frame(ID=as.character(c(121,151,229,339,441,442,449)),c(11:17)),
'151'=data.frame(ID=as.character(c(449,141,551,552)),11:14),
'161'=data.frame(ID=as.character(171),11),
'171'=data.frame(ID=as.character(161),11),
'181'=data.frame(ID=as.character(881:882),11:12) )
## ----filterNetw1, echo=TRUE---------------------------------------------------
(nw1 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=FALSE))
## ----filterNetw2, echo=TRUE---------------------------------------------------
(nw2 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=TRUE))
## ----filterNetw3, echo=TRUE---------------------------------------------------
(nw3 <- filterNetw(lst2, limInt=20, sandwLim=14, remOrphans=TRUE))
## ----propMatr1, echo=TRUE-----------------------------------------------------
pairs3L <- matrix(LETTERS[c(1,3,3, 2,2,1)], ncol=2) # loop of 3
(netw13pr <- pairsAsPropensMatr(pairs3L)) # as prop matr
## ----contribToContigPerFrag, echo=TRUE----------------------------------------
path1 <- matrix(c(17,19,18,17, 4,4,2,3), ncol=2,
dimnames=list(c("A/B/C/D","A/B/G/D","A/H","A/H/I"), c("sumLen","n")))
contribToContigPerFrag(path1)
## ----simpleFragFig, echo=TRUE-------------------------------------------------
frag1 <- cbind(beg=c(2,3,7,13,13,15,7,9,7, 3,3,5), end=c(6,12,8,18,20,20,19,12,12, 4,5,7))
rownames(frag1) <- letters[1:nrow(frag1)]
simpleFragFig(frag1)
## ----countSameStartEnd, echo=TRUE---------------------------------------------
countSameStartEnd(frag1)
## ----pasteC, echo=TRUE--------------------------------------------------------
pasteC(1:4)
pasteC(letters[1:4],quoteC="'")
## ----color-gradient1, echo=TRUE-----------------------------------------------
set.seed(2015); dat1 <- round(runif(15),2)
plot(1:15, dat1, pch=16, cex=2, las=1, col=colorAccording2(dat1),
main="Color gradient according to value in y")
# Here we modify the span of the color gradient
plot(1:15, dat1, pch=16, cex=2, las=1,
col=colorAccording2(dat1, nStartO=0, nEndO=4, revCol=TRUE), main="blue to red")
# It is also possible to work with scales of transparency
plot(1:9, pch=3, las=1)
points(1:9, 1:9, col=transpGraySca(st=0, en=0.8, nSt=9,trans=0.3), cex=42, pch=16)
## ----convColorToTransp, fig.height=6, fig.width=3, echo=TRUE------------------
col0 <- c("#998FCC","#5AC3BA","#CBD34E","#FF7D73")
col1 <- convColorToTransp(col0,alph=0.7)
layout(1:2)
pie(rep(1,length(col0)), col=col0, main="no transparency")
pie(rep(1,length(col1)), col=col1, main="new transparency")
## ----sysDate1, echo=TRUE------------------------------------------------------
## To get started
Sys.Date()
## Compact English names (in European order), no matter what your local settings are :
sysDate()
## ----DateTab, echo=TRUE-------------------------------------------------------
tabD <- cbind(paste0("univ",1:6), c(sysDate(style="univ1"), sysDate(style="univ2"),
sysDate(style="univ3"), sysDate(style="univ4"), as.character(sysDate(style="univ5")),
sysDate(style="univ6")), paste0(" local",1:6),
c(sysDate(style="local1"), sysDate(style="local2"), sysDate(style="local3"),
sysDate(style="local4"), sysDate(style="local5"), sysDate(style="local6")))
knitr::kable(tabD, caption="Various ways of writing current date")
## ----sessionInfo, echo=FALSE--------------------------------------------------
sessionInfo()
---
title: "Getting started with wrMisc"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrMiscVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains a collection of various (low-level) tools which may be of general interest.
These functions were accumulated over a number of years of data-wrangling when treating high-throughput data from biomedical applications.
Besides, these functions are further used/integrated in more specialized functions dedicated to specific applications in the packages [wrProteo](https://CRAN.R-project.org/package=wrProteo), [wrGraph](https://CRAN.R-project.org/package=wrGraph) or [wrTopDownFrag](https://CRAN.R-project.org/package=wrTopDownFrag).
All these packages are available on [CRAN](https://cran.r-project.org/).
If you are not familiar with [R](https://www.r-project.org/) you may find many introductory documents on the official R-site
in [contributed documents](https://cran.r-project.org/other-docs.html) or under [Documentation/Manuals](https://cran.r-project.org/manuals.html).
Of course, numerous other documents/sites with tutorials and courses exist, too.
### Dependencies and Compilation
One of the aims was to write a package easy to install, with low system requirements and few obligatory dependencies.
All code is written in pure R and does not need any special compilers.
The number of obligatory dependencies was kept to a minimum.
Most of the additional packages used in some of the functions were declared as 'suggested' (ie not obligatory), to allow installation of _wrMisc_ even if some of these additional packages can't be installed/compiled on the user's system.
When a feature/function of one of the 'suggested' packages is about to be used, its presence/installation will be checked and,
only if found missing, the user will be prompted with a message inviting to install the specific package(s) before using these specific functions.
This helps avoid situations where this package cannot be installed at all just because some of its dependencies fail to get installed themselves.
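Schematically, such a runtime check may look like this (a minimal sketch using base R; the actual messages produced by the package may differ, and the package name is chosen for illustration only):
```{r suggestedCheck, echo=TRUE, eval=FALSE}
## check for a 'suggested' package before using it
if(requireNamespace("fdrtool", quietly=TRUE)) {
  ## ... code calling functions of fdrtool ...
} else message("Please install the package 'fdrtool' from CRAN first")
```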
### Installation And Loading
To get started, we need to install (if not yet installed) and load the package "[wrMisc](https://CRAN.R-project.org/package=wrMisc)" available from [CRAN](https://cran.r-project.org/).
```{r setup0, include=FALSE, echo=FALSE, messages=FALSE, warnings=FALSE}
suppressPackageStartupMessages({
library(wrMisc)
})
```
```{r install, echo=TRUE, eval=FALSE}
## If not already installed, you'll have to install the package first.
## This is the basic installation command in R
install.packages("wrMisc")
```
Since the functions illustrated in this vignette require a number of the _suggested_ packages,
let's check whether they are installed and install any missing ones (via a small helper function).
```{r install2, echo=TRUE, eval=FALSE}
packages <- c("knitr", "rmarkdown", "BiocManager", "kableExtra", "boot", "data.tree", "data.table",
"fdrtool", "RColorBrewer", "Rcpp", "wrMisc", "wrGraph", "wrProteo")
checkInstallPkg <- function(pkg) { # install function
if(!requireNamespace(pkg, quietly=TRUE)) install.packages(pkg) }
## install if not yet present
sapply(packages, checkInstallPkg)
```
Finally, this package also uses the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html)
which has to be installed differently (see also help on [Bioconductor](https://bioconductor.org)):
```{r install3, echo=TRUE, eval=FALSE}
## Installation of limma
BiocManager::install("limma")
```
This vignette is also accessible from the R command-line or on CRAN at [wrMisc](https://CRAN.R-project.org/package=wrMisc):
```{r install4, echo=TRUE, eval=FALSE}
## Now you can open this vignette out of R:
vignette("wrMiscVignette1", package="wrMisc")
```
Before using the functions of this package, we actually need to load the package first (best on a fresh R-session):
```{r setup1}
library("wrMisc")
library("knitr")
## This is 'wrMisc' version number :
packageVersion("wrMisc")
```
## Speed Optimized Functions In The Package wrMisc
In high-throughput experiments in biology (like transcriptomics, proteomics etc...) many different features get measured a number of times (for different samples, like patients or the evolution of a disease). The resulting data typically contain many (independent) rows (eg >1000 different genes or proteins whose abundance was measured) and far fewer columns that may get further organized in groups of replicates.
As R is a versatile language, multiple options exist for assessing the global characteristics of such data, some being more efficient from a computational point of view.
In order to allow fast treatment of very large data-sets some tools have been re-designed for optimal performance.
### Assessing Basic Information About Variability (for matrix)
Many measurement techniques applied in a high-throughput manner suffer from limited precision.
This means, the same measurements taken twice in a row (ie repeated on the same subject) will very likely not give an identical result.
For this reason it is common practice to make replicate measurements to i) estimate mean (ie representative) values and ii) assess the factors contributing to the variability observed.
Briefly, technical replicates represent the case where multiple read-outs of the very same sample are generated and the resulting variability is associated with technical issues during the process of taking measures. Biological replicates represent independent samples and therefore reflect the variability a given parameter may have in a certain population of individuals.
With the tools presented here, both technical and biological replicates can be dealt with.
In several cases the interpretation of the resulting numbers should consider the experimental setup, though.
Let's make a simple matrix as toy data:
```{r basicVariability, echo=TRUE}
grp1 <- rep(LETTERS[1:3], c(3,4,3))
sampNa1 <- paste0(grp1, c(1:3,1:4,1:3))
set.seed(2016); dat1 <- matrix(round(c(runif(50000) +rep(1:1000,50)),3),
ncol=10, dimnames=list(NULL,sampNa1))
dim(dat1)
head(dat1)
```
Now let's estimate the standard deviation _(sd)_ for every row:
```{r sdForEachRow, echo=TRUE}
head(rowSds(dat1))
system.time(sd1 <- rowSds(dat1))
system.time(sd2 <- apply(dat1, 1, sd))
```
On most systems the equivalent calculation using *apply()* will run much slower compared to `rowSds`.
Note, there is a minor issue with rounding :
```{r usingApply, echo=TRUE}
table(round(sd1, 13)==round(sd2, 13))
```
Similarly we can easily calculate the CV (coefficient of variation, ie sd / mean, see also [CV](https://en.wikipedia.org/wiki/Coefficient_of_variation)) for every row using `rowCVs` :
```{r calculateRowCV, echo=TRUE}
system.time(cv1 <- rowCVs(dat1))
system.time(cv2 <- apply(dat1, 1, sd) / rowMeans(dat1))
# typically the calculation using rowCVs is much faster
head(cv1)
# results from the 'conventional' way
head(cv2)
```
Note, these calculations will be very efficient as long as the number of rows is much higher (>>) than the number of columns.
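If you wish to verify this on your own system, a quick comparison might look like this (matrix dimensions chosen arbitrarily for illustration):
```{r rowSdsShape, echo=TRUE, eval=FALSE}
tallM <- matrix(rnorm(2e5), ncol=10)       # many rows, few columns
wideM <- matrix(rnorm(2e5), ncol=2e4)      # few rows, many columns
system.time(sdTall <- rowSds(tallM))       # the favorable case
system.time(sdWide <- rowSds(wideM))       # less of an advantage in this shape
```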
### Data Organized In (Sub-)Groups As Sets Of Columns
Now, let's assume our data contains 3 initial samples measured as several replicates (already defined in _grp1_).
Similarly, we can also calculate the sd or CV for each line while splitting into groups of replicates (functions `rowGrpMeans`, `rowGrpSds` and `rowGrpCV`):
```{r rowGrpMeans1, echo=TRUE}
# we already defined the grouping :
grp1
## the mean for each group and row
system.time(mean1Gr <- rowGrpMeans(dat1, grp1))
```
```{r sdOrCVbyGrp, echo=TRUE}
## Now the sd for each row and group
system.time(sd1Gr <- rowGrpSds(dat1, grp1))
# will give us a matrix with the sd for each group & line
head(sd1Gr)
# Let's check the results of the first line :
sd1Gr[1,] == c(sd(dat1[1,1:3]), sd(dat1[1,4:7]), sd(dat1[1,8:10]))
# The CV :
system.time(cv1Gr <- rowGrpCV(dat1, grp1))
head(cv1Gr)
```
#### Counting Number Of NAs Per Row And Group Of Columns
Some data, like with quantitative proteomics measures, may contain an elevated number of _NAs_ (see also the package [wrProteo](https://CRAN.R-project.org/package=wrProteo) for further options for dealing with such data).
Furthermore, many other packages on CRAN and Bioconductor cover this topic, see also the [missing data task-view](https://CRAN.R-project.org/view=MissingData) on CRAN.
Similar to above, there is an easy way to count the number of _NAs_ to get an overview of how the NAs are distributed.
Let's assume we have measures from 3 groups/samples with 4 replicates each :
```{r rowGrpNA1, echo=TRUE}
mat2 <- c(22.2, 22.5, 22.2, 22.2, 21.5, 22.0, 22.1, 21.7, 21.5, 22, 22.2, 22.7,
NA, NA, NA, NA, NA, NA, NA, 21.2, NA, NA, NA, NA,
NA, 22.6, 23.2, 23.2, 22.4, 22.8, 22.8, NA, 23.3, 23.2, NA, 23.7,
NA, 23.0, 23.1, 23.0, 23.2, 23.2, NA, 23.3, NA, NA, 23.3, 23.8)
mat2 <- matrix(mat2, ncol=12, byrow=TRUE)
## The definition of the groups (ie replicates)
gr4 <- gl(3, 4, labels=LETTERS[1:3])
```
Now we can easily count the number of NAs per row and set of replicates.
```{r rowGrpNA2, echo=TRUE}
rowGrpNA(mat2,gr4)
```
### Fast NA-omit For Very Large Objects
The function _na.omit()_ from the package _stats_ also keeps a trace of all omitted instances.
This can be penalizing in terms of memory usage when handling very large vectors with a high content of NAs (eg >10000 NAs).
If you don't need to document precisely which elements got eliminated, the function `naOmit()` may offer
smoother functioning for very large objects.
```{r naOmit, echo=TRUE}
aA <- c(11:13,NA,10,NA)
str(naOmit(aA))
# the 'classical' na.omit also stores which elements were NA
str(na.omit(aA))
```
### Minimum Distance/Difference Between Values
If you need to find the closest neighbour(s) of a numeric vector, the function `minDiff()` will tell you the
distance ("dif","ppm" or "ratio") and index ("best") of the closest neighbour.
In case of multiple shortest distances the index of the first one is reported, and the column "nbest" will display a value >1.
```{r minDiff, echo=TRUE}
set.seed(2017); aa <- 10 *c(0.1 +round(runif(20),2), 0.53, 0.53)
head(aa)
minDiff(aa,ppm=FALSE)
```
When you look at the first line, the value of 10.2 has one single closest value which is 10.4,
which is located in line number 19 (the column 'best' gives the index of the best).
Line number 19 points back to line number 1.
You can see that some elements (like 5.7) occur multiple times (lines no 3 and 9); multiple occurrences are counted in the column _ncur_.
This is also why column _nbest_ for line 15 (_value_ =6.0) indicates that its closest value occurs twice.
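The distances may also be expressed as ppm (assuming the argument _ppm_ set to TRUE, as is eg common in proteomics):
```{r minDiffPpm, echo=TRUE}
minDiff(aa, ppm=TRUE)
```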
## Working With Lists (And Lists Of Lists) {#WorkingWithLists}
### Partial unlist
When input from different places gets collected and combined into a list, this may give a collection of different types of data.
The function `partUnlist()` will preserve multi-column elements as they are (and just bring down one level):
```{r partUnlist_1, echo=TRUE}
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
partUnlist(bb)
partUnlist(lapply(bb,.asDF2))
```
This won't be possible using _unlist()_.
```{r unlist_1, echo=TRUE}
head(unlist(bb, recursive=FALSE))
```
To homogenize such data into a list with only one column per list-element, the function `asSepList()` provides help :
```{r asSepList, echo=TRUE}
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
asSepList(bb)
```
### Appending/Combining Lists
Separate lists may be combined using the _append()_ command, which also allows treating simple vectors.
```{r lappend1, echo=TRUE}
li1 <- list(a=1, b=2, c=3)
li2 <- list(A=11, b=2, C=13)
append(li1, li2)
```
However, this way there is no checking if some of the list-elements are present in both lists and thus will appear twice.
The function `appendNR()` allows checking whether some list-elements would appear twice, and thus avoids such duplicate entries.
```{r lappend2, echo=TRUE}
appendNR(li1, li2)
```
### rbind On Lists
When a matrix (or data.frame) gets split into a list, like in the example using _by()_, as a reverse-function such lists can get joined using `lrbind()` in an _rbind_-like fashion.
```{r lrbind, echo=TRUE}
dat2 <- matrix(11:34, ncol=3, dimnames=list(letters[1:8], colnames=LETTERS[1:3]))
lst2 <- by(dat2, rep(1:3,c(3,2,3)), as.matrix)
lst2
# join list-elements (back) into single matrix
lrbind(lst2)
```
### Merge Multiple Matrices From List
When combining different datasets the function `mergeMatrixList()` allows merging multiple matrices (or data.frames) into a single matrix.
Two modes of operation are available : i) Returning only the common/shared elements (as defined by the rownames), this is the default _mode='intersect'_ ;
alternatively one may ii) fuse/merge all matrices together without any loss of data (using _mode='union'_, additional _NA_s may appear when a given rowname is absent in one of the input matrices).
Furthermore, one may specifically select which columns should be used for fusing using the argument _useColumn_.
```{r mergeMatrixList, echo=TRUE}
mat1 <- matrix(11:18, ncol=2, dimnames=list(letters[3:6],LETTERS[1:2]))
mat2 <- matrix(21:28, ncol=2, dimnames=list(letters[2:5],LETTERS[3:4]))
mat3 <- matrix(31:38, ncol=2, dimnames=list(letters[c(1,3:4,3)],LETTERS[4:5]))
#
mergeMatrixList(list(mat1, mat2), useColumn="all")
# with custom names for the individual matrices
mergeMatrixList(list(m1=mat1, m2=mat2, mat3), mode="union", useColumn=2)
```
Similarly, separate entries may be merged using `mergeMatrices()` :
```{r mergeMatrices, echo=TRUE}
mergeMatrices(mat1, mat2)
mergeMatrices(mat1, mat2, mat3, mode="union", useColumn=2)
## custom names for matrix-origin
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=2)
## flexible/custom selection of columns
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=list(1,1:2,2))
```
### Fuse Content Of List-Elements With Redundant (Duplicated) Names
When list-elements have the same name, their content (of named numeric or character vectors)
may get fused using `fuseCommonListElem()` according to the names of the list-elements :
```{r fuseCommonListElem, echo=TRUE}
val1 <- 10 +1:26
names(val1) <- letters
(lst1 <- list(c=val1[3:6], a=val1[1:3], b=val1[2:3] ,a=val1[12], c=val1[13]))
## here the names 'a' and 'c' appear twice :
names(lst1)
## now, let's fuse all 'a' and 'c'
fuseCommonListElem(lst1)
```
### Filtering Lines And/Or Columns For All List-Elements Of Same Size
In a number of cases the information in various list-elements is somehow related.
Eg, in S3-objects produced by [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), or data produced using [wrProteo](https://CRAN.R-project.org/package=wrProteo) several instances of matrix or data.frame refer to data that are related.
Some matrixes may contain abundance data (or weights, etc) while another matrix or data.frame may contain the annotation information related to each line of the abundance data.
So if one wants to filter the data, ie remove some lines, this should be done in the same way with all related list-elements.
This way one may maintain a convenient 1:1 matching of lines.
The function `filterLiColDeList()` searches if other list-elements have suitable dimensions and will then run the same filtering as in the 'target' list-element.
In consequence this can be used with the output of wrProteo to remove simultaneously the same lines and/or columns.
```{r listBatchReplace1, echo=TRUE}
lst1 <- list(m1=matrix(11:18, ncol=2), m2=matrix(21:30, ncol=2), indR=31:34,
m3=matrix(c(21:23,NA,25:27,NA), ncol=2))
filterLiColDeList(lst1, useLines=2:3)
filterLiColDeList(lst1, useLines="allNA", ref=3)
```
### Replacements In List
The function `listBatchReplace()` works similarly to _sub()_ and allows searching & replacing exact matches to a character string along all elements of a list.
```{r replInList1, echo=TRUE}
(lst1 <- list(aa=1:4, bb=c("abc","efg","abhh","effge"), cc=c("abdc","efg","efgh")))
listBatchReplace(lst1, search="efg", repl="EFG", silent=FALSE)
```
### Organize Values Into List And Sort By Names
Named numeric or character vectors can be organized into lists using `listGroupsByNames()`,
based on their names (only the part before any extensions starting with a point gets considered).
Of course, other separators may be defined using the argument _sep_.
```{r listGroupsByNames, echo=TRUE}
ser1 <- 1:7; names(ser1) <- c("AA","BB","AA.1","CC","AA.b","BB.e","A")
listGroupsByNames(ser1)
```
If no names are present, the content of the vector itself will be used as name :
```{r listGroupsByNames2, echo=TRUE}
listGroupsByNames((1:10)/5)
```
### Batch-filter List-Elements
In the view of object-oriented programming several methods produce results integrated into lists or S3-objects (eg
[limma](https://bioconductor.org/packages/release/bioc/html/limma.html)).
The function `filterList()` aims at facilitating the filtering of all elements of lists or S3-objects.
List-elements with an inappropriate number of lines will be ignored.
```{r filterList, echo=TRUE}
set.seed(2020); dat1 <- round(runif(80),2)
list1 <- list(m1=matrix(dat1[1:40], ncol=8), m2=matrix(dat1[41:80], ncol=8), other=letters[1:8])
rownames(list1$m1) <- rownames(list1$m2) <- paste0("line",1:5)
# Note: the list-element list1$other has a length different to that of filt. Thus, it won't get filtered.
filterList(list1, list1$m1[,1] >0.4) # filter according to 1st column of $m1 ...
filterList(list1, list1$m1 >0.4)
```
### Transform Columns Of Matrix To List Of Vectors
On some occasions it may be useful to separate the columns of a matrix into individual vectors inside a list.
This can be done using `matr2list()`:
```{r matr2list, echo=TRUE}
(mat1 <- matrix(1:12, ncol=3, dimnames=list(letters[1:4],LETTERS[1:3])))
str(matr2list(mat1))
```
## Working With Arrays {#WorkingWithArrays}
Let's get started with a little toy-array:
```{r array0, echo=TRUE}
(arr1 <- array(c(6:4,4:24), dim=c(4,3,2), dimnames=list(c(LETTERS[1:4]),
paste("col",1:3,sep=""),c("ch1","ch2"))))
```
### CV (Coefficient Of Variation) With Arrays
Now we can obtain the CV (coefficient of variation) by splitting along the 3rd dimension (ie this is equivalent to an _apply_ along the 3rd dimension) using `arrayCV()`:
```{r arrayCV1, echo=TRUE}
arrayCV(arr1)
# this is equivalent to
cbind(rowCVs(arr1[,,1]), rowCVs(arr1[,,2]))
```
Similarly we can split along any other dimension, eg the 2nd dimension :
```{r arrayCV2, echo=TRUE}
arrayCV(arr1, byDim=2)
```
### Slice 3-dim Array In List Of Matrixes (Or Arrays)
This procedure is similar to (re-)organizing an initial array into clusters, here we split along a user-defined factor/vector.
If a clustering-algorithm produces the cluster assignments, this function can be used to organize the input data accordingly using `cutArrayInCluLike()`.
```{r cutArrayInCluLike, echo=TRUE}
cutArrayInCluLike(arr1, cluOrg=c(2,1,2,1))
```
Let's cut by filtering along the 3rd dimension for all lines where column 'col2' is >7, and then display only the content of columns 'col1' and 'col2' (using `filt3dimArr()`):
```{r filt3dimArr, echo=TRUE}
filt3dimArr(arr1, displCrit=c("col1","col2"), filtCrit="col2", filtVal=7, filtTy=">")
```
## Working With Redundant Data {#WorkingWithRedundantData}
_Semantics_ : Please note that there are two ways of interpreting the term '**unique**' :
* In regular understanding one describes this way an event which occurs only once, and thus does not occur/happen anywhere else.
* The command `unique()` will eliminate redundant entries to obtain a shorter 'unique' output vector, ie in the resultant vector all values/content (values) occur only once.
However, from the result of _unique()_ you cannot tell any more which ones were not unique initially !
In some applications (eg proteomics) initial identifiers (IDs) may occur multiple times in the data and we frequently need to identify events/values that occur only once, as the first meaning of '_unique_'.
This package provides (additional) functions to easily distinguish values occurring just once (ie _unique_) from those occurring multiple times. Furthermore, there are functions to rename/remove/combine replicated elements, eg `correctToUnique()` or `nonAmbiguousNum()`, so that no elements or lines of data get lost.
### Identify What Is Repeated (and Where Repeated Do Occur)
```{r repeated1, echo=TRUE}
## some text toy data
tr <- c("li0","n",NA,NA, rep(c("li2","li3"),2), rep("n",4))
```
The function _table()_ (from the package _base_) is very useful to get some insights when working with smaller objects, but may be slow to handle very large objects.
As mentioned, _unique()_ will make everything unique, and afterwards you won't know any more who was unique in the first place !
The function `duplicated()` (also from package base) helps us find out which entries are repeated.
```{r repeated2, echo=TRUE}
table(tr)
unique(tr)
duplicated(tr, fromLast=FALSE)
```
```{r repeated3, echo=TRUE}
aa <- c(11:16,NA,14:12,NA,14)
names(aa) <- letters[1:length(aa)]
aa
```
`findRepeated()` (from this package) will return the position/index (and content/value) of repeated elements. However, the output in form of a list is not very convenient to the human reader.
```{r findRepeated, echo=TRUE}
findRepeated(aa)
```
`firstOfRepeated()` tells the index of the first instance of repeated elements, which elements you need to make the vector 'unique', and which elements get stripped off when making unique.
Please note, that _NA_ (no matter if they occur once or multiple times) are automatically in the part suggested to be removed.
```{r firstOfRepeated, echo=TRUE}
firstOfRepeated(aa)
aa[firstOfRepeated(aa)$indUniq] # only unique with their names
unique(aa) # unique() does not return any names !
```
### Correct Vector To Unique (While Maintaining The Original Vector Length)
If necessary, a counter can be added to non-unique entries using `correctToUnique()`; thus no individual values get eliminated, and the length and order of the resultant object remain the same.
This is of importance when assigning rownames to a data.frame : Assigning redundant values/text as rownames of a data.frame will result in an error !
```{r correctToUnique1, echo=TRUE}
correctToUnique(aa)
correctToUnique(aa, sep=".", NAenum=FALSE) # keep NAs (ie without transforming to character)
```
You see from the last example above, that this function has an argument for controlling the enumeration of elements.
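To illustrate the rownames issue mentioned above, here is a minimal example (the failing assignment is wrapped in _try()_ so it does not interrupt execution):
```{r correctToUnique2, echo=TRUE}
df0 <- data.frame(x=1:3)
try(rownames(df0) <- c("a","b","a"), silent=TRUE)   # fails : duplicate rownames are not allowed
rownames(df0) <- correctToUnique(c("a","b","a"))    # works after correcting to unique
df0
```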
### Mark Any Duplicated (ie Ambiguous) Elements by Changing Their Names (and Separate from Unique)
First, the truly unique values are reported and then the first occurrence of repeated elements is given; _NA_ instances get ignored.
This can be done using `nonAmbiguousNum()` which maintains the length of the initial character vector.
```{r nonAmbiguousNum, echo=TRUE}
unique(aa) # names are lost
nonAmbiguousNum(aa)
nonAmbiguousNum(aa, uniq=FALSE, asLi=TRUE) # separate in list unique and repeated
```
### Compare Multiple Vectors And Sort By Number Of Common/Repeated Values/Words
The main aim of the function `sortByNRepeated()` is to allow comparing multiple vectors for common values/words and to provide an output sorted by the number of repeats.
Suppose 3 persons are asked which cities they wanted to visit.
Then we would like to count the most frequently cited cities.
Here we consider individual choices as equally ranked.
By default intra-repeats are eliminated.
```{r sortByNRepeated, echo=TRUE}
cities <- c("Bangkok","London","Paris", "Singapore","New York City", "Istambul","Delhi","Rome","Dubai")
sortByNRepeated(x=cities[c(1:4)], y=cities[c(2:3,5:8)])
## or (unlimited) multiple inputs via list
choices1 <- list(Mary=cities[c(1:4)], Olivia=cities[c(2:3,5:8)], Paul=cities[c(5:3,9,5)]) # Note : Paul cited NYC twice !
table(unlist(choices1))
sortByNRepeated(choices1)
sortByNRepeated(choices1, filterIntraRep=FALSE) # without correcting multiple citation of NYC by Paul
```
### Combine Multiple Matrixes Where Some Column-Names Are The Same
Here, it is supposed that you want to join 2 or more matrixes describing different properties of the same collection of individuals (as rows).
Common column-names are interpreted as indicating that their respective information should be combined (either as average or as sum).
This can be done using `cbindNR()` :
```{r cbindNR, echo=TRUE}
## First we'll make some toy data :
(ma1 <- matrix(1:6, ncol=3, dimnames=list(1:2,LETTERS[3:1])))
(ma2 <- matrix(11:16, ncol=3, dimnames=list(1:2,LETTERS[3:5])))
## now we can join 2 or more matrixes
cbindNR(ma1, ma2, summarizeAs="mean") # average of both columns 'C'
```
### Filter Matrix To Keep Only First Of Repeated Lines
This resembles the functioning of _unique()_, but applies to a user-specified column of the matrix.
```{r firstLineOfDat, echo=TRUE}
(mat1 <- matrix(c(1:6, rep(1:3,1:3)), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2])))
```
The function `firstLineOfDat()` allows to access/extract the first line of repeated instances.
```{r firstLineOfDat2, echo=TRUE}
firstLineOfDat(mat1, refCol=2)
```
This function was rather designed for dealing with character input; it allows concatenating all columns and removing redundant lines.
```{r firstOfRepLines, echo=TRUE}
mat2 <- matrix(c("e","n","a","n","z","z","n","z","z","b",
"","n","c","n","","","n","","","z"), ncol=2)
firstOfRepLines(mat2, out="conc")
# or as index :
firstOfRepLines(mat2)
```
### Filter To Unique Column-Content Of Matrix, Add Counter And Concatenated Information
```{r nonredDataFrame, echo=TRUE}
(df1 <- data.frame(cbind(xA=letters[1:5], xB=c("h","h","f","e","f"), xC=LETTERS[1:5])))
```
The function `nonredDataFrame()` offers to include a counter of redundant instances encountered (for 1st column specified) :
```{r nonredDataFrame2, echo=TRUE}
nonredDataFrame(df1, useCol=c("xB","xC"))
# without counter or concatenating
df1[which(!duplicated(df1[,2])),]
# or
df1[firstOfRepLines(df1,useCol=2),]
```
### Get First Of Repeated By Column
```{r get1stOfRepeatedByCol, echo=TRUE}
mat2 <- cbind(no=as.character(1:20), seq=sample(LETTERS[1:15], 20, repl=TRUE),
ty=sample(c("full","Nter","inter"),20,repl=TRUE), ambig=rep(NA,20), seqNa=1:20)
(mat2uniq <- get1stOfRepeatedByCol(mat2, sortBy="seq", sortSupl="ty"))
# the values from column 'seq' are indeed unique
table(mat2uniq[,"seq"])
# This will return all first repeated (may be >1), but without further sorting
# along column 'ty' nor marking in column 'ambig'.
mat2[which(duplicated(mat2[,2],fromLast=FALSE)),]
```
### Transform (Ambiguous) Matrix To Non-ambiguous Matrix (In Respect To Given Column)
```{r nonAmbiguousMat, echo=TRUE}
nonAmbiguousMat(mat1,by=2)
```
Here another example, ambiguous will be marked by an '_' :
```{r nonAmbiguousMat2, echo=TRUE}
set.seed(2017); mat3 <- matrix(c(1:100,round(rnorm(200),2)), ncol=3,
dimnames=list(1:100,LETTERS[1:3]));
head(mat3U <- nonAmbiguousMat(mat3, by="B", na="_", uniqO=FALSE), n=15)
head(get1stOfRepeatedByCol(mat3, sortB="B", sortS="B"))
```
### Combine Replicates From List To Matrix
```{r combineReplFromListToMatr, echo=TRUE}
lst2 <- list(aa_1x=matrix(1:12, nrow=4, byrow=TRUE), ab_2x=matrix(24:13, nrow=4, byrow=TRUE))
combineReplFromListToMatr(lst2)
```
### Non-redundant Lines Of Matrix
```{r nonRedundLines, echo=TRUE}
mat4 <- matrix(rep(c(1,1:3,3,1),2), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2]))
nonRedundLines(mat4)
```
### Filter For Unique Elements /2
```{r filtSizeUniq, echo=TRUE}
# input: c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), filtUn=TRUE, minSi=NULL)
# here a,b,c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), ref=c(letters[c(1:26,1:3)],
"dd","dd","bb","ddd"), filtUn=TRUE, minSi=NULL)
```
### Make Non-redundant Matrix
```{r makeNRedMatr, echo=TRUE}
t3 <- data.frame(ref=rep(11:15,3), tx=letters[1:15],
matrix(round(runif(30,-3,2),1), nc=2), stringsAsFactors=FALSE)
# First we split the data.frame in list
by(t3,t3[,1],function(x) x)
t(sapply(by(t3,t3[,1],function(x) x), summarizeCols, me="maxAbsOfRef"))
(xt3 <- makeNRedMatr(t3, summ="mean", iniID="ref"))
(xt3 <- makeNRedMatr(t3, summ=unlist(list(X1="maxAbsOfRef")), iniID="ref"))
```
### Combine/Reduce Redundant Lines Based On Specified Column
```{r combineRedBasedOnCol, echo=TRUE}
matr <- matrix(c(letters[1:6],"h","h","f","e",LETTERS[1:5]), ncol=3,
dimnames=list(letters[11:15],c("xA","xB","xC")))
combineRedBasedOnCol(matr, colN="xB")
combineRedBasedOnCol(rbind(matr[1,],matr), colN="xB")
```
### Convert Matrix (eg With Redundant Row-Names) To data.frame
```{r convMatr2df, echo=TRUE}
x <- 1
dat1 <- matrix(1:10, ncol=2)
rownames(dat1) <- letters[c(1:3,2,5)]
## as.data.frame(dat1) ... would result in an error
convMatr2df(dat1)
convMatr2df(data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3))
tmp <- data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
tmp <- data.frame(a=as.character((1:3)/2), b=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
```
### Find And Combine Points Located Very Close In X/Y Space
```{r combineOverlapInfo, echo=TRUE}
set.seed(2013)
datT2 <- matrix(round(rnorm(200)+3,1), ncol=2, dimnames=list(paste("li",1:100,sep=""),
letters[23:24]))
# (mimick) some short and longer names for each line
inf2 <- cbind(sh=paste(rep(letters[1:4],each=26), rep(letters,4),1:(26*4),sep=""),
lo=paste(rep(LETTERS[1:4],each=26), rep(LETTERS,4), 1:(26*4), ",",
rep(letters[sample.int(26)],4), rep(letters[sample.int(26)],4), sep=""))[1:100,]
## We'll use this to test :
head(datT2, n=10)
## let's assign to each pair of x & y values a 'cluster' (column _clu_, the column _combInf_ tells us which lines/indexes are in this cluster)
head(combineOverlapInfo(datT2, disThr=0.03), n=10)
## it is also possible to rather display names (eg gene or protein-names) instead of index values
head(combineOverlapInfo(datT2, suplI=inf2[,2], disThr=0.03), n=10)
```
### Bin And Summarize Values According To Their Names
```{r getValuesByUnique, echo=TRUE}
dat <- 11:19
names(dat) <- letters[c(6:3,2:4,8,3)]
## Here the names are not unique.
## Thus, the values can be binned by their (non-unique) names and representative values calculated.
## Let's make a 'datUniq' with the mean of each group of values :
datUniq <- round(tapply(dat, names(dat), mean),1)
## now we propagate the mean values to the full vector
getValuesByUnique(dat, datUniq)
cbind(ini=dat,firstOfRep=getValuesByUnique(dat, datUniq),
indexUniq=getValuesByUnique(dat, datUniq, asIn=TRUE))
```
### Regrouping Simultaneously by Two Factors
For example, if you wish to create group-labels considering the eye- and hair-color of a small group of students (supposing a sort of controlled vocabulary was used),
the function `combineByEitherFactor()` will help. So basically, this is an empiric segmentation-approach for two categorical variables.
Please note, that with large data-sets and very dispersed data this approach will not provide great results.
In the example below we'll attempt to 'cluster' according to the columns _nn_ and _qq_; the resultant cluster number can be found in column _grp_.
```{r combineByEitherFactor, echo=TRUE}
nn <- rep(c("a","e","b","c","d","g","f"),c(3,1,2,2,1,2,1))
qq <- rep(c("m","n","p","o","q"),c(2,1,1,4,4))
nq <- cbind(nn,qq)[c(4,2,9,11,6,10,7,3,5,1,12,8),]
## Here we consider 2 columns 'nn' and 'qq' when trying to regroup common values
## (eg value 'a' from column 'nn' and value 'o' from 'qq')
combineByEitherFactor(nq, 1, 2, nBy=FALSE)
```
The argument _nBy_ simply allows adding an additional column with the group/cluster-number.
```{r combineByEitherFactor2, echo=TRUE}
## the same, but including n by group/cluster
combineByEitherFactor(nq, 1, 2, nBy=TRUE)
## Not running further iterations works faster, but you may not reach 'convergence' immediately
combineByEitherFactor(nq,1, 2, nBy=FALSE)
```
```{r combineByEitherFactor3, echo=TRUE}
## another example
mm <- rep(c("a","b","c","d","e"), c(3,4,2,3,1))
pp <- rep(c("m","n","o","p","q"), c(2,2,2,2,5))
combineByEitherFactor(cbind(mm,pp), 1, 2, con=FALSE, nBy=TRUE)
```
### Batch Replacing Of Values Or Character-Strings
The function `multiCharReplace()` facilitates multiple replacements in a vector, matrix or data.frame.
```{r multiCharReplace1, echo=TRUE}
# replace character content
x1 <- c("ab","bc","cd","efg","ghj")
multiCharReplace(x1, cbind(old=c("bc","efg"), new=c("BBCC","EF")))
# works also on matrix and/or to replace numeric content :
x3 <- matrix(11:16, ncol=2)
multiCharReplace(x3, cbind(12:13,112:113))
```
Sometimes data get imported using different encodings for what should be interpreted as _FALSE_ and _TRUE_ :
```{r multiCharReplace2, echo=TRUE}
# replace and return logical vector
x2 <- c("High","n/a","High","High","Low")
multiCharReplace(x2,cbind(old=c("n/a","Low","High"), new=c(NA,FALSE,TRUE)), convTo="logical")
```
### Multi-to-multi Matching Of (Concatenated) Terms
The function allows splitting (if necessary, using _strsplit()_) two vectors and comparing each isolated tag (eg identifier) from the 1st vector/object against each isolated tag from the 2nd vector/object. This runs like a loop of one-to-many comparisons. The basic output is a list with indexes of which element of the 1st vector/object has matches in the 2nd vector/object. Since this is not convenient to the human reader, tabular output can be created, too.
```{r multiMatch1, echo=TRUE}
aa <- c("m","k","j; aa","m; aa; bb; o","n; dd","aa","cc")
bb <- c("aa","dd","aa; bb; q","p; cc")
## result as list of indexes
(bOnA <- multiMatch(aa, bb, method="asIndex")) # match bb on aa
## more convenient to the human reader
(bOnA <- multiMatch(aa, bb)) # match bb on aa
(bOnA <- multiMatch(aa, bb, method="matchedL")) # match bb on aa
```
### Comparing Global Patterns
In most programming languages it is fairly easy to compare _exact_ content of character vectors or factors with unordered levels.
However, sometimes - due to semantic issues - some people may call a color 'purple' while others call it 'violet'.
Thus, without using controlled vocabulary the _exact_ terms may vary.
Here, let's address the case where no dictionaries of controlled vocabulary are available for substituting equivalent terms.
Thus, we'll compare 4 vectors of equal length and check if the words/letters used could be substituted to result in the first vector.
Vectors _aa_ and _ab_ have the same global pattern, ie after repeating a word twice it moves to another word.
Vectors _ac_ and _ad_ have different general patterns, either with alternating words or falling back to a word previously used.
Based on (and extending) a post on stackoverflow [https://stackoverflow.com/questions/71353218/extracting-flexible-general-patterns/](https://stackoverflow.com/questions/71353218/extracting-flexible-general-patterns/) :
```{r compGlobPat1, echo=TRUE}
aa <- letters[rep(c(3:1,4), each=2)]
ab <- letters[rep(c(5,8:6), each=2)] # 'same general' pattern to aa
ac <- letters[c(1:2,1:3,3:4,4)] # NOT 'same general' pattern to any other
ad <- letters[c(6:8,8:6,7:6)] # NOT 'same general' pattern to any other
```
The basic pattern can be extracted by combining match() and unique():
```{r compGlobPat2, echo=TRUE}
## get global patterns
cbind(aa= match(aa, unique(aa)),
ab= match(ab, unique(ab)),
ac= match(ac, unique(ac)),
ad= match(ad, unique(ad)) )
```
Let's make a data.frame with the annotation toy-data from above.
Each line is supposed to represent a sample, and the columns show different aspects of annotation.
```{r compGlobPat3, echo=TRUE}
bb <- data.frame(ind=1:length(aa), a=aa, b=ab, c=ac, d=ad)
```
Via the function `replicateStructure()` it is possible to compare annotation given as different columns for equivalent global patterns.
By default, this function excludes all columns not designating any replicates, like the numbers in the first column ($ind).
Also it will try to find the column with the median number of levels, when comparing to all other columns.
The output is a list with *\$col* indicating which column(s) may be used, *\$lev* for the corresponding global pattern, *\$meth* for the method finally used and
_\$allCols_ for documenting the global pattern in each column (whether it was selected or not).
```{r compGlobPat4, echo=TRUE}
replicateStructure(bb)
```
Besides, it is also possible to combine all columns if one considers that they contribute complementary substructures of the overall annotation.
```{r compGlobPat5, echo=TRUE}
replicateStructure(bb, method="combAll")
```
However, when combining multiple columns it may happen - like in the example above - that finally no lines remain that are considered as replicates.
This can also be found when one column describes the groups and another gives the order of the replicates therein.
However, for calling a (standard) statistical test it may be necessary to exclude these replicate-numbers to designate the groups of replicates.
To overcome the problem of losing the replicate-structure when combining all factors, it is possible to look for non-orthogonal structures,
ie to try excluding columns which (after combining) would suggest no replicates.
See the example below :
```{r compGlobPat6, echo=TRUE}
replicateStructure(bb, method="combNonOrth")
```
## Search For Similar (Numeric) Values {#SearchForSimilarNumericValues}
This section addresses values that are not truly _identical_ but may differ only in the very last digit(s)
and thus may, from a pragmatic point of view, be considered and treated as 'about the same'.
The simplest approach would be to round values and then look for identical values.
The functions presented here (like `checkSimValueInSer()`) offer this type of search in a convenient way.
```{r checkSimValueInSer, echo=TRUE}
va1 <- c(4:7,7,7,7,7,8:10) +(1:11)/28600
checkSimValueInSer(va1)
cbind(va=va1, simil=checkSimValueInSer(va1))
```
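For comparison, the 'naive' rounding approach mentioned above could look like this (on the same toy data):
```{r roundedDuplicated, echo=TRUE}
## round first, then check which values become exactly identical
table(duplicated(round(va1, 2)))
```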
### Find Similar Numeric Values Of Two Columns Of A Matrix
The search for similar values may be performed as absolute distance or as 'ppm' (as is eg usual in proteomics when comparing measured and theoretically expected mass).
```{r findCloseMatch1, echo=TRUE}
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,15.9,13.5,15.7,14.1,5)
(cloMa <- findCloseMatch(x=aA, y=cC, com="diff", lim=0.5, sor=FALSE))
```
The result of _findCloseMatch()_ is a list organized by each 'x', telling all instances of 'y' found within the distance tolerance given by _lim_.
Using `closeMatchMatrix()` the result obtained above, can be presented in a more convenient format for the human eye.
```{r closeMatchMatrix1, echo=TRUE}
# all matches (of 2d arg) to/within limit for each of 1st arg ('x'); 'y' ..to 2nd arg = cC
# first let's display only one single closest/best hit
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=TRUE)) #
```
Using the argument _limitToBest=FALSE_ we can display all distances within the limits imposed, some values/points may occur multiple times.
For example, value number 4 of 'cC' (=12.5) or value number 3 of 'aA' (=13) now occur multiple times...
```{r closeMatchMatrix2, echo=TRUE}
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=FALSE,origN=TRUE)) #
(maAa <- closeMatchMatrix(cloMa, cbind(valA=81:87, aA), cbind(valC=91:99, cC), colM=2,
colP=2, lim=FALSE))
(maAa <- closeMatchMatrix(cloMa, cbind(aA,valA=81:87), cC, lim=FALSE, deb=TRUE)) #
a2 <- aA; names(a2) <- letters[1:length(a2)]; c2 <- cC; names(c2) <- letters[10 +1:length(c2)]
(cloM2 <- findCloseMatch(x=a2, y=c2, com="diff", lim=0.5, sor=FALSE))
(maA2 <- closeMatchMatrix(cloM2, predM=cbind(valA=81:87, a2),
measM=cbind(valC=91:99, c2), colM=2, colP=2, lim=FALSE, asData=TRUE))
(maA2 <- closeMatchMatrix(cloM2, cbind(id=names(a2), valA=81:87,a2), cbind(id=names(c2),
valC=91:99,c2), colM=3, colP=3, lim=FALSE, deb=FALSE))
```
### Find Similar Numeric Values From Two Vectors/Matrixes
For comparing two sets of data one may use `findSimilarFrom2sets()`.
```{r findSimilFrom2sets, echo=TRUE}
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,12.6,15.9,14.1)
aZ <- matrix(c(aA,aA+20), ncol=2, dimnames=list(letters[1:length(aA)],c("aaA","aZ")))
cZ <- matrix(c(cC,cC+20), ncol=2, dimnames=list(letters[1:length(cC)],c("ccC","cZ")))
findCloseMatch(cC, aA, com="diff", lim=0.5, sor=FALSE)
findSimilFrom2sets(aA, cC)
findSimilFrom2sets(cC, aA)
findSimilFrom2sets(aA, cC, best=FALSE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=5e4, deb=TRUE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=FALSE)
# below: find fewer 'best matches' since search window larger (ie more good hits compete !)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=TRUE)
```
### Fuse Previously Identified Pairs To 'Clusters'
When you have already identified the closest neighbour of a set of values, you may want to
re-organize/fuse such pairs to a given number of total clusters (using `fusePairs()`).
```{r fusePairs, echo=TRUE}
(daPa <- matrix(c(1:5,8,2:6,9), ncol=2))
fusePairs(daPa, maxFuse=4)
```
### Eliminate Close (Overlapping) Points (In Bivariate x & y Space)
When visualizing larger data-sets in an x&y space one may find many points overlapping when their values are almost the same.
The function `elimCloseCoord()` aims to reduce a bivariate data-set to 'non-overlapping' points, somehow similar to human perception.
```{r elimCloseCoord1, echo=TRUE}
da1 <- matrix(c(rep(0:4,5),0.01,1.1,2.04,3.07,4.5), ncol=2); da1[,1] <- da1[,1]*99; head(da1)
elimCloseCoord(da1)
```
### Mode Of (Continuous) Data
Looking for the _mode_ is rather easy with counting data; the result of _table()_ will get you there quickly.
However, with continuous data the mode may be more tricky to define and identify.
Intuitively, most people consider the mode as the peak of a density estimation (which remains to be defined and estimated).
With continuous data the most frequent (precise) value may be quite different/distant to the most dense region of the data.
The function `stableMode()` presented here has different modes of operation; at this point there is no clear rule which mode may perform most satisfactorily in different situations.
```{r stableMode, echo=TRUE}
set.seed(2012); dat <- round(c(rnorm(120,0,1.2), rnorm(80,0.8,0.6), rnorm(25,-0.6,0.05), runif(200)),3)
dat <- dat[which(dat > -2 & dat <2)]
stableMode(dat)
```
Now we can try to show this on a plot :
```{r stableMode2, fig.height=8, fig.width=9, fig.align="center", echo=TRUE}
layout(1:2)
plot(1:length(dat), sort(dat), type="l", main="Sorted Values", xlab="rank", las=1)
abline(h=stableMode(dat, silent=TRUE), lty=2,col=2)
legend("topleft",c("stableMode"), text.col=2, col=2, lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
plot(density(dat, kernel="gaussian", adjust=0.7), xlab="Value of dat", main="Density Estimate Plot")
useCol <- c("red","green","blue","grey55")
legend("topleft",c("dens","binning","BBmisc","allModes"), text.col=useCol, col=useCol,
lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
abline(v=stableMode(dat, method="dens", silent=TRUE), lty=2, col="red", lwd=2)
abline(v=stableMode(dat, method="binning", silent=TRUE), lty=2, col="green")
abline(v=stableMode(dat, method="BBmisc", silent=TRUE), lty=2, col="blue")
abline(v=stableMode(dat, method="allModes"), lty=2, col="grey55")
```
Please note, that plotting data modelled via a kernel function (as above) also relies on strong hypotheses which may not be well justified in a number of cases !
For this reason, the _sorted values_ were plotted, too.
As you can see from the example above, looking for the most frequent exact value may not be a perfect choice for continuous data.
In this example the method _'allModes'_ (ie the multiple instances of most frequent exact values) gave partially usable results (dashed grey lines), due to the rounding to 3 digits.
As you can see in the example above, the method _'allModes'_ may give multiple ties !
More rounding will make the data more discrete and ultimately resemble counting data. However, with rounding some of the finer resolution/details will get lost.
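To illustrate the effect of rounding mentioned above, here is a brief sketch on the same toy data:
```{r stableModeRound, echo=TRUE}
## stronger rounding makes the data more 'discrete'
length(unique(round(dat, 3)))      # number of distinct values at 3 digits
length(unique(round(dat, 1)))      # far fewer distinct values at 1 digit
stableMode(round(dat, 1), method="allModes")
```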
### Most Frequently Occurring Value (traditional mode)
The function `stableMode()` can also be used to locate _the_ most frequently occurring exact value of numeric or character vectors.
As we just saw at the end of the previous example, the argument _method="allModes"_ allows finding all ties (if present).
```{r stableMode3, echo=TRUE}
set.seed(2021)
x <- sample(letters, 50000, replace=TRUE)
stableMode(x, method="mode")
stableMode(x, method="allModes")
```
## Text-Manipulations {#Text-Manipulations}
There are several packages offering interesting functions for manipulating text. Here are a few functions to complement these.
### Trimming Redundant Text
Automatic annotation has the tendency to concatenate many parameters into a single name.
The function `trimRedundText()` was designed to allow trimming redundant text from left and/or right side of a character-vector
(when the same portion of text appears in _each_ element).
However, as in some cases (like the first element of the example below) nothing would remain, it is possible to define a _minimum_ width for the remaining/resulting text.
```{r trimRedundText1, echo=TRUE}
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1)
```
### Extract Common Part Of Text
The original idea was to do something resembling the inverse process of trimming redundant text (example above), but this time to discard the variable text.
In the end this is not as trivial when 'common' or 'redundant' text is not at the beginning or end of a chain of characters.
In particular with very large text this is an active field of research (eg for sequence alignment).
The function presented here is a very light-weight solution designed for smaller and simple settings, like inspecting column-names.
Furthermore, the function `keepCommonText()` only reports the first (longest) hit.
So, when there are multiple conserved 'words' of equal length, only the first of them will be identified.
When setting the argument 'hiResol=FALSE' this function has an option to decrease the resolution of searching, which in turn increases the speed, however at the cost of possibly missing the optimal solution.
In this case the resultant chain of characters should be inspected to see if it can be further extended/optimized.
With terminal common text :
```{r keepCommonText1, echo=TRUE}
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1, side="left") # remove redundant
keepCommonText(txt1, side="terminal") # keep redundant
keepCommonText(txt1, side="center") # computationally easier
```
With internal common text:
```{r keepCommonText2, echo=TRUE}
txt2 <- c("abcd_abc_kjh", "bcd_abc123", "cd_abc_po")
keepCommonText(txt2, side="center")
```
### Manipulating Enumerator-Extensions
Human operators may have many ways to write enumerators like 'xx_sample_1', 'xx_Sample_2', 'xx_s3', 'xx_4', etc.
Many times you may find such text as names or column-names for the measures underneath.
The functions presented below will work only if _consistent enumerators_, ie (text +) digit-character(s), are at the end of all character-strings to be treated.
Please note, that with large vectors testing/checking a larger panel of enumerator-abbreviations may result in slower performance.
In cases of such larger data-sets it may be more effective to first study the data and then run simple substitutions using _sub()_ targeted for this very case.
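For example, if prior inspection shows that all entries terminate in '_' followed by digits, one targeted substitution may be sufficient (a minimal sketch, assuming exactly this pattern; the names are hypothetical):
```{r subEnumerator, echo=TRUE}
xv <- c("xx_sample_1","xx_sample_2","xx_sample_12")   # hypothetical names
sub("_[[:digit:]]+$", "", xv)                         # strip the terminal '_' plus digits
```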
#### Remove/Modify Enumerators
The aim of this function consists in identifying a _common_ pattern for terminal enumerators (ie at the end of words/character strings) and subsequently modifying or removing them.
As separator-symbols and separator-words are given independently, all combinations thereof may be tested.
Furthermore, the user has the choice to (automatically) test all truncated versions of separator-words (eg _Sam_ instead of _Sample_).
As a basic setting, `rmEnumeratorName()` allows identifying and then modifying a _common_ terminal enumerator present in all elements of a character string :
```{r rmEnumeratorName1, echo=TRUE}
xx <- c("hg_Re1","hjRe2_Re2","hk-Re3_Re33")
rmEnumeratorName(xx)
rmEnumeratorName(xx, newSep="--")
rmEnumeratorName(xx, incl="anyCase")
```
Furthermore, this function allows scanning a matrix of text-data and performing similar operations on the _first_ column found containing a _common_ terminal enumerator.
```{r rmEnumeratorName2, echo=TRUE}
xy <- cbind(a=11:13, b=c("11#11","2_No2","333_samp333"), c=xx)
rmEnumeratorName(xy)
rmEnumeratorName(xy,incl=c("anyCase","trim2","rmEnumL"))
```
If you wish to remove/substitute multiple types of enumerators, the function `rmEnumeratorName()` must be run independently for each type, see the last example below.
```{r rmEnumeratorName3, echo=TRUE}
xz <- cbind(a=11:13, b=c("23#11","4#2","567#333"), c=xx)
apply(xz, 2, rmEnumeratorName, sepEnum=c("","_"), newSep="_", silent=TRUE)
```
#### Unify Enumerators
The (slightly older) function `unifyEnumerator()` offers fewer options; in particular, the potential separator-words must be given explicitly, only lower/upper-case may be kept flexible.
```{r unifyEnumerator1, echo=TRUE}
unifyEnumerator(c("ab-1","ab-2","c-3"))
unifyEnumerator(c("ab-R1","ab-R2","c-R3"))
unifyEnumerator(c("ab-1","c3-2","dR3"), stringentMatch=FALSE)
```
### Adjust Decimal Prefixes And Extract Numeric+Unit Part
The function `adjustDecPrefix()` helps extracting the numeric part of character vectors and allows adjusting all values to a single decimal-prefix type.
This can be used to convert a vector of mixed prefixes like 'z','a','f','p','n','u' and 'm' (note that 'u' is used for 'micro').
```{r adjustDecPrefix1, echo=TRUE}
adjustDecPrefix(c("10.psec","2 fsec"), unit="sec")
adjustDecPrefix(c("10.psec abc","2 fsec etc"), unit="sec")
```
### Merging Multiple Named Vectors To Matrix
The function `mergeVectors()` allows merging multiple named vectors (each element needs to be named).
Basically, all elements carrying the same name across different input-vectors will be aligned in the same column of the output (input-vectors appear as lines).
If _match()_ is not successful, the function will try _grep()_ to find matches.
Different to _merge()_, which allows merging only 2 data.frames, here multiple vectors may be merged at once.
```{r mergeVectors1, echo=TRUE}
x1 <- c(a=1, b=11, c=21)
x2 <- c(b=12, c=22, a=2)
x3 <- c(a=3, d=43)
mergeVectors(vect1=x1, vect2=x2, vect3=x3)
mergeVectors(vect1=x1, vect2=x2, vect3=x3, inclInfo=TRUE) # return list with additional info
```
Furthermore, when merging is (still) not successful, common enumerator-names will be stripped to enhance chances of better merging.
```{r mergeVectors2, echo=TRUE}
x11 <- c(Noa=1, Numberb=11, Samplec=21)
x12 <- c(Nob=12, Numberc=22, Samplea=2)
x13 <- c(Numbera=3, d=43)
mergeVectors(vect1=x11, vect2=x12, vect3=x13)
```
```{r mergeVectors3, echo=TRUE}
x4 <- 41:44 # no names - not conform for merging
mergeVectors(x1, x2, x3, x4)
```
### Match All Lines of Matrix To Reference
This function allows adjusting the order of lines of a matrix `mat` to a reference character-vector `ref`,
even when initial direct matching of character-strings using `match()` is not possible/successful.
In this case, various variants of using `grep()` will be used to see if unambiguous matching of characteristic parts of the text is possible.
All columns of `mat` will be tested and the column giving the best results will be used.
```{r matchMatrixLinesToRef1, echo=TRUE}
## Note : columns b and e allow non-ambiguous match, not all elements of e are present in a
mat0 <- cbind(a=c("mvvk","axxd","bxxd","vv"),b=c("iwwy","iyyu","kvvh","gxx"), c=rep(9,4),
d=c("hgf","hgf","vxc","nvnn"), e=c("_vv_","_ww_","_xx_","_yy_"))
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[,5])
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[1:3,5], inclInfo=TRUE)
matchMatrixLinesToRef(mat0[,-2], ref=mat0[,2], inclInfo=TRUE) # needs 'reverse grep'
```
### Order Matrix According To Reference
The function `orderMatrToRef()` has the aim of facilitating bringing a matrix of text/data into the order of a given reference (character vector).
This function will try all columns of the input-matrix to see which gives the best coverage/ highest number of matches to the reference.
If no hits are found, this function will try by partial matching (using _grep()_) all entries of the reference and vice-versa all entries of the matrix.
```{r orderMatrToRef1, echo=TRUE}
mat1 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], rep(1:5)), ncol=3)
orderMatrToRef(mat1, paste0(letters[c(3,4,5,3,4)],c(1,3,5,2,4)))
mat2 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3:5 )), ncol=3)
orderMatrToRef(mat2, paste0(letters[c(3,4,5,3,4)],c(1,3,5,1,4)))
mat3 <- matrix(paste0(letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3,3,5 )), ncol=3)
orderMatrToRef(mat3, paste0("__",letters[c(3,4,5,3,4)],c(1,3,5,1,3)))
```
### Value Matching With Option For Concatenated Terms
Sometimes we need to match terms in concatenated tables.
The function `concatMatch()` was designed to behave similarly to _match()_, but also allows searching among concatenated terms and applying some further text-simplifications.
```{r concatMatch1, echo=TRUE}
## simple example without concatenations or text-extensions
x0 <- c("ZZ","YY","AA","BB","DD","CC","D")
tab0 <- c("AA","BB,E","CC","FF,U")
match(x0, tab0)
concatMatch(x0, tab0) # same result as match(), but with names
## now let's construct something similar, but with concatenations and text-extensions
x1 <- c("ZZ","YY","AA","BB-2","DD","CCdef","Dxy") # modif of single ID (no concat)
tab1 <- c("AA","WW,Vde,BB-5,E","CCab","FF,Uef")
match(x1, tab1) # match finds only the 'simplest' case (ie "AA")
concatMatch(x1, tab1) # finds all hits as in example above
x2 <- c("ZZ,Z","YY,Y","AA,Z,Y","BB-2","DD","X,CCdef","Dxy") # conatenated in 'x'
tab2 <- c("AA","WW,Vde,BB-5,E","CCab,WW","FF,UU")
concatMatch(x2, tab2) # concatenation in both 'x' and 'table'
```
### Check for (Strict) Order
The function `checkStrictOrder()` was designed to scan each line of a (numeric) input matrix for up-, down- or equal-development, ie the change to the next value on the right.
For example, when working with a matrix of 4 columns one can look 3 times at the neighbouring value to the right (in the same line); thus the output will mention 3 events (for each line).
If _all counts_ are 'up' and 0 counts are 'down' or 'eq', the line follows a permanent increase (not necessarily linear), etc.
In some automated procedures (where the number of columns of the initial input may vary) it may be easier to test if any 0 occur.
For this reason the argument _invertCount_ was introduced; in this case a line with a '0' occurring characterizes a constant behaviour (for the respective column).
```{r checkStrictOrder1, echo=TRUE}
set.seed(2005); mat1 <- rbind(matrix(round(runif(40),1),nc=4), rep(1,4))
head(mat1)
checkStrictOrder(mat1); mat1[which(checkStrictOrder(mat1)[,2]==0),]
```
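Following the note above, the inverted counting may be obtained via the argument _invertCount_ :
```{r checkStrictOrder2, echo=TRUE}
## now a '0' indicates constant behaviour for the respective characteristic
head(checkStrictOrder(mat1, invertCount=TRUE))
```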
A slightly more general way of testing can be done using `checkGrpOrder()`. Here, simply a logical value will be produced for each line of input, indicating whether there is constant behaviour.
When the argument _revRank=TRUE_ (default) constant up- or constant down-characteristics will be tested.
```{r checkGrpOrder1, echo=TRUE}
head(mat1)
checkGrpOrder(mat1)
checkGrpOrder(mat1, revRank=FALSE) # only constant 'up' tested
```
## Working With Regressions {#WorkingWithRegressions}
### Best Starting Point For Linear Regressions (Start of linearity)
In many types of measurements the very low-level measures are delicate.
Especially when the readout starts with a baseline signal before increasing amounts of the analyte start producing a linear relationship.
In such cases some of the very lowest levels of the analyte are masked by the (random) baseline signal.
The function `linModelSelect()` presented here allows omitting some of the lowest analyte measures to focus on the linear part of the dose-response relationship.
```{r linModelSelect1, echo=TRUE}
li1 <- rep(c(4,3,3:6), each=3) + round(runif(18)/5,2)
names(li1) <- paste0(rep(letters[1:5], each=3), rep(1:3,6))
li2 <- rep(c(6,3:7), each=3) + round(runif(18)/5, 2)
dat2 <- rbind(P1=li1, P2=li2)
exp2 <- rep(c(11:16), each=3)
exp4 <- rep(c(3,10,30,100,300,1000), each=3)
## Check & plot for linear model
linModelSelect("P1", dat2, expect=exp2)
linModelSelect("P2", dat2, expect=exp2)
```
This function was designed for use with rather small data-sets with no (or very few) measures of base-line.
When larger panels of data are available, it may be better to first define a confidence interval for the base-line measurement
and then only to consider points outside this confidence interval for regressing dose-response relationships
(see also [Detection limit](https://en.wikipedia.org/wiki/Detection_limit)).
### High Throughput Testing For Linear Regressions
Once we have run multiple linear regressions on different parts of the data we might want to compare them in a single plot.
Below, we construct 10 series of data that get modeled the same way, ideally one would obtain a slope close to 1.0.
We still allow omitting some starting points, if the resulting model would fit better.
```{r plotLinModelCoef1, echo=TRUE}
set.seed(2020)
x1 <- matrix(rep(c(2,2:5),each=20) + runif(100) +rep(c(0,0.5,2:3,5),20),
byrow=FALSE, ncol=10, dimnames=list(LETTERS[1:10],NULL))
## just the 1st regression :
summary(lm(b~a, data=data.frame(b=x1[,1], a=rep(1:5,each=2))))
## all regressions
x1.lmSum <- t(sapply(lapply(rownames(x1), linModelSelect, dat=x1,
expect=rep(1:5,each=2), silent=TRUE, plotGraph=FALSE),
function(x) c(x$coef[2,c(4,1)], startFr=x$startLev)))
x1.lmSum <- cbind(x1.lmSum, medQuantity=apply(x1,1,median))
x1.lmSum[,1] <- log10(x1.lmSum[,1])
head(x1.lmSum)
```
Now we can try to plot :
```{r plotLinModelCoef2, echo=TRUE}
wrGraphOK <- requireNamespace("wrGraph", quietly=TRUE) # check if package is available
if(wrGraphOK) wrGraph::plotW2Leg(x1.lmSum, useCol=c("Pr(>|t|)","Estimate","medQuantity","startFr"),
legendloc="topleft", txtLegend="start at")
```
## Combinatorics Issues {#CombinatoricsIssues}
### All Pairwise Ratios
`ratioAllComb()` calculates all possible pairwise ratios between all individual values of x and y.
```{r ratioAllComb0, echo=TRUE}
set.seed(2014); ra1 <- c(rnorm(9,2,1), runif(8,1,2))
```
Let's assume there are 2 parts of 'x' for which we would like to know the representative ratio :
The ratio of medians does not reflect the typical ratio well (if each element has the same chance to be picked).
```{r ratioAllComb1, echo=TRUE}
median(ra1[1:9]) / median(ra1[10:17])
```
Instead, we'll build all possible ratios and summarize them.
```{r ratioAllComb2, echo=TRUE}
summary( ratioAllComb(ra1[1:9], ra1[10:17]))
boxplot(list(norm=ra1[1:9], unif=ra1[10:17], rat=ratioAllComb(ra1[1:9],ra1[10:17])))
```
### Count Frequency Of Terms Combined From Different Drawings (combineAsN)
The main idea of this function is to count frequency of terms when combining different drawings.
Suppose you are asking students for their preferred hobbies.
Now, you want to know how many terms will occur in common in groups of 3 students.
In the example below, simple letters are shown instead of names of hobbies ...
Used in its simplest way, `combineAsN()` does something similar to _table()_ :
Here we're looking at the full combinatorics of making groups of _nCombin_ students and let's count the frequency of terms found 3 times identical, 2 times or only once (ie not cited by the others).
In case multiple groups of _nCombin_ students can be formed, the average of the counts, standard error of the mean (sem), 95% confidence interval (CI) and sd are given to summarize the results.
```{r combineAsN1, echo=TRUE}
tm1 <- list(a1=LETTERS[1:7], a2=LETTERS[3:9], a3=LETTERS[6:10], a4=LETTERS[8:12])
combineAsN(tm1, nCombin=3, lev=gl(1,4))[,1,]
```
One may imagine that different locations/cities/countries will give different results.
Thus, we'll declare the different origins/locations using the _lev_ argument.
Now, this function focuses (by default) on combinations of students from _nCombin_ different origins/locations and
counts how many hobbies were mentioned as all different ('sing', ie number of hobbies only one student mentioned),
as single repeat ('doub') or three times repeated ('trip'), plus minimum twice or 'any' (ie number of hobbies cited no matter how many repeats).
The output is an array; the 3rd dimension contains the counts, followed by sem, CI and sd.
```{r combineAsN2, echo=TRUE}
## different levels/groups in list-elements
tm4 <- list(a1=LETTERS[1:15], a2=LETTERS[3:16], a3=LETTERS[6:17], a4=LETTERS[8:19],
b1=LETTERS[5:19], b2=LETTERS[7:20], b3=LETTERS[11:24], b4=LETTERS[13:25], c1=LETTERS[17:26],
d1=LETTERS[4:12], d2=LETTERS[5:11], d3=LETTERS[6:12], e1=LETTERS[7:10])
te4 <- combineAsN(tm4, nCombin=4, lev=substr(names(tm4),1,1))
str(te4)
te4[,,1] # the counts part only
```
## Import/Export
### Batch-Reading Of Csv Files
Some software produces a series of csv files, where a large experiment/data-set gets recorded as multiple files.
The function `readCsvBatch()` was designed for reading multiple csv files of exactly the same layout and to join their content.
As output a list with the content of each file can be produced (one matrix per file), or the data may be fused into an array, as shown below.
```{r readCsvBatch, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("pl01_1.csv","pl01_2.csv","pl02_1.csv","pl02_2.csv")
datAll <- readCsvBatch(fiNa, path1, silent=TRUE)
str(datAll)
```
When setting the first argument _fileNames_ to _NULL_, you can read all files of a given path.
```{r readCsvBatch2, echo=TRUE}
## batch reading of all csv files in specified path :
datAll2 <- readCsvBatch(fileNames=NULL, path=path1, silent=TRUE)
str(datAll2)
```
### Batch-Reading Of Tabulated Files
The function `readTabulatedBatch()` allows fast batch reading of tabulated files.
All files specified (or all files from a given directory) will be read into separate data.frames of a list.
Default options are US-style commas, with automatic testing for a header in case the package _data.table_ is available (otherwise : no header).
Furthermore, it is possible to designate a given (numeric) column and directly filter for all lines passing a given threshold, yielding smaller objects (a hypothetical sketch of this follows the example below).
```{r readTabulatedBatch1, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("a1.txt","a2.txt")
allTxt <- readTabulatedBatch(fiNa, path1)
str(allTxt)
```
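A hypothetical sketch of such filtering at read-time is shown below; note that the argument names used here (_filtCol_, _filtVal_) are illustrative assumptions only, please consult the function's help-page for the exact interface.
```{r readTabulatedBatch2, echo=TRUE, eval=FALSE}
## hypothetical sketch (not run) : the argument names 'filtCol' and 'filtVal'
## are assumptions for illustration, see ?readTabulatedBatch for the real interface
allTxtFilt <- readTabulatedBatch(fiNa, path1, filtCol=2, filtVal=100)
str(allTxtFilt)
```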
### Reading Incomplete Tables
Sometimes we may get confronted with data which look like 'incomplete' tables.
In such cases some rows do not contain as many elements/columns as other rows.
Files with this type of data pose a problem for `read.table()` (from the _utils_ package).
In some cases using the argument _fill=TRUE_ may allow overcoming this problem.
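For instance, a minimal sketch (assuming a hypothetical tab-separated file 'myFile.txt' with incomplete lines) :
```{r readTableFill, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : 'myFile.txt' is a hypothetical tab-separated file
## where some lines contain fewer fields than the header
datF <- read.table("myFile.txt", header=TRUE, sep="\t", fill=TRUE)
```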
The function `readVarColumns()` (from this package) was designed to provide better help in such cases, reading such data even if the content (and separators) of the last columns are missing.
Basically each line is read and parsed separately; the user should check/decide on the separator to be used.
The example below lists people's names in different locations, some locations have more persons ...
Sometimes exporting such data will generate shorter lines in locations with fewer elements (here 'London') and no additional separators will get added (to mark all empty fields) towards the end.
```{r readVarColumns, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- "Names1.tsv"
datAll <- readVarColumns(fiName=file.path(path1,fiNa), sep="\t")
str(datAll)
```
### Converting Url For Reading Tabulated Data From GitHub
[GitHub](https://github.com/) allows sharing code and (to a lower degree) data.
In order to properly read tabulated (txt, tsv or csv) data directly from a given url, the user should switch to the 'Raw' view.
The function `gitDataUrl()` allows to conveniently switch any url (on git) to the format of the 'Raw' view, suitable for directly reading the data using _read.delim()_, _read.table()_ or _read.csv()_ etc.
```{r readGit1, echo=TRUE}
## An example url with tabulated data :
url1 <- "https://github.com/bigbio/proteomics-metadata-standard/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv"
gitDataUrl(url1)
```
The example below shows how this is used in the function _readSampleMetaData()_ in [wrProteo](https://CRAN.R-project.org/package=wrProteo).
```{r readGit2, echo=TRUE}
dataPxd <- try(read.delim(gitDataUrl(url1), sep='\t', header=TRUE))
str(dataPxd)
```
## Normalization {#Normalization}
The main reason for normalization is to remove variability in the data which is not directly linked to the (original/biological) concept of a given experiment.
High throughput data from real world measurements may easily contain various deformations due to technical reasons, eg slight temperature variations, electromagnetic interference, instability of reagents etc.
In particular, transferring constant amounts of liquids/reagents in highly repeated steps over large experiments is often very challenging; small variations of the amounts of liquid (or similar) are typically addressed by normalization. However, applying aggressive normalization to the data also brings considerable risk of starting to lose some of the effects one intended to study.
At some point it may rather be better to eliminate a few samples or branches of an experiment to avoid too invasive intervention. This shows that quality control can be tightly linked to decisions about data-normalization.
In conclusion, normalization may be far more challenging than simply running some algorithms.
In general, the user has to assume/define some hypothesis to justify intervention.
Sometimes specific elements of an experiment are known to be not affected and can therefore be used to normalize the rest.
Eg, if you observe growth of trees in a forest, big blocks of rock on the ground can be assumed not to change their location.
So one could use them as alignment-marks to superpose pictures taken at slightly different positions.
The hypothesis of no global changes is very common : During the course of many biological experiments (eg change of nutrient) one
assumes that only a small portion of the elements measured (eg the abundance of all different gene-products) do change,
since many processes of a living cell like growth, replication and interaction with neighbour-cells are assumed not to be affected.
So, if one assumes that there are no global changes one normalizes the input-data in a way that the average or median across each experiment will give the same value.
In analogy, if one takes photographs on a partially cloudy day, most cameras will adjust light settings (sun or clouds) so that global luminosity stays the same.
However, if too many of the measured elements are affected, this normalization approach will lead to (additional) loss of information.
It is _essential_ to understand the type of deformation(s) data may suffer from in order to choose the appropriate approaches for normalization.
Of course, graphical representations ([PCA](https://en.wikipedia.org/wiki/Principal_component_analysis), [MA-plots](https://en.wikipedia.org/wiki/MA_plot), etc) are extremely important for identifying abnormalities and potential problems.
The package [wrGraph](https://CRAN.R-project.org/package=wrGraph) offers also complementary options useful in the context of normalization.
Again, graphical representation(s) of the data help to visualize how different normalization procedures affect outcomes.
Before jumping into normalization it may be quite useful to _filter_ the data first.
The overall idea is that most high-throughput experiments do produce some non-meaningful data (artefacts), and it may be wise to remove such 'bad' data
first, as they may affect normalization (in particular _extreme values_).
A special case of problematic data concerns _NA_-values.
### Filter Lines Of Matrix To Reduce Content Of NAs
Frequent _NA_-values may represent another potential issue. With NA-values there is no general optimal advice.
To get started, you should try to investigate how and why NA-values occurred to check if there is a special 'meaning' to them.
For example, on some measurement systems values below detection limit may be simply reported as NAs.
If the lines of your data represent different features quantified (eg proteins), then lines with mostly NA-values represent features
that may not be well exploited anyway. Therefore many times one tries to filter away lines of 'bad' data.
Of course, if there is a column (sample) with an extremely high content of NAs, one should also investigate what might be particular
with this column (sample), to see if one might be better off eliminating the entire column.
Please note, that imputing _NA_-values represents another option instead of filtering and removing, multiple other packages address this in detail, too.
All decisions of which approach to use should be data-driven.
#### Filter For Each Group Of Columns For Sufficient Data As Non-NA
The function `presenceGrpFilt()` allows filtering each group of columns (eg replicates) for a sufficient proportion of non-NA values, as shown below.
```{r presenceGrpFilt1, echo=TRUE}
dat1 <- matrix(1:56,ncol=7)
dat1[c(2,3,4,5,6,10,12,18,19,20,22,23,26,27,28,30,31,34,38,39,50,54)] <- NA
grp1 <- gl(3,3)[-(3:4)]
dat1
## now let's filter
presenceGrpFilt(dat1, gr=grp1, presThr=0.75) # stringent
presenceGrpFilt(dat1, gr=grp1, presThr=0.25) # less stringent
```
#### Filter As Separate Pairwise Groups Of Samples
If you want to use your data in a pair-wise view (like running t-tests on each line), the function `presenceFilt()`
allows eliminating lines containing too many _NA_-values for each pair-wise combination of the groups/levels.
```{r presenceFilt, echo=TRUE}
presenceFilt(dat1, gr=grp1, maxGr=1, ratM=0.1)
presenceFilt(dat1, gr=grp1, maxGr=2, rat=0.5)
```
#### Cleaning Replicates
This procedure aims to remove (by setting to _NA_) the most extreme values among noisy replicates.
Thus, it is assumed that all columns of the input matrix (or data.frame) are replicates of the other columns.
The _nOutl_ most distant points are identified and will be set to _NA_.
```{r cleanReplicates, echo=TRUE}
(mat3 <- matrix(c(19,20,30,40, 18,19,28,39, 16,14,35,41, 17,20,30,40), ncol=4))
cleanReplicates(mat3, nOutl=1)
cleanReplicates(mat3, nOutl=3)
```
### The Function normalizeThis()
In biological high-throughput data columns typically represent different samples, which may be organized as replicates.
During high-throughput experiments thousands of (independent) elements are measured (eg abundance of gene-products); they are represented by rows.
As real-world experiments are not always as perfect as we may think, small changes in the signal measured may easily happen.
Thus, the aim of normalizing is to remove or reduce any trace/variability in the data not related to the original experiment but due to imperfections during detection.
Note, that some experiments may produce a considerable amount of missing data (NAs) which require special attention (dedicated developments exist in other R-packages eg in [wrProteo](https://CRAN.R-project.org/package=wrProteo)).
My general advice is to first carefully look where such missing data is observed and to pay attention to replicate measurements
where a given element once was measured with a real numeric value and once as missing information (NA).
```{r normalizeThis0, echo=TRUE}
set.seed(2015); rand1 <- round(runif(300) +rnorm(300,0,2),3)
dat1 <- cbind(ser1=round(100:1 +rand1[1:100]), ser2=round(1.2*(100:1 +rand1[101:200]) -2),
ser3=round((100:1 +rand1[201:300])^1.2-3))
dat1 <- cbind(dat1, ser4=round(dat1[,1]^seq(2,5,length.out=100) +rand1[11:110],1))
## Let's introduce some NAs
dat1[dat1 <1] <- NA
## Let's get a quick overview of the data
summary(dat1)
## some selected lines (indeed, the 4th column appears always much higher)
dat1[c(1:5,50:54,95:100),]
```
Our toy data may be normalized by a number of different criteria.
In real applications the nature of the data and the type of deformation detected/expected will largely help
deciding which normalization might be the 'best' choice. Here we'll try first normalizing by the mean,
ie all columns will be forced to end up with the same column-mean.
The trimmed mean does not consider values at extremes (as outliers are frequently artefacts and display extreme values).
When restricting even stronger which values to consider one will eventually end up with the median (3rd method used below).
```{r normalizeThis1, echo=TRUE}
no1 <- normalizeThis(dat1, refGrp=1:3, meth="mean")
no2 <- normalizeThis(dat1, refGrp=1:3, meth="trimMean", trim=0.4)
no3 <- normalizeThis(dat1, refGrp=1:3, meth="median")
no4 <- normalizeThis(dat1, refGrp=1:3, meth="slope", quantFa=c(0.2,0.8))
```
It is suggested to verify normalization results by plots.
Note, that [Box plots](https://en.wikipedia.org/wiki/Box_plot) may not be appropriate in some cases (eg multimodal distributions),
for displaying more details you may consider using [Violin-Plots](https://en.wikipedia.org/wiki/Violin_plot) from packages [vioplot](https://CRAN.R-project.org/package=vioplot) or [wrGraph](https://CRAN.R-project.org/package=wrGraph), another option might be a (cumulated) frequency plot (eg in package [wrGraph](https://CRAN.R-project.org/package=wrGraph)).
```{r normalizeThis_plot1, echo=FALSE,eval=TRUE}
boxplot(dat1, main="raw data", las=1)
```
You can see clearly, that the 4th data-set has a problem of range. So we'll see if some proportional normalization
may help to make it more comparable to the other ones.
```{r normalizeThis_plot2, echo=FALSE,eval=TRUE}
layout(matrix(1:4, ncol=2))
boxplot(no1, main="mean normalization", las=1)
boxplot(no2, main="trimMean normalization", las=1)
boxplot(no3, main="median normalization", las=1)
boxplot(no4, main="slope normalization", las=1)
```
### Normalize By Rows
The standard approach for normalizing treats all columns as collections of data whose distribution is not supposed to change.
In some cases/projects we may want to formulate a much more 'aggressive' hypothesis : We consider the content of all columns strictly as the same.
For example this may be the case when comparing with technical replicates only.
In such cases one may use the function `rowNormalize()` which tries to find the average or mean optimal within-line normalization factor.
Besides, an additional mode of operation for _sparse data_ has been added :
Basically, once a row contains just one NA, this row can't be used any more to derive a normalization factor for all rows.
Thus, with many NA-values the number of 'complete' rows will be low or even 0, rendering this approach inefficient or impossible.
Once the content of NA-values is above a customizable threshold, the data will be broken into smaller subsets with fewer groups of fewer columns,
thus increasing the chances of finding 'complete' subsets of data, which will be normalized first and added to other subsets in later steps.
This approach relies on the **hypothesis** that *all data in a given line should be (approximately) the same value* !
Thus, this procedure is particularly well adapted to the case when _all_ samples are multiple replicate measurements of the _same_ sample.
```{r rowNormalize1, echo=TRUE}
set.seed(2); AA <- matrix(rbinom(110, 10, 0.05), nrow=10)
AA[,4:5] <- AA[,4:5] *rep(4:3, each=nrow(AA))
AA1 <- rowNormalize(AA)
round(AA1, 2)
```
Now, let's make this sparse and try normalizing:
```{r rowNormalize2, echo=TRUE}
AC <- AA
AC[which(AC <1)] <- NA
(AC1 <- rowNormalize(AC))
```
Like with _normalizeThis()_ we can define some reference-lines (only these lines will be considered to determine normalization-factors)
```{r rowNormalize3, echo=TRUE}
(AC3 <- rowNormalize(AC, refLines=1:5, omitNonAlignable=TRUE))
```
Please note, that the iterative procedure for _sparse data_ may consume large amounts of computational resources, in particular when
a small number of subgroups has been selected.
### Matrix Coordinates Of Values/Points According To Filtering
Sometimes one needs to obtain the coordinates of values/points of a matrix according to a given filtering condition.
The standard approach using _which()_ returns only a _linearized_ index (which is sufficient for replacing indexed values), but not the row/column positions.
If you need to know the true row/column indexes, you may use `coordOfFilt()`.
```{r coordOfFilt1, echo=TRUE}
set.seed(2021); ma1 <- matrix(sample.int(n=40, size=27, replace=TRUE), ncol=9)
## let's test which values are >37
which(ma1 >37) # doesn't tell which row & col
coordOfFilt(ma1, ma1 >37)
```
## Statistical Testing {#StatisticalTesting}
### Normal Random Number Generation with Close Fit to Expected mean and sd
When creating random values to an expected _mean_ and _sd_, the results obtained using the standard function `rnorm()`
may deviate somewhat from the expected mean and sd, in particular with low _n_.
To still produce random values fitting closely to the expected _mean_ and _sd_ you may use the function `rnormW()`.
The case of _n=2_ is quite simple, with only one possible result.
In other cases (_n>2_), there will be a random initiation which can be fixed using the argument _seed_.
```{r rnormW1, echo=TRUE}
## some sample data :
x1 <- (11:16)[-5]
mean(x1); sd(x1)
```
```{r rnormW2, echo=TRUE}
## the standard way for generating normal random values
ra1 <- rnorm(n=length(x1), mean=mean(x1), sd=sd(x1))
## In particular with low n, the random values deviate somehow from expected mean and sd :
mean(ra1) -mean(x1)
sd(ra1) -sd(x1)
```
```{r rnormW3, echo=TRUE}
## random numbers with close fit to expected mean and sd :
ra2 <- rnormW(length(x1), mean(x1), sd(x1))
mean(ra2) -mean(x1)
sd(ra2) -sd(x1) # much closer to expected value
```
Thus, even with few _n_, the second data-set fits very well to the global characteristics defined/expected.
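To further illustrate the points mentioned above, here the case of _n=2_ and fixing the random initiation via the argument _seed_ :
```{r rnormW4, echo=TRUE}
## with n=2 there is only one possible result (up to the order of the 2 values)
rnormW(n=2, mean=10, sd=1)
## with n>2 the random initiation may be fixed for reproducible results
rnormW(length(x1), mean(x1), sd(x1), seed=2021)
```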
### Moderated Pair-Wise t-Test from limma
If you are not familiar with the way data is handled in the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html)
and you would like to use some of the tools for running moderated t-tests therein, this will provide easy access using `moderTest2grp()` :
```{r moderTest2grp, echo=TRUE}
set.seed(2017); t8 <- matrix(round(rnorm(1600,10,0.4),2), ncol=8,
dimnames=list(paste("l",1:200), c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2]+3 # augment lines 3:6 for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6]+3 # augment lines 5:8 for AA2&BB2 (c,d,g,h should be found)
t4 <- log2(t8[,1:4]/t8[,5:8])
fit4 <- moderTest2grp(t4, gl(2,2))
## now we'll use limma's topTable() function to look at the 'best' results
if("list" %in% mode(fit4)) { # if you have limma installed we can look further
library(limma)
topTable(fit4, coef=1,n=5) # effect for 3,4,7,8
fit4in <- moderTest2grp(t4, gl(2,2), testO="<")
if("list" %in% mode(fit4in)) topTable(fit4in, coef=1,n=5) }
```
### Multiple Moderated Pair-Wise t-Tests From limma
If you want to make multiple pair-wise comparisons using `moderTestXgrp()` :
```{r moderTestXgrp, echo=TRUE}
grp <- factor(rep(LETTERS[c(3,1,4)], c(2,3,3)))
set.seed(2017); t8 <- matrix(round(rnorm(208*8,10,0.4),2), ncol=8,
dimnames=list(paste(letters[], rep(1:8,each=26),sep=""), paste(grp,c(1:2,1:3,1:3),sep="")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f)
t8[5:8,c(1:2,6:8)] <- t8[5:8,c(1:2,6:8)] -1.5 # lower lines
t8[6:7,3:5] <- t8[6:7,3:5] +2.2 # augment lines
## expect to find C/A in c,d,g, (h)
## expect to find C/D in c,d,e,f
## expect to find A/D in f,g,(h)
test8 <- moderTestXgrp(t8, grp)
head(test8$p.value, n=8)
```
### Transform p-values To Local False Discovery Rate (lfdr)
To get an introduction into local false discovery rate estimations you may read [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209).
A convenient way to get lfdr values calculated by the package [fdrtool](https://CRAN.R-project.org/package=fdrtool) is available via the function `pVal2lfdr()`.
Note, that the toy-example used below is too small for estimating meaningful lfdr values.
For this reason the function _fdrtool()_ from package [fdrtool](https://CRAN.R-project.org/package=fdrtool) will issue warnings.
```{r pVal2lfdr, echo=TRUE}
set.seed(2017); t8 <- matrix(round(rnorm(160,10,0.4),2), ncol=8, dimnames=list(letters[1:20],
c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f) for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6] +3 # augment lines 5:8 (e-h) for AA2&BB2 (c,d,g,h should be found)
head(pVal2lfdr(apply(t8, 1, function(x) t.test(x[1:4], x[5:8])$p.value)))
```
### Confidence Intervals (under Normal Distribution)
The [confidence interval (CI)](https://en.wikipedia.org/wiki/Confidence_interval) is a common way of describing the uncertainty of measured or estimated values.
The function `confInt()` allows calculating the confidence interval of the mean (using the functions _qt()_ and _sd()_) under
a given [significance level (alpha)](https://en.wikipedia.org/wiki/Statistical_significance),
assuming that the [Normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) is valid.
```{r fcCI, echo=TRUE}
set.seed(2022); ran <- rnorm(50)
confInt(ran, alpha=0.05)
## plot points and confidence interval of mean
plot(ran, jitter(rep(1, length(ran))), ylim=c(0.95, 1.05), xlab="random variable 'ran'",main="Points and Confidence Interval of Mean (alpha=0.05)", ylab="", las=1)
points(mean(ran), 0.97, pch=3, col=4) # mean
lines(mean(ran) +c(-1, 1) *confInt(ran, 0.05), c(0.97, 0.97), lwd=4, col=4) # CI
legend("topleft","95% conficence interval of mean", text.col=4,col=4,lty=1,lwd=1,seg.len=1.2,cex=0.9,xjust=0,yjust=0.5)
```
### Extract Groups Of Replicates From Pair-Wise Column-Names
When running multiple pairwise tests (using *moderTestXgrp()*) the column-names are concatenated group-names.
To get the index of which group has been used in which pair-wise set you may use the function `matchSampToPairw()`, as shown below.
```{r matchSampToPairw, echo=TRUE}
## make example if limma is not installed
if(!requireNamespace("limma", quietly=TRUE)) test8 <- list(FDR=matrix(1, nrow=2, ncol=3, dimnames=list(NULL,c("A-C","A-D","C-D"))))
matchSampToPairw(unique(grp), colnames(test8$FDR))
```
### Extract Numeric Part Of Column-Names
When running multiple pairwise tests (using *moderTestXgrp()*) the results will be in adjacent columns and the group-names reflected in the column-names.
In the case measurements from multiple levels of a given variable are compared it is useful to extract the numeric part, the function `numPairDeColNames()` provides support to do so.
When extracting just the numeric part, unit names will get lost, though. Note, if units used are not constant (eg seconds and milliseconds mixed) the extracted numeric values do not reflect the real quantitative context any more.
```{r pairWiseConc1, echo=TRUE}
mat1 <- matrix(1:8, nrow=2, dimnames=list(NULL, paste0(1:4,"-",6:9)))
numPairDeColNames(mat1)
```
### Automatic Determination Of Replicate Structure Based On Meta-Data
In order to run statistical testing the user must know which samples should be considered replicates of each other.
The function `replicateStructure()` aims to provide help by checking all columns of a matrix of meta-data with the aim of identifying the replicate-status.
To do so, all columns are examined for how many groups of replicates they may design. Depending on the argument _method_, various options for choosing automatically exist :
The default _method="combAll"_ will select the column with the median number of groups (not counting all-different or all-same columns).
When using _method="combAll"_ (ie combining all columns that are neither all-different nor all-same), there is a risk that all lines (samples) will be considered different and no replicates remain.
To avoid this situation the argument _method_ can be set to _"combNonOrth"_.
Then, it will be checked if adding more columns would lead to complete loss of replicates, and -if so- the concerned columns are omitted.
```{r replicateStructure1, echo=TRUE}
## column a is all different, b is groups of 2,
## c & d are groups of 2 nut NOT 'same general' pattern as b
strX <- data.frame(a=letters[18:11], b=letters[rep(c(3:1,4), each=2)],
c=letters[rep(c(5,8:6), each=2)], d=letters[c(1:2,1:3,3:4,4)],
e=letters[rep(c(4,8,4,7),each=2)], f=rep("z",8) )
strX
replicateStructure(strX[,1:2])
replicateStructure(strX[,1:4], method="combAll")
replicateStructure(strX[,1:4], method="combAll", exclNoRepl=FALSE)
replicateStructure(strX[,1:4], method="combNonOrth", exclNoRepl=TRUE)
replicateStructure(strX, method="lowest")
```
## Working With Clustering {#WorkingWithClustering}
Multiple concepts for clustering have been developed; most of them allow extracting a vector with the cluster-numbers.
Here some functions helping to work with the output of such clustering results are presented.
### Prepare Data For Clustering
The way data get prepared for clustering may be as important as the choice of the actual clustering-algorithm ...
Many clustering algorithms are available in R (eg see also [CRAN Task View: Cluster Analysis & Finite Mixture Models](https://CRAN.R-project.org/view=Cluster)), many of them require the input data to be standardized.
The regular way of standardizing sets all elements to mean=0 and sd=1.
To do so, the function `scale()` may be used.
```{r std1, echo=TRUE}
dat <- matrix(2*round(runif(100),2), ncol=4)
mean(dat); sd(dat)
datS <- scale(dat)
apply(datS, 2, sd)
# each column was treated separately
mean(datS); sd(datS); range(datS)
# the mean is almost 0.0 and the sd almost 1.0
datB <- scale(dat, center=TRUE, scale=FALSE)
mean(datB); sd(datB); range(datB) # mean is almost 0
```
However, if you want to standardize the entire data-set and not each column separately, you may use `standardW()`.
Thus, relative differences visible within a line will be conserved.
Furthermore, in case of 3-dim arrays, this function returns also the same dimensions as the input.
```{r std2, echo=TRUE}
datS2 <- standardW(dat)
apply(datS2, 2, sd)
summary(datS2)
mean(datS2); sd(datS2)
datS3 <- standardW(dat, byColumn=TRUE)
apply(datS3, 2, sd)
summary(datS3)
mean(datS3); sd(datS3)
```
Sometimes it is sufficient to only set the minimum and maximum to a given range.
```{r scale1, echo=TRUE}
datR2 <- apply(dat, 2, scaleXY, 1, 100)
summary(datR2); sd(datR2)
```
### Characterize Clustering Results
Here is a very basic clustering example ...
```{r clu01, echo=TRUE}
nGr <- 3
irKm <- stats::kmeans(iris[,1:4], nGr, nstart=nGr*4) # no need to standardize
table(irKm$cluster, iris$Species)
#wrGraph::plotPCAw(t(as.matrix(iris[,1:4])), sampleGrp=irKm,colBase=irKm$cluster,useSymb=as.numeric(as.factor(iris$Species)))
```
Using the function `reorgByCluNo()` we can now 'apply' the clustering result to the initial data to obtain other information.
```{r clu02, echo=TRUE}
## sort results by cluster number
head(reorgByCluNo(iris[,-5], irKm$cluster))
tail(reorgByCluNo(iris[,-5], irKm$cluster))
```
Let's calculate the median and sd values for each cluster:
```{r clu03, echo=TRUE}
## median and sd per cluster
ir2 <- reorgByCluNo(iris[,-5], irKm$cluster, addInfo=FALSE, retList=TRUE)
```
```{r clu04, echo=TRUE}
sapply(ir2, function(x) apply(x, 2, median))
```
```{r clu05, echo=TRUE}
sapply(ir2, colSds)
```
Besides, we have already seen the function `cutArrayInCluLike()` in the section [Working with Arrays](#WorkingWithArrays).
## Tree-Like Structures {#TreeLikeStructures}
### Filter Lists Of Connected Nodes, Extension Of Networks As 'Sandwich'
When interrogating network-databases (like String for proteins or the coexpressionDB for gene co-expression) typically a (semi-)quantitative
value is supplied with the connection of node 'A' to node 'B'.
In many cases, it may be useful to filter the initial query-output to retain only strong interactions.
Furthermore, it may be of interest to expand such networks by nodes allowing to (further) inter-connect the initial query-nodes
(so-called 'sandwich' nodes, as they sit in the middle of initial nodes); for such nodes a separate (eg even more stringent) threshold can be applied.
Here let's suppose nodes have 3-digit names (ie numbers). 7 nodes of an initial query gave 1 to 7 connected nodes;
the results are presented as a list of data.frames where the 1st column indicates the connected node and the 2nd column the quality score of the connection (edge).
Furthermore, let's assume that here lower scores are better.
```{r filterNetw0, echo=TRUE}
lst2 <- list('121'=data.frame(ID=as.character(c(141,221,228,229,449)),11:15),
'131'=data.frame(ID=as.character(c(228,331,332,333,339)),11:15),
'141'=data.frame(ID=as.character(c(121,151,229,339,441,442,449)),c(11:17)),
'151'=data.frame(ID=as.character(c(449,141,551,552)),11:14),
'161'=data.frame(ID=as.character(171),11),
'171'=data.frame(ID=as.character(161),11),
'181'=data.frame(ID=as.character(881:882),11:12) )
```
Now, we'd like to keep the core network consisting of all (directly) interconnected nodes with scores below 20 :
```{r filterNetw1, echo=TRUE}
(nw1 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=FALSE))
```
In the resulting output the 1st column now represents the query-nodes, the 2nd column all connected nodes based on filtering scores for edges,
and the 3rd column the score for the edges.
Let's also remove all nodes not connected to a backbone at least 3 nodes long, ie remove orphan pairs of nodes :
```{r filterNetw2, echo=TRUE}
(nw2 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=TRUE))
```
If you want to expand this network by nodes allowing to further interconnect the nodes from above,
we can add all 'sandwich' nodes (let's use a threshold of inferior/equal to 14 which will use only the better 'sandwich'-edges) :
```{r filterNetw3, echo=TRUE}
(nw3 <- filterNetw(lst2, limInt=20, sandwLim=14, remOrphans=TRUE))
```
### Convert Collection Of Pairs Of Nodes To Propensity Matrix
Many times networks get created from pairs of nodes. One way to represent the full network is via propensity matrices.
Several advanced tools and packages rather accept such propensity matrices as input.
Here, it is assumed that each line of the input represents a separate pair of nodes connected by an edge.
```{r propMatr1, echo=TRUE}
pairs3L <- matrix(LETTERS[c(1,3,3, 2,2,1)], ncol=2) # loop of 3
(netw13pr <- pairsAsPropensMatr(pairs3L)) # as prop matr
```
### Characterize Individual Contribution Of Single Edges In Tree-Structures
The example below illustrates `contribToContigPerFrag()`, which characterizes the contribution of individual fragments/edges to the paths ('contigs') they are part of :
```{r contribToContigPerFrag, echo=TRUE}
path1 <- matrix(c(17,19,18,17, 4,4,2,3), ncol=2,
dimnames=list(c("A/B/C/D","A/B/G/D","A/H","A/H/I"), c("sumLen","n")))
contribToContigPerFrag(path1)
```
### Count Same Start- And End- Sites Of Edges (Or Fragments)
If you have a set of fragments from a common ancestor and the fragment's start- and end-sites
are marked by index-positions (integers), you can make a simple graphical display :
```{r simpleFragFig, echo=TRUE}
frag1 <- cbind(beg=c(2,3,7,13,13,15,7,9,7, 3,3,5), end=c(6,12,8,18,20,20,19,12,12, 4,5,7))
rownames(frag1) <- letters[1:nrow(frag1)]
simpleFragFig(frag1)
```
Now we can make a matrix telling if some fragments start or end at exactly the same position.
```{r countSameStartEnd, echo=TRUE}
countSameStartEnd(frag1)
```
## Support for Graphical Output {#SupportForGraphicalOutput}
### Convenient Paste-Collapse
The function `pasteC()` allows adding quotes and separating the last element by specific text (eg 'and').
```{r pasteC, echo=TRUE}
pasteC(1:4)
pasteC(letters[1:4],quoteC="'")
```
### Transform Numeric Values to Color-Gradient
By default most color-gradients (eg rainbow-like) end with a color very close to the one they started with.
```{r color-gradient1, echo=TRUE}
set.seed(2015); dat1 <- round(runif(15),2)
plot(1:15, dat1, pch=16, cex=2, las=1, col=colorAccording2(dat1),
main="Color gradient according to value in y")
# Here we modify the span of the color gradient
plot(1:15, dat1, pch=16, cex=2, las=1,
col=colorAccording2(dat1, nStartO=0, nEndO=4, revCol=TRUE), main="blue to red")
# It is also possible to work with scales of transparency
plot(1:9, pch=3, las=1)
points(1:9, 1:9, col=transpGraySca(st=0, en=0.8, nSt=9,trans=0.3), cex=42, pch=16)
```
### Assign New Transparency To Given Colors
For this purpose you may use `convColorToTransp`.
```{r convColorToTransp, fig.height=6, fig.width=3, echo=TRUE}
col0 <- c("#998FCC","#5AC3BA","#CBD34E","#FF7D73")
col1 <- convColorToTransp(col0,alph=0.7)
layout(1:2)
pie(rep(1,length(col0)), col=col0, main="no transparency")
pie(rep(1,length(col1)), col=col1, main="new transparency")
```
### Print Matrix-Content As Plot
There are many ways of creating reports. If you simply want to combine a few plots into a pdf, the function `tableToPlot()`
may be helpful to add a small table (eg an overview of points/samples/files used in other plots of the same pdf).
This function prints tables in the current graphical output/window (which may be a pdf-device), as sketched below.
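A minimal sketch (assuming `tableToPlot()` takes the matrix or data.frame to display as its first argument; please see the function's help-page for all options) :
```{r tableToPlot1, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : print a small overview-table into the current
## graphical device (which may be a pdf-device opened via pdf())
tab1 <- data.frame(sample=c("wt1","wt2","mut1","mut2"), group=rep(c("wt","mut"), each=2))
tableToPlot(tab1)
```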
## Other Convenience Functions {#OtherConvenienceFunctions}
### Writing Compact Dates (more options ...)
Many times it may be useful to add the date to filenames when saving data or plots as files.
The built-in functions _date()_, _Sys.Date()_ and _Sys.Time()_ are a good way to start.
Generally I like to use abbreviated month-names, since the order of writing the month differs between Europe and the USA;
this may help avoid misinterpreting dates compared to writing the month as a number.
For example, 2021-03-05 means March 5th in Europe, while in other places it may be read as May 3rd.
The R-functions mentioned above use local language settings, so I wrote the function `sysDate()` to
produce compact versions of the current date, **independent of local language settings** (or locale-specific, if you prefer;
note that in some languages - like French - the first 3 letters of the month-name may give ambiguous results !),
and without white space ' ' (which I prefer to avoid in file-names).
Please look at the function's help-page for all available options.
```{r sysDate1, echo=TRUE}
## To get started
Sys.Date()
## Compact English names (in European order), no matter what your local settings are :
sysDate()
```
The table below shows a number of options to write the date in English or using local month-names :
```{r DateTab, echo=TRUE}
tabD <- cbind(paste0("univ",1:6), c(sysDate(style="univ1"), sysDate(style="univ2"),
sysDate(style="univ3"), sysDate(style="univ4"), as.character(sysDate(style="univ5")),
sysDate(style="univ6")), paste0(" local",1:6),
c(sysDate(style="local1"), sysDate(style="local2"), sysDate(style="local3"),
sysDate(style="local4"), sysDate(style="local5"), sysDate(style="local6")))
knitr::kable(tabD, caption="Various ways of writing current date")
```
## Session-Info
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/wrMisc/inst/doc/doc/wrMiscVignette1.Rmd
|
## ----setup0, include=FALSE, echo=FALSE, messages=FALSE, warnings=FALSE--------
suppressPackageStartupMessages({
library(wrMisc)
})
## ----install, echo=TRUE, eval=FALSE-------------------------------------------
# ## If not already installed, you'll have to install the package first.
# ## This is the basic installation commande in R
# install.packages("wrMisc")
## ----install2, echo=TRUE, eval=FALSE------------------------------------------
# packages <- c("knitr", "rmarkdown", "BiocManager", "kableExtra", "boot", "data.tree", "data.table",
# "fdrtool", "RColorBrewer", "Rcpp", "wrMisc", "wrGraph", "wrProteo")
# checkInstallPkg <- function(pkg) { # install function
# if(!requireNamespace(pkg, quietly=TRUE)) install.packages(pkg) }
#
# ## install if not yet present
# sapply(packages, checkInstallPkg)
## ----install3, echo=TRUE, eval=FALSE------------------------------------------
# ## Installation of limma
# BiocManager::install("limma")
## ----install4, echo=TRUE, eval=FALSE------------------------------------------
# ## Now you can open this vignette out of R:
# vignette("wrMiscVignette1", package="wrMisc")
## ----setup1-------------------------------------------------------------------
library("wrMisc")
library("knitr")
## This is 'wrMisc' version number :
packageVersion("wrMisc")
## ----basicVariability, echo=TRUE----------------------------------------------
grp1 <- rep(LETTERS[1:3], c(3,4,3))
sampNa1 <- paste0(grp1, c(1:3,1:4,1:3))
set.seed(2016); dat1 <- matrix(round(c(runif(50000) +rep(1:1000,50)),3),
ncol=10, dimnames=list(NULL,sampNa1))
dim(dat1)
head(dat1)
## ----sdForEachRow, echo=TRUE--------------------------------------------------
head(rowSds(dat1))
system.time(sd1 <- rowSds(dat1))
system.time(sd2 <- apply(dat1, 1, sd))
## ----usingApply, echo=TRUE----------------------------------------------------
table(round(sd1, 13)==round(sd2, 13))
## ----calculateRowCV, echo=TRUE------------------------------------------------
system.time(cv1 <- rowCVs(dat1))
system.time(cv2 <- apply(dat1, 1, sd) / rowMeans(dat1))
# typically the calculation using rowCVs is much faster
head(cv1)
# results from the 'conventional' way
head(cv2)
## ----rowGrpMeans1, echo=TRUE--------------------------------------------------
# we already defined the grouping :
grp1
## the mean for each group and row
system.time(mean1Gr <- rowGrpMeans(dat1, grp1))
## ----sdOrCVbyGrp, echo=TRUE---------------------------------------------------
## Now the sd for each row and group
system.time(sd1Gr <- rowGrpSds(dat1, grp1))
# will give us a matrix with the sd for each group & line
head(sd1Gr)
# Let's check the results of the first line :
sd1Gr[1,] == c(sd(dat1[1,1:3]), sd(dat1[1,4:7]), sd(dat1[1,8:10]))
# The CV :
system.time(cv1Gr <- rowGrpCV(dat1, grp1))
head(cv1Gr)
## ----rowGrpNA1, echo=TRUE-----------------------------------------------------
mat2 <- c(22.2, 22.5, 22.2, 22.2, 21.5, 22.0, 22.1, 21.7, 21.5, 22, 22.2, 22.7,
NA, NA, NA, NA, NA, NA, NA, 21.2, NA, NA, NA, NA,
NA, 22.6, 23.2, 23.2, 22.4, 22.8, 22.8, NA, 23.3, 23.2, NA, 23.7,
NA, 23.0, 23.1, 23.0, 23.2, 23.2, NA, 23.3, NA, NA, 23.3, 23.8)
mat2 <- matrix(mat2, ncol=12, byrow=TRUE)
## The definition of the groups (ie replicates)
gr4 <- gl(3, 4, labels=LETTERS[1:3])
## ----rowGrpNA2, echo=TRUE-----------------------------------------------------
rowGrpNA(mat2,gr4)
## ----naOmit, echo=TRUE--------------------------------------------------------
aA <- c(11:13,NA,10,NA)
str(naOmit(aA))
# the 'classical' na.omit also stores which elements were NA
str(na.omit(aA))
## ----minDiff, echo=TRUE-------------------------------------------------------
set.seed(2017); aa <- 10 *c(0.1 +round(runif(20),2), 0.53, 0.53)
head(aa)
minDiff(aa,ppm=FALSE)
## ----partUnlist_1, echo=TRUE--------------------------------------------------
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
partUnlist(bb)
partUnlist(lapply(bb,.asDF2))
## ----unlist_1, echo=TRUE------------------------------------------------------
head(unlist(bb, recursive=FALSE))
## ----asSepList, echo=TRUE-----------------------------------------------------
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
asSepList(bb)
## ----lappend1, echo=TRUE------------------------------------------------------
li1 <- list(a=1, b=2, c=3)
li2 <- list(A=11, b=2, C=13)
append(li1, li2)
## ----lappend2, echo=TRUE------------------------------------------------------
appendNR(li1, li2)
## ----lrbind, echo=TRUE--------------------------------------------------------
dat2 <- matrix(11:34, ncol=3, dimnames=list(letters[1:8], colnames=LETTERS[1:3]))
lst2 <- by(dat2, rep(1:3,c(3,2,3)), as.matrix)
lst2
# join list-elements (back) into single matrix
lrbind(lst2)
## ----mergeMatrixList, echo=TRUE-----------------------------------------------
mat1 <- matrix(11:18, ncol=2, dimnames=list(letters[3:6],LETTERS[1:2]))
mat2 <- matrix(21:28, ncol=2, dimnames=list(letters[2:5],LETTERS[3:4]))
mat3 <- matrix(31:38, ncol=2, dimnames=list(letters[c(1,3:4,3)],LETTERS[4:5]))
#
mergeMatrixList(list(mat1, mat2), useColumn="all")
# with custom names for the individual matrices
mergeMatrixList(list(m1=mat1, m2=mat2, mat3), mode="union", useColumn=2)
## ----mergeMatrices, echo=TRUE-------------------------------------------------
mergeMatrices(mat1, mat2)
mergeMatrices(mat1, mat2, mat3, mode="union", useColumn=2)
## custom names for matrix-origin
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=2)
## flexible/custom selection of columns
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=list(1,1:2,2))
## ----fuseCommonListElem, echo=TRUE--------------------------------------------
val1 <- 10 +1:26
names(val1) <- letters
(lst1 <- list(c=val1[3:6], a=val1[1:3], b=val1[2:3] ,a=val1[12], c=val1[13]))
## here the names 'a' and 'c' appear twice :
names(lst1)
## now, let's fuse all 'a' and 'c'
fuseCommonListElem(lst1)
## ----listBatchReplace1, echo=TRUE---------------------------------------------
lst1 <- list(m1=matrix(11:18, ncol=2), m2=matrix(21:30, ncol=2), indR=31:34,
m3=matrix(c(21:23,NA,25:27,NA), ncol=2))
filterLiColDeList(lst1, useLines=2:3)
filterLiColDeList(lst1, useLines="allNA", ref=3)
## ----replInList1, echo=TRUE---------------------------------------------------
(lst1 <- list(aa=1:4, bb=c("abc","efg","abhh","effge"), cc=c("abdc","efg","efgh")))
listBatchReplace(lst1, search="efg", repl="EFG", silent=FALSE)
## ----listGroupsByNames, echo=TRUE---------------------------------------------
ser1 <- 1:7; names(ser1) <- c("AA","BB","AA.1","CC","AA.b","BB.e","A")
listGroupsByNames(ser1)
## ----listGroupsByNames2, echo=TRUE--------------------------------------------
listGroupsByNames((1:10)/5)
## ----filterList, echo=TRUE----------------------------------------------------
set.seed(2020); dat1 <- round(runif(80),2)
list1 <- list(m1=matrix(dat1[1:40], ncol=8), m2=matrix(dat1[41:80], ncol=8), other=letters[1:8])
rownames(list1$m1) <- rownames(list1$m2) <- paste0("line",1:5)
# Note: the list-element list1$other has a length different to that of filt. Thus, it won't get filtered.
filterList(list1, list1$m1[,1] >0.4) # filter according to 1st column of $m1 ...
filterList(list1, list1$m1 >0.4)
## ----matr2list, echo=TRUE-----------------------------------------------------
(mat1 <- matrix(1:12, ncol=3, dimnames=list(letters[1:4],LETTERS[1:3])))
str(matr2list(mat1))
## ----array0, echo=TRUE--------------------------------------------------------
(arr1 <- array(c(6:4,4:24), dim=c(4,3,2), dimnames=list(c(LETTERS[1:4]),
paste("col",1:3,sep=""),c("ch1","ch2"))))
## ----arrayCV1, echo=TRUE------------------------------------------------------
arrayCV(arr1)
# this is equivalent to
cbind(rowCVs(arr1[,,1]), rowCVs(arr1[,,2]))
## ----arrayCV2, echo=TRUE------------------------------------------------------
arrayCV(arr1, byDim=2)
## ----cutArrayInCluLike, echo=TRUE---------------------------------------------
cutArrayInCluLike(arr1, cluOrg=c(2,1,2,1))
## ----filt3dimArr, echo=TRUE---------------------------------------------------
filt3dimArr(arr1, displCrit=c("col1","col2"), filtCrit="col2", filtVal=7, filtTy=">")
## ----repeated1, echo=TRUE-----------------------------------------------------
## some text toy data
tr <- c("li0","n",NA,NA, rep(c("li2","li3"),2), rep("n",4))
## ----repeated2, echo=TRUE-----------------------------------------------------
table(tr)
unique(tr)
duplicated(tr, fromLast=FALSE)
## ----repeated3, echo=TRUE-----------------------------------------------------
aa <- c(11:16,NA,14:12,NA,14)
names(aa) <- letters[1:length(aa)]
aa
## ----findRepeated, echo=TRUE--------------------------------------------------
findRepeated(aa)
## ----firstOfRepeated, echo=TRUE-----------------------------------------------
firstOfRepeated(aa)
aa[firstOfRepeated(aa)$indUniq] # only unique with their names
unique(aa) # unique() does not return any names !
## ----correctToUnique1, echo=TRUE----------------------------------------------
correctToUnique(aa)
correctToUnique(aa, sep=".", NAenum=FALSE) # keep NAs (ie without transforming to character)
## ----nonAmbiguousNum, echo=TRUE-----------------------------------------------
unique(aa) # names are lost
nonAmbiguousNum(aa)
nonAmbiguousNum(aa, uniq=FALSE, asLi=TRUE) # separate in list unique and repeated
## ----sortByNRepeated, echo=TRUE-----------------------------------------------
cities <- c("Bangkok","London","Paris", "Singapore","New York City", "Istambul","Delhi","Rome","Dubai")
sortByNRepeated(x=cities[c(1:4)], y=cities[c(2:3,5:8)])
## or (unlimited) multiple inputs via list
choices1 <- list(Mary=cities[c(1:4)], Olivia=cities[c(2:3,5:8)], Paul=cities[c(5:3,9,5)]) # Note : Paul cited NYC twice !
table(unlist(choices1))
sortByNRepeated(choices1)
sortByNRepeated(choices1, filterIntraRep=FALSE) # without correcting multiple citation of NYC by Paul
## ----cbindNR, echo=TRUE-------------------------------------------------------
## First we'll make soe toy data :
(ma1 <- matrix(1:6, ncol=3, dimnames=list(1:2,LETTERS[3:1])))
(ma2 <- matrix(11:16, ncol=3, dimnames=list(1:2,LETTERS[3:5])))
## now we can join 2 or more matrixes
cbindNR(ma1, ma2, summarizeAs="mean") # average of both columns 'C'
## ----firstLineOfDat, echo=TRUE------------------------------------------------
(mat1 <- matrix(c(1:6, rep(1:3,1:3)), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2])))
## ----firstLineOfDat2, echo=TRUE-----------------------------------------------
firstLineOfDat(mat1, refCol=2)
## ----firstOfRepLines, echo=TRUE-----------------------------------------------
mat2 <- matrix(c("e","n","a","n","z","z","n","z","z","b",
"","n","c","n","","","n","","","z"), ncol=2)
firstOfRepLines(mat2, out="conc")
# or as index :
firstOfRepLines(mat2)
## ----nonredDataFrame, echo=TRUE-----------------------------------------------
(df1 <- data.frame(cbind(xA=letters[1:5], xB=c("h","h","f","e","f"), xC=LETTERS[1:5])))
## ----nonredDataFrame2, echo=TRUE----------------------------------------------
nonredDataFrame(df1, useCol=c("xB","xC"))
# without counter or concatenating
df1[which(!duplicated(df1[,2])),]
# or
df1[firstOfRepLines(df1,useCol=2),]
## ----get1stOfRepeatedByCol, echo=TRUE-----------------------------------------
mat2 <- cbind(no=as.character(1:20), seq=sample(LETTERS[1:15], 20, repl=TRUE),
ty=sample(c("full","Nter","inter"),20,repl=TRUE), ambig=rep(NA,20), seqNa=1:20)
(mat2uniq <- get1stOfRepeatedByCol(mat2, sortBy="seq", sortSupl="ty"))
# the values from column 'seq' are indeed unique
table(mat2uniq[,"seq"])
# This will return all first repeated (may be >1) but without furter sorting
# along column 'ty' neither marking in comumn 'ambig').
mat2[which(duplicated(mat2[,2],fromLast=FALSE)),]
## ----nonAmbiguousMat, echo=TRUE-----------------------------------------------
nonAmbiguousMat(mat1,by=2)
## ----nonAmbiguousMat2, echo=TRUE----------------------------------------------
set.seed(2017); mat3 <- matrix(c(1:100,round(rnorm(200),2)), ncol=3,
dimnames=list(1:100,LETTERS[1:3]));
head(mat3U <- nonAmbiguousMat(mat3, by="B", na="_", uniqO=FALSE), n=15)
head(get1stOfRepeatedByCol(mat3, sortB="B", sortS="B"))
## ----combineReplFromListToMatr, echo=TRUE-------------------------------------
lst2 <- list(aa_1x=matrix(1:12, nrow=4, byrow=TRUE), ab_2x=matrix(24:13, nrow=4, byrow=TRUE))
combineReplFromListToMatr(lst2)
## ----combineRedundLinesInListAcRef, echo=TRUE---------------------------------
x1 <- list(quant=matrix(11:34, ncol=3, dimnames=list(letters[8:1], LETTERS[11:13])),
annot=matrix(paste0(LETTERS[c(1:4,6,3:5)],LETTERS[c(1:4,6,3:5)]), ncol=1,
dimnames=list(paste(letters[1:8]),"xx")) )
combineRedundLinesInList(lst=x1, refNa="annot", datNa="quant", refColNa="xx")
## ----nonRedundLines, echo=TRUE------------------------------------------------
mat4 <- matrix(rep(c(1,1:3,3,1),2), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2]))
nonRedundLines(mat4)
## ----filtSizeUniq, echo=TRUE--------------------------------------------------
# input: c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), filtUn=TRUE, minSi=NULL)
# here a,b,c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), ref=c(letters[c(1:26,1:3)],
"dd","dd","bb","ddd"), filtUn=TRUE, minSi=NULL)
## ----makeNRedMatr, echo=TRUE--------------------------------------------------
t3 <- data.frame(ref=rep(11:15,3), tx=letters[1:15],
matrix(round(runif(30,-3,2),1), nc=2), stringsAsFactors=FALSE)
# First we split the data.frame in list
by(t3,t3[,1],function(x) x)
t(sapply(by(t3,t3[,1],function(x) x), summarizeCols, me="maxAbsOfRef"))
(xt3 <- makeNRedMatr(t3, summ="mean", iniID="ref"))
(xt3 <- makeNRedMatr(t3, summ=unlist(list(X1="maxAbsOfRef")), iniID="ref"))
## ----combineRedBasedOnCol, echo=TRUE------------------------------------------
matr <- matrix(c(letters[1:6],"h","h","f","e",LETTERS[1:5]), ncol=3,
dimnames=list(letters[11:15],c("xA","xB","xC")))
combineRedBasedOnCol(matr, colN="xB")
combineRedBasedOnCol(rbind(matr[1,],matr), colN="xB")
## ----convMatr2df, echo=TRUE---------------------------------------------------
x <- 1
dat1 <- matrix(1:10, ncol=2)
rownames(dat1) <- letters[c(1:3,2,5)]
## as.data.frame(dat1) ... would result in an error
convMatr2df(dat1)
convMatr2df(data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3))
tmp <- data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
tmp <- data.frame(a=as.character((1:3)/2), b=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
## ----combineOverlapInfo, echo=TRUE--------------------------------------------
set.seed(2013)
datT2 <- matrix(round(rnorm(200)+3,1), ncol=2, dimnames=list(paste("li",1:100,sep=""),
letters[23:24]))
# (mimick) some short and longer names for each line
inf2 <- cbind(sh=paste(rep(letters[1:4],each=26), rep(letters,4),1:(26*4),sep=""),
lo=paste(rep(LETTERS[1:4],each=26), rep(LETTERS,4), 1:(26*4), ",",
rep(letters[sample.int(26)],4), rep(letters[sample.int(26)],4), sep=""))[1:100,]
## We'll use this to test :
head(datT2, n=10)
## let's assign to each pair of x & y values a 'cluster' (column _clu_, the column _combInf_ tells us which lines/indexes are in this cluster)
head(combineOverlapInfo(datT2, disThr=0.03), n=10)
## it is also possible to rather display names (eg gene or protein-names) instead of index values
head(combineOverlapInfo(datT2, suplI=inf2[,2], disThr=0.03), n=10)
## ----getValuesByUnique, echo=TRUE---------------------------------------------
dat <- 11:19
names(dat) <- letters[c(6:3,2:4,8,3)]
## Here the names are not unique.
## Thus, the values can be binned by their (non-unique) names and a representative values calculated.
## Let's make a 'datUniq' with the mean of each group of values :
datUniq <- round(tapply(dat, names(dat), mean),1)
## now we propagate the mean values to the full vector
getValuesByUnique(dat, datUniq)
cbind(ini=dat,firstOfRep=getValuesByUnique(dat, datUniq),
indexUniq=getValuesByUnique(dat, datUniq, asIn=TRUE))
## ----combineByEitherFactor, echo=TRUE-----------------------------------------
nn <- rep(c("a","e","b","c","d","g","f"),c(3,1,2,2,1,2,1))
qq <- rep(c("m","n","p","o","q"),c(2,1,1,4,4))
nq <- cbind(nn,qq)[c(4,2,9,11,6,10,7,3,5,1,12,8),]
## Here we consider 2 columns 'nn' and 'qq' whe trying to regroup common values
## (eg value 'a' from column 'nn' and value 'o' from 'qq')
combineByEitherFactor(nq, 1, 2, nBy=FALSE)
## ----combineByEitherFactor2, echo=TRUE----------------------------------------
## the same, but including n by group/cluster
combineByEitherFactor(nq, 1, 2, nBy=TRUE)
## Not running further iterations works faster, but you may not reach 'convergence' immediately
combineByEitherFactor(nq,1, 2, nBy=FALSE)
## ----combineByEitherFactor3, echo=TRUE----------------------------------------
## another example
mm <- rep(c("a","b","c","d","e"), c(3,4,2,3,1))
pp <- rep(c("m","n","o","p","q"), c(2,2,2,2,5))
combineByEitherFactor(cbind(mm,pp), 1, 2, con=FALSE, nBy=TRUE)
## ----multiCharReplace1, echo=TRUE---------------------------------------------
# replace character content
x1 <- c("ab","bc","cd","efg","ghj")
multiCharReplace(x1, cbind(old=c("bc","efg"), new=c("BBCC","EF")))
# works also on matrix and/or to replace numeric content :
x3 <- matrix(11:16, ncol=2)
multiCharReplace(x3, cbind(12:13,112:113))
## ----multiCharReplace2, echo=TRUE---------------------------------------------
# replace and return a logical vector
x2 <- c("High","n/a","High","High","Low")
multiCharReplace(x2,cbind(old=c("n/a","Low","High"), new=c(NA,FALSE,TRUE)), convTo="logical")
## ----multiMatch1, echo=TRUE---------------------------------------------------
aa <- c("m","k","j; aa","m; aa; bb; o","n; dd","aa","cc")
bb <- c("aa","dd","aa; bb; q","p; cc")
## result as list of indexes
(bOnA <- multiMatch(aa, bb, method="asIndex")) # match bb on aa
## more convenient to the human reader
(bOnA <- multiMatch(aa, bb)) # match bb on aa
(bOnA <- multiMatch(aa, bb, method="matchedL")) # match bb on aa
## ----compGlobPat1, echo=TRUE--------------------------------------------------
aa <- letters[rep(c(3:1,4), each=2)]
ab <- letters[rep(c(5,8:6), each=2)] # 'same general' pattern to aa
ac <- letters[c(1:2,1:3,3:4,4)] # NOT 'same general' pattern to any other
ad <- letters[c(6:8,8:6,7:6)] # NOT 'same general' pattern to any other
## ----compGlobPat2, echo=TRUE--------------------------------------------------
## get global patterns
cbind(aa= match(aa, unique(aa)),
ab= match(ab, unique(ab)),
ac= match(ac, unique(ac)),
ad= match(ad, unique(ad)) )
## ----compGlobPat3, echo=TRUE--------------------------------------------------
bb <- data.frame(ind=1:length(aa), a=aa, b=ab, c=ac, d=ad)
## ----compGlobPat4, echo=TRUE--------------------------------------------------
replicateStructure(bb)
## ----compGlobPat5, echo=TRUE--------------------------------------------------
replicateStructure(bb, method="combAll")
## ----compGlobPat6, echo=TRUE--------------------------------------------------
replicateStructure(bb, method="combNonOrth")
## ----checkSimValueInSer, echo=TRUE--------------------------------------------
va1 <- c(4:7,7,7,7,7,8:10) + (1:11)/28600
checkSimValueInSer(va1, ppm=5)
data.frame(va=sort(va1), simil=checkSimValueInSer(va1))
## ----findCloseMatch1, echo=TRUE-----------------------------------------------
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,15.9,13.5,15.7,14.1,5)
(cloMa <- findCloseMatch(x=aA, y=cC, com="diff", lim=0.5, sor=FALSE))
## ----closeMatchMatrix1, echo=TRUE---------------------------------------------
# all matches (of 2nd arg) to/within limit for each of 1st arg ('x'); the 2nd arg ('y') = cC
# first let's display only one single closest/best hit
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=TRUE)) #
## ----closeMatchMatrix2, echo=TRUE---------------------------------------------
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=FALSE,origN=TRUE)) #
(maAa <- closeMatchMatrix(cloMa, cbind(valA=81:87, aA), cbind(valC=91:99, cC), colM=2,
colP=2, lim=FALSE))
(maAa <- closeMatchMatrix(cloMa, cbind(aA,valA=81:87), cC, lim=FALSE, deb=TRUE)) #
a2 <- aA; names(a2) <- letters[1:length(a2)]; c2 <- cC; names(c2) <- letters[10 +1:length(c2)]
(cloM2 <- findCloseMatch(x=a2, y=c2, com="diff", lim=0.5, sor=FALSE))
(maA2 <- closeMatchMatrix(cloM2, predM=cbind(valA=81:87, a2),
measM=cbind(valC=91:99, c2), colM=2, colP=2, lim=FALSE, asData=TRUE))
(maA2 <- closeMatchMatrix(cloM2, cbind(id=names(a2), valA=81:87,a2), cbind(id=names(c2),
valC=91:99,c2), colM=3, colP=3, lim=FALSE, deb=FALSE))
## ----findSimilFrom2sets, echo=TRUE--------------------------------------------
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,12.6,15.9,14.1)
aZ <- matrix(c(aA,aA+20), ncol=2, dimnames=list(letters[1:length(aA)],c("aaA","aZ")))
cZ <- matrix(c(cC,cC+20), ncol=2, dimnames=list(letters[1:length(cC)],c("ccC","cZ")))
findCloseMatch(cC, aA, com="diff", lim=0.5, sor=FALSE)
findSimilFrom2sets(aA, cC)
findSimilFrom2sets(cC, aA)
findSimilFrom2sets(aA, cC, best=FALSE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=5e4, deb=TRUE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=FALSE)
# below: find fewer 'best matches' since the search window is larger (ie more good hits compete !)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=TRUE)
## ----fusePairs, echo=TRUE-----------------------------------------------------
(daPa <- matrix(c(1:5,8,2:6,9), ncol=2))
fusePairs(daPa, maxFuse=4)
## ----elimCloseCoord1, echo=TRUE-----------------------------------------------
da1 <- matrix(c(rep(0:4,5),0.01,1.1,2.04,3.07,4.5), ncol=2); da1[,1] <- da1[,1]*99; head(da1)
elimCloseCoord(da1)
## ----stableMode, echo=TRUE----------------------------------------------------
set.seed(2012); dat <- round(c(rnorm(120,0,1.2), rnorm(80,0.8,0.6), rnorm(25,-0.6,0.05), runif(200)),3)
dat <- dat[which(dat > -2 & dat <2)]
stableMode(dat)
## ----stableMode2, fig.height=8, fig.width=9, fig.align="center", echo=TRUE----
layout(1:2)
plot(1:length(dat), sort(dat), type="l", main="Sorted Values", xlab="rank", las=1)
abline(h=stableMode(dat, silent=TRUE), lty=2,col=2)
legend("topleft",c("stableMode"), text.col=2, col=2, lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
plot(density(dat, kernel="gaussian", adjust=0.7), xlab="Value of dat", main="Density Estimate Plot")
useCol <- c("red","green","blue","grey55")
legend("topleft",c("dens","binning","BBmisc","allModes"), text.col=useCol, col=useCol,
lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
abline(v=stableMode(dat, method="dens", silent=TRUE), lty=2, col="red", lwd=2)
abline(v=stableMode(dat, method="binning", silent=TRUE), lty=2, col="green")
abline(v=stableMode(dat, method="BBmisc", silent=TRUE), lty=2, col="blue")
abline(v=stableMode(dat, method="allModes"), lty=2, col="grey55")
## ----stableMode3, echo=TRUE---------------------------------------------------
set.seed(2021)
x <- sample(letters, 50000, replace=TRUE)
stableMode(x, method="mode")
stableMode(x, method="allModes")
## ----trimRedundText1, echo=TRUE-----------------------------------------------
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1)
## ----keepCommonText1, echo=TRUE-----------------------------------------------
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1, side="left") # remove redundant
keepCommonText(txt1, side="terminal") # keep redundant
keepCommonText(txt1, side="center") # computationally easier
## ----keepCommonText2, echo=TRUE-----------------------------------------------
txt2 <- c("abcd_abc_kjh", "bcd_abc123", "cd_abc_po")
keepCommonText(txt2, side="center")
## ----rmEnumeratorName1, echo=TRUE---------------------------------------------
xx <- c("hg_Re1","hjRe2_Re2","hk-Re3_Re33")
rmEnumeratorName(xx)
rmEnumeratorName(xx, newSep="--")
rmEnumeratorName(xx, incl="anyCase")
## ----rmEnumeratorName2, echo=TRUE---------------------------------------------
xy <- cbind(a=11:13, b=c("11#11","2_No2","333_samp333"), c=xx)
rmEnumeratorName(xy)
rmEnumeratorName(xy,incl=c("anyCase","trim2","rmEnumL"))
## ----rmEnumeratorName3, echo=TRUE---------------------------------------------
xz <- cbind(a=11:13, b=c("23#11","4#2","567#333"), c=xx)
apply(xz, 2, rmEnumeratorName, sepEnum=c("","_"), newSep="_", silent=TRUE)
## ----unifyEnumerator1, echo=TRUE----------------------------------------------
unifyEnumerator(c("ab-1","ab-2","c-3"))
unifyEnumerator(c("ab-R1","ab-R2","c-R3"))
unifyEnumerator(c("ab-1","c3-2","dR3"), stringentMatch=FALSE)
## ----adjustUnitPrefix1, echo=TRUE---------------------------------------------
adjustUnitPrefix(c("10.psec","2 fsec"), unit="sec")
## ----adjustUnitPrefix2, echo=TRUE---------------------------------------------
adjustUnitPrefix(c("10.psec abc","2 fsec etc"), unit="sec")
## ----mergeVectors1, echo=TRUE-------------------------------------------------
x1 <- c(a=1, b=11, c=21)
x2 <- c(b=12, c=22, a=2)
x3 <- c(a=3, d=43)
mergeVectors(vect1=x1, vect2=x2, vect3=x3)
## ----mergeVectors2, echo=TRUE-------------------------------------------------
mergeVectors(vect1=x1, vect2=x2, vect3=x3, inclInfo=TRUE) # return list with additional info
## ----mergeVectors3, echo=TRUE-------------------------------------------------
x4 <- 41:44 # no names - not conform for merging and will be ignored
mergeVectors(x1, x2, x3, x4)
## ----matchMatrixLinesToRef1, echo=TRUE----------------------------------------
## Note : columns b and e allow non-ambiguous match, not all elements of e are present in a
mat0 <- cbind(a=c("mvvk","axxd","bxxd","vv"),b=c("iwwy","iyyu","kvvh","gxx"), c=rep(9,4),
d=c("hgf","hgf","vxc","nvnn"), e=c("_vv_","_ww_","_xx_","_yy_"))
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[,5])
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[1:3,5], inclInfo=TRUE)
matchMatrixLinesToRef(mat0[,-2], ref=mat0[,2], inclInfo=TRUE) # needs 'reverse grep'
## ----orderMatrToRef1, echo=TRUE-----------------------------------------------
mat1 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], rep(1:5)), ncol=3)
orderMatrToRef(mat1, paste0(letters[c(3,4,5,3,4)],c(1,3,5,2,4)))
mat2 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3:5 )), ncol=3)
orderMatrToRef(mat2, paste0(letters[c(3,4,5,3,4)],c(1,3,5,1,4)))
mat3 <- matrix(paste0(letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3,3,5 )), ncol=3)
orderMatrToRef(mat3, paste0("__",letters[c(3,4,5,3,4)],c(1,3,5,1,3)))
## ----concatMatch1, echo=TRUE--------------------------------------------------
## simple example without concatenations or text-extensions
x0 <- c("ZZ","YY","AA","BB","DD","CC","D")
tab0 <- c("AA","BB,E","CC","FF,U")
match(x0, tab0)
concatMatch(x0, tab0) # same result as match(), but with names
## now let's construct something similar but with concatenations and text-extensions
x1 <- c("ZZ","YY","AA","BB-2","DD","CCdef","Dxy") # modif of single ID (no concat)
tab1 <- c("AA","WW,Vde,BB-5,E","CCab","FF,Uef")
match(x1, tab1) # match finds only the 'simplest' case (ie "AA")
concatMatch(x1, tab1) # finds all hits as in example above
x2 <- c("ZZ,Z","YY,Y","AA,Z,Y","BB-2","DD","X,CCdef","Dxy") # conatenated in 'x'
tab2 <- c("AA","WW,Vde,BB-5,E","CCab,WW","FF,UU")
concatMatch(x2, tab2) # concatenation in both 'x' and 'table'
## ----checkStrictOrder1, echo=TRUE---------------------------------------------
set.seed(2005); mat1 <- rbind(matrix(round(runif(40),1), ncol=4), rep(1,4))
head(mat1)
checkStrictOrder(mat1); mat1[which(checkStrictOrder(mat1)[,2]==0),]
## ----checkGrpOrder1, echo=TRUE------------------------------------------------
head(mat1)
checkGrpOrder(mat1)
checkGrpOrder(mat1, revRank=FALSE) # only constant 'up' tested
## ----linModelSelect1, echo=TRUE-----------------------------------------------
li1 <- rep(c(4,3,3:6), each=3) + round(runif(18)/5,2)
names(li1) <- paste0(rep(letters[1:5], each=3), rep(1:3,6))
li2 <- rep(c(6,3:7), each=3) + round(runif(18)/5, 2)
dat2 <- rbind(P1=li1, P2=li2)
exp2 <- rep(c(11:16), each=3)
exp4 <- rep(c(3,10,30,100,300,1000), each=3)
## Check & plot for linear model
linModelSelect("P1", dat2, expect=exp2)
linModelSelect("P2", dat2, expect=exp2)
## ----plotLinModelCoef1, echo=TRUE---------------------------------------------
set.seed(2020)
x1 <- matrix(rep(c(2,2:5),each=20) + runif(100) +rep(c(0,0.5,2:3,5),20),
byrow=FALSE, ncol=10, dimnames=list(LETTERS[1:10],NULL))
## just the 1st regression :
summary(lm(b~a, data=data.frame(b=x1[,1], a=rep(1:5,each=2))))
## all regressions
x1.lmSum <- t(sapply(lapply(rownames(x1), linModelSelect, dat=x1,
expect=rep(1:5,each=2), silent=TRUE, plotGraph=FALSE),
function(x) c(x$coef[2,c(4,1)], startFr=x$startLev)))
x1.lmSum <- cbind(x1.lmSum, medQuantity=apply(x1,1,median))
x1.lmSum[,1] <- log10(x1.lmSum[,1])
head(x1.lmSum)
## ----plotLinModelCoef2, echo=TRUE---------------------------------------------
wrGraphOK <- requireNamespace("wrGraph", quietly=TRUE) # check if package is available
if(wrGraphOK) wrGraph::plotW2Leg(x1.lmSum, useCol=c("Pr(>|t|)","Estimate","medQuantity","startFr"),
legendloc="topleft", txtLegend="start at")
## ----ratioAllComb0, echo=TRUE-------------------------------------------------
set.seed(2014); ra1 <- c(rnorm(9,2,1), runif(8,1,2))
## ----ratioAllComb1, echo=TRUE-------------------------------------------------
median(ra1[1:9]) / median(ra1[10:17])
## ----ratioAllComb2, echo=TRUE-------------------------------------------------
summary( ratioAllComb(ra1[1:9], ra1[10:17]))
boxplot(list(norm=ra1[1:9], unif=ra1[10:17], rat=ratioAllComb(ra1[1:9],ra1[10:17])))
## ----combineAsN1, echo=TRUE---------------------------------------------------
tm1 <- list(a1=LETTERS[1:7], a2=LETTERS[3:9], a3=LETTERS[6:10], a4=LETTERS[8:12])
combineAsN(tm1, nCombin=3, lev=gl(1,4))[,1,]
## ----combineAsN2, echo=TRUE---------------------------------------------------
## different levels/groups in list-elements
tm4 <- list(a1=LETTERS[1:15], a2=LETTERS[3:16], a3=LETTERS[6:17], a4=LETTERS[8:19],
b1=LETTERS[5:19], b2=LETTERS[7:20], b3=LETTERS[11:24], b4=LETTERS[13:25], c1=LETTERS[17:26],
d1=LETTERS[4:12], d2=LETTERS[5:11], d3=LETTERS[6:12], e1=LETTERS[7:10])
te4 <- combineAsN(tm4, nCombin=4, lev=substr(names(tm4),1,1))
str(te4)
te4[,,1] # the counts part only
## ----readCsvBatch, echo=TRUE--------------------------------------------------
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("pl01_1.csv","pl01_2.csv","pl02_1.csv","pl02_2.csv")
datAll <- readCsvBatch(fiNa, path1, silent=TRUE)
str(datAll)
## ----readCsvBatch2, echo=TRUE-------------------------------------------------
## batch reading of all csv files in specified path :
datAll2 <- readCsvBatch(fileNames=NULL, path=path1, silent=TRUE)
str(datAll2)
## ----readTabulatedBatch1, echo=TRUE-------------------------------------------
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("a1.txt","a2.txt")
allTxt <- readTabulatedBatch(fiNa, path1)
str(allTxt)
## ----readVarColumns, echo=TRUE------------------------------------------------
path1 <- system.file("extdata", package="wrMisc")
fiNa <- "Names1.tsv"
datAll <- readVarColumns(fiName=file.path(path1,fiNa), sep="\t")
str(datAll)
## ----readGit1, echo=TRUE------------------------------------------------------
## An example url with tabulated data :
url1 <- "https://github.com/bigbio/proteomics-metadata-standard/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv"
gitDataUrl(url1)
## ----readGit2, echo=TRUE------------------------------------------------------
dataPxd <- try(read.delim(gitDataUrl(url1), sep='\t', header=TRUE))
str(dataPxd)
## ----presenceGrpFilt1, echo=TRUE----------------------------------------------
dat1 <- matrix(1:56,ncol=7)
dat1[c(2,3,4,5,6,10,12,18,19,20,22,23,26,27,28,30,31,34,38,39,50,54)] <- NA
grp1 <- gl(3,3)[-(3:4)]
dat1
## now let's filter
presenceGrpFilt(dat1, gr=grp1, presThr=0.75) # stringent
presenceGrpFilt(dat1, gr=grp1, presThr=0.25) # less stringent
## ----presenceFilt, echo=TRUE--------------------------------------------------
presenceFilt(dat1, gr=grp1, maxGr=1, ratM=0.1)
presenceFilt(dat1, gr=grp1, maxGr=2, rat=0.5)
## ----cleanReplicates, echo=TRUE-----------------------------------------------
(mat3 <- matrix(c(19,20,30,40, 18,19,28,39, 16,14,35,41, 17,20,30,40), ncol=4))
cleanReplicates(mat3, nOutl=1)
cleanReplicates(mat3, nOutl=3)
## ----normalizeThis0, echo=TRUE------------------------------------------------
set.seed(2015); rand1 <- round(runif(300) +rnorm(300,0,2),3)
dat1 <- cbind(ser1=round(100:1 +rand1[1:100]), ser2=round(1.2*(100:1 +rand1[101:200]) -2),
ser3=round((100:1 +rand1[201:300])^1.2-3))
dat1 <- cbind(dat1, ser4=round(dat1[,1]^seq(2,5,length.out=100) +rand1[11:110],1))
## Let's introduce some NAs
dat1[dat1 <1] <- NA
## Let's get a quick overview of the data
summary(dat1)
## some selected lines (indeed, the 4th column always appears much higher)
dat1[c(1:5,50:54,95:100),]
## ----normalizeThis1, echo=TRUE------------------------------------------------
no1 <- normalizeThis(dat1, refGrp=1:3, meth="mean")
no2 <- normalizeThis(dat1, refGrp=1:3, meth="trimMean", trim=0.4)
no3 <- normalizeThis(dat1, refGrp=1:3, meth="median")
no4 <- normalizeThis(dat1, refGrp=1:3, meth="slope", quantFa=c(0.2,0.8))
## ----normalizeThis_plot1, echo=FALSE,eval=TRUE--------------------------------
boxplot(dat1, main="raw data", las=1)
## ----normalizeThis_plot2, echo=FALSE,eval=TRUE--------------------------------
layout(matrix(1:4, ncol=2))
boxplot(no1, main="mean normalization", las=1)
boxplot(no2, main="trimMean normalization", las=1)
boxplot(no3, main="median normalization", las=1)
boxplot(no4, main="slope normalization", las=1)
## ----rowNormalize1, echo=TRUE-------------------------------------------------
set.seed(2); AA <- matrix(rbinom(110, 10, 0.05), nrow=10)
AA[,4:5] <- AA[,4:5] *rep(4:3, each=nrow(AA))
AA1 <- rowNormalize(AA)
round(AA1, 2)
## ----rowNormalize2, echo=TRUE-------------------------------------------------
AC <- AA
AC[which(AC <1)] <- NA
(AC1 <- rowNormalize(AC))
## ----rowNormalize3, echo=TRUE-------------------------------------------------
(AC3 <- rowNormalize(AC, refLines=1:5, omitNonAlignable=TRUE))
## ----coordOfFilt1, echo=TRUE--------------------------------------------------
set.seed(2021); ma1 <- matrix(sample.int(n=40, size=27, replace=TRUE), ncol=9)
## let's test which values are >37
which(ma1 >37) # doesn't tell which row & col
coordOfFilt(ma1, ma1 >37)
## ----rnormW1, echo=TRUE-------------------------------------------------------
## some sample data :
x1 <- (11:16)[-5]
mean(x1); sd(x1)
## ----rnormW2, echo=TRUE-------------------------------------------------------
## the standard way for generating normal random values
ra1 <- rnorm(n=length(x1), mean=mean(x1), sd=sd(x1))
## In particular with low n, the random values deviate somewhat from the expected mean and sd :
mean(ra1) -mean(x1)
sd(ra1) -sd(x1)
## ----rnormW3, echo=TRUE-------------------------------------------------------
## random numbers with close fit to expected mean and sd :
ra2 <- rnormW(length(x1), mean(x1), sd(x1))
mean(ra2) -mean(x1)
sd(ra2) -sd(x1) # much closer to expected value
## ----moderTest2grp, echo=TRUE-------------------------------------------------
set.seed(2017); t8 <- matrix(round(rnorm(1600,10,0.4),2), ncol=8,
dimnames=list(paste("l",1:200), c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2]+3 # augment lines 3:6 for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6]+3 # augment lines 5:8 for AA2&BB2 (c,d,g,h should be found)
t4 <- log2(t8[,1:4]/t8[,5:8])
fit4 <- moderTest2grp(t4, gl(2,2))
## now we'll use limma's topTable() function to look at the 'best' results
if("list" %in% mode(fit4)) { # if you have limma installed we can look further
library(limma)
topTable(fit4, coef=1,n=5) # effect for 3,4,7,8
fit4in <- moderTest2grp(t4, gl(2,2), testO="<")
if("list" %in% mode(fit4in)) topTable(fit4in, coef=1,n=5) }
## ----moderTestXgrp, echo=TRUE-------------------------------------------------
grp <- factor(rep(LETTERS[c(3,1,4)], c(2,3,3)))
set.seed(2017); t8 <- matrix(round(rnorm(208*8,10,0.4),2), ncol=8,
dimnames=list(paste(letters[], rep(1:8,each=26),sep=""), paste(grp,c(1:2,1:3,1:3),sep="")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f)
t8[5:8,c(1:2,6:8)] <- t8[5:8,c(1:2,6:8)] -1.5 # lower lines
t8[6:7,3:5] <- t8[6:7,3:5] +2.2 # augment lines
## expect to find C/A in c,d,g, (h)
## expect to find C/D in c,d,e,f
## expect to find A/D in f,g,(h)
test8 <- moderTestXgrp(t8, grp)
head(test8$p.value, n=8)
## ----pVal2lfdr, echo=TRUE-----------------------------------------------------
set.seed(2017); t8 <- matrix(round(rnorm(160,10,0.4),2), ncol=8, dimnames=list(letters[1:20],
c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f) for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6] +3 # augment lines 5:8 (e-h) for AA2&BB2 (c,d,g,h should be found)
head(pVal2lfdr(apply(t8, 1, function(x) t.test(x[1:4], x[5:8])$p.value)))
## ----fcCI, echo=TRUE----------------------------------------------------------
set.seed(2022); ran <- rnorm(50)
confInt(ran, alpha=0.05)
## plot points and confidence interval of mean
plot(ran, jitter(rep(1, length(ran))), ylim=c(0.95, 1.05), xlab="random variable 'ran'",main="Points and Confidence Interval of Mean (alpha=0.05)", ylab="", las=1)
points(mean(ran), 0.97, pch=3, col=4) # mean
lines(mean(ran) +c(-1, 1) *confInt(ran, 0.05), c(0.97, 0.97), lwd=4, col=4) # CI
legend("topleft","95% conficence interval of mean", text.col=4,col=4,lty=1,lwd=1,seg.len=1.2,cex=0.9,xjust=0,yjust=0.5)
## ----matchSampToPairw, echo=TRUE----------------------------------------------
## make example if limma is not installed
if(!requireNamespace("limma", quietly=TRUE)) test8 <- list(FDR=matrix(1, nrow=2, ncol=3, dimnames=list(NULL,c("A-C","A-D","C-D"))))
matchSampToPairw(unique(grp), colnames(test8$FDR))
## ----pairWiseConc1, echo=TRUE-------------------------------------------------
mat1 <- matrix(1:8, nrow=2, dimnames=list(NULL, paste0(1:4,"-",6:9)))
numPairDeColNames(mat1)
## ----replicateStructure1, echo=TRUE-------------------------------------------
## column a is all different, b is groups of 2,
## c & d are groups of 2 but NOT 'same general' pattern as b
strX <- data.frame(a=letters[18:11], b=letters[rep(c(3:1,4), each=2)],
c=letters[rep(c(5,8:6), each=2)], d=letters[c(1:2,1:3,3:4,4)],
e=letters[rep(c(4,8,4,7),each=2)], f=rep("z",8) )
strX
replicateStructure(strX[,1:2])
replicateStructure(strX[,1:4], method="combAll")
replicateStructure(strX[,1:4], method="combAll", exclNoRepl=FALSE)
replicateStructure(strX[,1:4], method="combNonOrth", exclNoRepl=TRUE)
replicateStructure(strX, method="lowest")
## ----std1, echo=TRUE----------------------------------------------------------
dat <- matrix(2*round(runif(100),2), ncol=4)
mean(dat); sd(dat)
datS <- scale(dat)
apply(datS, 2, sd)
# each column was treated separately
mean(datS); sd(datS); range(datS)
# the mean is almost 0.0 and the sd almost 1.0
datB <- scale(dat, center=TRUE, scale=FALSE)
mean(datB); sd(datB); range(datB) # mean is almost 0
## ----std2, echo=TRUE----------------------------------------------------------
datS2 <- standardW(dat)
apply(datS2, 2, sd)
summary(datS2)
mean(datS2); sd(datS2)
datS3 <- standardW(dat, byColumn=TRUE)
apply(datS3, 2, sd)
summary(datS3)
mean(datS3); sd(datS3)
## ----scale1, echo=TRUE--------------------------------------------------------
datR2 <- apply(dat, 2, scaleXY, 1, 100)
summary(datR2); sd(datR2)
## ----clu01, echo=TRUE---------------------------------------------------------
nGr <- 3
irKm <- stats::kmeans(iris[,1:4], nGr, nstart=nGr*4) # no need to standardize
table(irKm$cluster, iris$Species)
#wrGraph::plotPCAw(t(as.matrix(iris[,1:4])), sampleGrp=irKm,colBase=irKm$cluster,useSymb=as.numeric(as.factor(iris$Species)))
## ----clu02, echo=TRUE---------------------------------------------------------
## sort results by cluster number
head(reorgByCluNo(iris[,-5], irKm$cluster))
tail(reorgByCluNo(iris[,-5], irKm$cluster))
## ----clu03, echo=TRUE---------------------------------------------------------
## median and CV
ir2 <- reorgByCluNo(iris[,-5], irKm$cluster, addInfo=FALSE, retList=TRUE)
## ----clu04, echo=TRUE---------------------------------------------------------
sapply(ir2, function(x) apply(x, 2, median))
## ----clu05, echo=TRUE---------------------------------------------------------
sapply(ir2, colSds)
## ----filterNetw0, echo=TRUE---------------------------------------------------
lst2 <- list('121'=data.frame(ID=as.character(c(141,221,228,229,449)),11:15),
'131'=data.frame(ID=as.character(c(228,331,332,333,339)),11:15),
'141'=data.frame(ID=as.character(c(121,151,229,339,441,442,449)),c(11:17)),
'151'=data.frame(ID=as.character(c(449,141,551,552)),11:14),
'161'=data.frame(ID=as.character(171),11),
'171'=data.frame(ID=as.character(161),11),
'181'=data.frame(ID=as.character(881:882),11:12) )
## ----filterNetw1, echo=TRUE---------------------------------------------------
(nw1 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=FALSE))
## ----filterNetw2, echo=TRUE---------------------------------------------------
(nw2 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=TRUE))
## ----filterNetw3, echo=TRUE---------------------------------------------------
(nw3 <- filterNetw(lst2, limInt=20, sandwLim=14, remOrphans=TRUE))
## ----propMatr1, echo=TRUE-----------------------------------------------------
pairs3L <- matrix(LETTERS[c(1,3,3, 2,2,1)], ncol=2) # loop of 3
(netw13pr <- pairsAsPropensMatr(pairs3L)) # as prop matr
## ----contribToContigPerFrag, echo=TRUE----------------------------------------
path1 <- matrix(c(17,19,18,17, 4,4,2,3), ncol=2,
dimnames=list(c("A/B/C/D","A/B/G/D","A/H","A/H/I"), c("sumLen","n")))
contribToContigPerFrag(path1)
## ----simpleFragFig, echo=TRUE-------------------------------------------------
frag1 <- cbind(beg=c(2,3,7,13,13,15,7,9,7, 3,3,5), end=c(6,12,8,18,20,20,19,12,12, 4,5,7))
rownames(frag1) <- letters[1:nrow(frag1)]
simpleFragFig(frag1)
## ----countSameStartEnd, echo=TRUE---------------------------------------------
countSameStartEnd(frag1)
## ----pasteC, echo=TRUE--------------------------------------------------------
pasteC(1:4)
pasteC(letters[1:4],quoteC="'")
## ----color-gradient1, echo=TRUE-----------------------------------------------
set.seed(2015); dat1 <- round(runif(15),2)
plot(1:15, dat1, pch=16, cex=2, las=1, col=colorAccording2(dat1),
main="Color gradient according to value in y")
# Here we modify the span of the color gradient
plot(1:15, dat1, pch=16, cex=2, las=1,
col=colorAccording2(dat1, nStartO=0, nEndO=4, revCol=TRUE), main="blue to red")
# It is also possible to work with scales of transparency
plot(1:9, pch=3, las=1)
points(1:9, 1:9, col=transpGraySca(st=0, en=0.8, nSt=9,trans=0.3), cex=42, pch=16)
## ----convColorToTransp, fig.height=6, fig.width=3, echo=TRUE------------------
col0 <- c("#998FCC","#5AC3BA","#CBD34E","#FF7D73")
col1 <- convColorToTransp(col0,alph=0.7)
layout(1:2)
pie(rep(1,length(col0)), col=col0, main="no transparency")
pie(rep(1,length(col1)), col=col1, main="new transparency")
## ----sysDate1, echo=TRUE------------------------------------------------------
## To get started
Sys.Date()
## Compact English names (in European order), no matter what your local settings are :
sysDate()
## ----DateTab, echo=TRUE-------------------------------------------------------
tabD <- cbind(paste0("univ",1:6), c(sysDate(style="univ1"), sysDate(style="univ2"),
sysDate(style="univ3"), sysDate(style="univ4"), as.character(sysDate(style="univ5")),
sysDate(style="univ6")), paste0(" local",1:6),
c(sysDate(style="local1"), sysDate(style="local2"), sysDate(style="local3"),
sysDate(style="local4"), sysDate(style="local5"), sysDate(style="local6")))
knitr::kable(tabD, caption="Various ways of writing current date")
## ----sessionInfo, echo=FALSE--------------------------------------------------
sessionInfo()
/scratch/gouwar.j/cran-all/cranData/wrMisc/inst/doc/wrMiscVignette1.R
---
title: "Getting started with wrMisc"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrMiscVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains a collection of various (low-level) tools which may be of general interest.
These functions were accumulated over a number of years of data-wrangling when treating high-throughput data from biomedical applications.
Besides, these functions are further used/integrated in more specialized functions dedicated to specific applications in the packages [wrProteo](https://CRAN.R-project.org/package=wrProteo), [wrGraph](https://CRAN.R-project.org/package=wrGraph) or [wrTopDownFrag](https://CRAN.R-project.org/package=wrTopDownFrag).
All these packages are available on [CRAN](https://cran.r-project.org/).
If you are not familiar with [R](https://www.r-project.org/) you may find many introductory documents on the official R-site
in [contributed documents](https://cran.r-project.org/other-docs.html) or under [Documentation/Manuals](https://cran.r-project.org/manuals.html).
Of course, numerous other documents/sites with tutorials and courses exist, too.
### Dependencies and Compilation
One of the aims was to write a package easy to install, with low system requirements and few obligatory dependencies.
All code is written in pure R and does not need any special compilers.
The number of obligatory dependencies was kept to a minimum.
Most of the additional packages used in some of the functions were declared as 'suggested' (ie not obligatory), to allow installation of _wrMisc_ even if some of these additional packages can't be installed/compiled by the user's instance.
When a feature/function of one of the 'suggested' packages is about to be used, its presence/installation will be checked and,
only if found missing, the user will be prompted with a message inviting to install the specific package(s) before using these specific functions.
This helps to avoid a situation where this package cannot be installed at all just because some of its optional dependencies fail to get installed themselves.
### Installation And Loading
To get started, we need to install (if not yet installed) and load the package "[wrMisc](https://CRAN.R-project.org/package=wrMisc)" available from [CRAN](https://cran.r-project.org/).
```{r setup0, include=FALSE, echo=FALSE, messages=FALSE, warnings=FALSE}
suppressPackageStartupMessages({
library(wrMisc)
})
```
```{r install, echo=TRUE, eval=FALSE}
## If not already installed, you'll have to install the package first.
## This is the basic installation command in R
install.packages("wrMisc")
```
Since the functions illustrated in this vignette require a number of the _suggested_ packages,
let's check whether they are installed and install any that are missing (via a small function).
```{r install2, echo=TRUE, eval=FALSE}
packages <- c("knitr", "rmarkdown", "BiocManager", "kableExtra", "boot", "data.tree", "data.table",
"fdrtool", "RColorBrewer", "Rcpp", "wrMisc", "wrGraph", "wrProteo")
checkInstallPkg <- function(pkg) { # install function
if(!requireNamespace(pkg, quietly=TRUE)) install.packages(pkg) }
## install if not yet present
sapply(packages, checkInstallPkg)
```
Finally, this package also uses the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html)
which has to be installed differently (see also help on [Bioconductor](https://bioconductor.org)):
```{r install3, echo=TRUE, eval=FALSE}
## Installation of limma
BiocManager::install("limma")
```
This vignette is also accessible from R command-line or on CRAN at [wrMisc](https://CRAN.R-project.org/package=wrMisc):
```{r install4, echo=TRUE, eval=FALSE}
## Now you can open this vignette out of R:
vignette("wrMiscVignette1", package="wrMisc")
```
Before using the functions of this package, we actually need to load the package first (best on a fresh R-session):
```{r setup1}
library("wrMisc")
library("knitr")
## This is 'wrMisc' version number :
packageVersion("wrMisc")
```
## Speed Optimized Functions In The Package wrMisc
In high-throughput experiments in biology (like transcriptomics, proteomics etc...) many different features get measured a number of times (different samples like patients or evolution of a disease). The resulting data typically contain many (independent) rows (eg >1000 different genes or proteins whose abundance was measured) and much fewer columns that may get further organized in groups of replicates.
As R is a versatile language, multiple options exist for assessing the global characteristics of such data, some being more efficient from a computational point of view.
In order to allow fast treatment of very large data-sets some tools have been re-designed for optimal performance.
### Assessing Basic Information About Variability (for matrix)
Many measurement techniques applied in a high-throughput manner suffer from limited precision.
This means the same measurement taken twice in a row (ie repeated on the same subject) will very likely not give an identical result.
For this reason it is common practice to make replicate measurements to i) estimate mean (ie representative) values and ii) assess the factors contributing to the variability observed.
Briefly, technical replicates represent the case where multiple read-outs of the very same sample are generated, and the resulting variability is associated to technical issues during the process of taking measures. Biological replicates represent independent samples and therefore reflect the variability a given parameter may have in a certain population of individuals.
With the tools presented here, both technical and biological replicates can be dealt with.
In several cases the interpretation of the resulting numbers should nevertheless consider the experimental setup.
Let's make a simple matrix as toy data:
```{r basicVariability, echo=TRUE}
grp1 <- rep(LETTERS[1:3], c(3,4,3))
sampNa1 <- paste0(grp1, c(1:3,1:4,1:3))
set.seed(2016); dat1 <- matrix(round(c(runif(50000) +rep(1:1000,50)),3),
ncol=10, dimnames=list(NULL,sampNa1))
dim(dat1)
head(dat1)
```
Now lets estimate the standard deviation _(sd)_ for every row:
```{r sdForEachRow, echo=TRUE}
head(rowSds(dat1))
system.time(sd1 <- rowSds(dat1))
system.time(sd2 <- apply(dat1, 1, sd))
```
On most systems the equivalent calculation using *apply()* will run much slower than `rowSds`.
Note, there is a minor issue with rounding :
```{r usingApply, echo=TRUE}
table(round(sd1, 13)==round(sd2, 13))
```
Similarly we can easily calculate the CV (coefficient of variation, ie sd / mean, see also [CV](https://en.wikipedia.org/wiki/Coefficient_of_variation)) for every row using `rowCVs` :
```{r calculateRowCV, echo=TRUE}
system.time(cv1 <- rowCVs(dat1))
system.time(cv2 <- apply(dat1, 1, sd) / rowMeans(dat1))
# typically the calculation using rowCVs is much faster
head(cv1)
# results from the 'conventional' way
head(cv2)
```
Note, these calculations will be very efficient as long as the number of rows is much higher (>>) than the number of columns.
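This shape-dependence may be checked empirically. Below a minimal sketch (not evaluated here; the matrix sizes are arbitrary assumptions and exact timings will depend on your system):
```{r rowSdsShape, echo=TRUE, eval=FALSE}
## hypothetical timing comparison : tall layout (many rows) vs wide layout (many columns)
tall <- matrix(rnorm(2e6), ncol=10)       # 200000 rows, 10 columns
wide <- matrix(rnorm(2e6), ncol=2e5)      # 10 rows, 200000 columns
system.time(rowSds(tall))
system.time(rowSds(wide))
```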
### Data Organized In (Sub-)Groups As Sets Of Columns
Now, let's assume our data contains 3 initial samples measured as several replicates (already defined in _grp1_).
Similarly, we can also calculate the sd or CV for each line while splitting into groups of replicates (functions `rowGrpMeans`, `rowGrpSds` and `rowGrpCV`):
```{r rowGrpMeans1, echo=TRUE}
# we already defined the grouping :
grp1
## the mean for each group and row
system.time(mean1Gr <- rowGrpMeans(dat1, grp1))
```
```{r sdOrCVbyGrp, echo=TRUE}
## Now the sd for each row and group
system.time(sd1Gr <- rowGrpSds(dat1, grp1))
# will give us a matrix with the sd for each group & line
head(sd1Gr)
# Let's check the results of the first line :
sd1Gr[1,] == c(sd(dat1[1,1:3]), sd(dat1[1,4:7]), sd(dat1[1,8:10]))
# The CV :
system.time(cv1Gr <- rowGrpCV(dat1, grp1))
head(cv1Gr)
```
#### Counting Number Of NAs Per Row And Group Of Columns
Some data, like with quantitative proteomics measures, may contain an elevated number of _NAs_ (see also the package [wrProteo](https://CRAN.R-project.org/package=wrProteo) for further options for dealing with such data).
Furthermore, many other packages on CRAN and Bioconductor cover this topic, see also the [missing data task-view](https://CRAN.R-project.org/view=MissingData) on CRAN.
Similarly to above, there is an easy way to count the number of _NAs_ per row to get an overview of how the NAs are distributed.
Let's assume we have measures from 3 groups/samples with 4 replicates each :
```{r rowGrpNA1, echo=TRUE}
mat2 <- c(22.2, 22.5, 22.2, 22.2, 21.5, 22.0, 22.1, 21.7, 21.5, 22, 22.2, 22.7,
NA, NA, NA, NA, NA, NA, NA, 21.2, NA, NA, NA, NA,
NA, 22.6, 23.2, 23.2, 22.4, 22.8, 22.8, NA, 23.3, 23.2, NA, 23.7,
NA, 23.0, 23.1, 23.0, 23.2, 23.2, NA, 23.3, NA, NA, 23.3, 23.8)
mat2 <- matrix(mat2, ncol=12, byrow=TRUE)
## The definition of the groups (ie replicates)
gr4 <- gl(3, 4, labels=LETTERS[1:3])
```
Now we can easily count the number of NAs per row and set of replicates.
```{r rowGrpNA2, echo=TRUE}
rowGrpNA(mat2,gr4)
```
### Fast NA-omit For Very Large Objects
The function _na.omit()_ from the package _stats_ also keeps a trace of all omitted instances.
This can be penalizing in terms of memory usage when handling very large vectors with a high content of NAs (eg >10000 NAs).
If you don't need to document precisely which elements got eliminated, the function `naOmit()` may offer
smoother functioning for very large objects.
```{r naOmit, echo=TRUE}
aA <- c(11:13,NA,10,NA)
str(naOmit(aA))
# the 'classical' na.omit also stores which elements were NA
str(na.omit(aA))
```
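The memory argument can be inspected directly, since _na.omit()_ attaches an attribute recording the positions of all omitted elements, while `naOmit()` does not. A minimal sketch (the vector size is an arbitrary assumption; exact sizes vary by platform):
```{r naOmitSize, echo=TRUE}
## na.omit() carries a 'na.action' attribute listing all omitted positions
bigV <- rep(c(1:5, NA), 50000)   # vector containing 50000 NAs
object.size(naOmit(bigV))
object.size(na.omit(bigV))       # larger, due to the 'na.action' attribute
```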
### Minimum Distance/Difference Between Values
If you need to find the closest neighbour(s) of a numeric vector, the function `minDiff()` will tell you the
distance ("dif","ppm" or "ratio") and index ("best") of the closest neighbour.
In case of multiple shortest distances the index of the first one is reported, and the column "nbest" will display a value of >1.
```{r minDiff, echo=TRUE}
set.seed(2017); aa <- 10 *c(0.1 +round(runif(20),2), 0.53, 0.53)
head(aa)
minDiff(aa,ppm=FALSE)
```
When you look at the first line, the value of 10.2 has one single closest value which is 10.4,
which is located in line number 19 (the column 'best' gives the index of the best).
Line number 19 points back to line number 1.
You can see that some elements (like 5.7) occur multiple times (lines no 3 and 9); multiple occurrences are counted in the column _ncur_.
This is why column _nbest_ for line 15 (_value_ =6.0) indicates that it appears twice as the closest value.
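Since the column _best_ holds plain indexes, it may also be used directly for looking up each closest neighbour. A minimal sketch, assuming the column-names described above:
```{r minDiffIndex, echo=TRUE}
## use the index from column 'best' to extract each value's closest neighbour
mD1 <- minDiff(aa, ppm=FALSE)
head(cbind(value=aa, closest=aa[mD1[,"best"]]))
```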
## Working With Lists (And Lists Of Lists) {#WorkingWithLists}
### Partial unlist
When input from different places gets collected and combined into a list, this may give a collection of different types of data.
The function `partUnlist()` will preserve multi-column elements as they are (and just bring down one level):
```{r partUnlist_1, echo=TRUE}
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
partUnlist(bb)
partUnlist(lapply(bb,.asDF2))
```
This won't be possible using _unlist()_.
```{r unlist_1, echo=TRUE}
head(unlist(bb, recursive=FALSE))
```
To uniform such data to obtain a list with one column only for each list-element, the function `asSepList()` provides help :
```{r asSepList, echo=TRUE}
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
asSepList(bb)
```
### Appending/Combining Lists
Separate lists may be combined using the _append()_ command, which also allows treating simple vectors.
```{r lappend1, echo=TRUE}
li1 <- list(a=1, b=2, c=3)
li2 <- list(A=11, b=2, C=13)
append(li1, li2)
```
However, this way there is no checking if some of the list-elements are present in both lists and thus will appear twice.
The function `appendNR()` allows checking whether some list-elements would appear twice, and thus avoids such duplicate entries.
```{r lappend2, echo=TRUE}
appendNR(li1, li2)
```
### rbind On Lists
When a matrix (or data.frame) gets split into a list, like in the example using _by()_, as a reverse-function such lists can get joined using `lrbind()` in an _rbind_-like fashion.
```{r lrbind, echo=TRUE}
dat2 <- matrix(11:34, ncol=3, dimnames=list(letters[1:8], colnames=LETTERS[1:3]))
lst2 <- by(dat2, rep(1:3,c(3,2,3)), as.matrix)
lst2
# join list-elements (back) into single matrix
lrbind(lst2)
```
### Merge Multiple Matrices From List
When combining different datasets the function `mergeMatrixList()` allows merging multiple matrices (or data.frames) into a single matrix.
Two modes of operation are available : i) returning only the common/shared elements (as defined by the rownames), which is the default _mode='intersect'_ ;
alternatively one may ii) fuse/merge all matrices together without any loss of data (using _mode='union'_, additional _NA_s may appear when a given rowname is absent in one of the input matrices).
Furthermore, one may specifically select which columns should be used for fusing using the argument _useColumn_.
```{r mergeMatrixList, echo=TRUE}
mat1 <- matrix(11:18, ncol=2, dimnames=list(letters[3:6],LETTERS[1:2]))
mat2 <- matrix(21:28, ncol=2, dimnames=list(letters[2:5],LETTERS[3:4]))
mat3 <- matrix(31:38, ncol=2, dimnames=list(letters[c(1,3:4,3)],LETTERS[4:5]))
#
mergeMatrixList(list(mat1, mat2), useColumn="all")
# with custom names for the individual matrices
mergeMatrixList(list(m1=mat1, m2=mat2, mat3), mode="union", useColumn=2)
```
Similarly, separate entries may be merged using `mergeMatrices()` :
```{r mergeMatrices, echo=TRUE}
mergeMatrices(mat1, mat2)
mergeMatrices(mat1, mat2, mat3, mode="union", useColumn=2)
## custom names for matrix-origin
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=2)
## flexible/custom selection of columns
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=list(1,1:2,2))
```
### Fuse Content Of List-Elements With Redundant (Duplicated) Names
When list-elements have the same name, their content (of named numeric or character vectors)
may get fused using `fuseCommonListElem()` according to the names of the list-elements :
```{r fuseCommonListElem, echo=TRUE}
val1 <- 10 +1:26
names(val1) <- letters
(lst1 <- list(c=val1[3:6], a=val1[1:3], b=val1[2:3] ,a=val1[12], c=val1[13]))
## here the names 'a' and 'c' appear twice :
names(lst1)
## now, let's fuse all 'a' and 'c'
fuseCommonListElem(lst1)
```
### Filtering Lines And/Or Columns For All List-Elements Of Same Size
In a number of cases the information in various list-elements is somehow related.
Eg, in S3-objects produced by [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), or data produced using [wrProteo](https://CRAN.R-project.org/package=wrProteo), several instances of matrix or data.frame refer to data that are related.
Some matrixes may contain abundance data (or weights, etc) while another matrix or data.frame may contain the annotation information related to each line of the abundance data.
So if one wants to filter the data, ie remove some lines, this should be done in the same way with all related list-elements.
This way one may maintain a convenient 1:1 matching of lines.
The function `filterLiColDeList()` checks whether other list-elements have suitable dimensions and will then run the same filtering as in the 'target' list-element.
In consequence this can be used with the output of wrProteo to remove simultaneously the same lines and/or columns.
```{r listBatchReplace1, echo=TRUE}
lst1 <- list(m1=matrix(11:18, ncol=2), m2=matrix(21:30, ncol=2), indR=31:34,
m3=matrix(c(21:23,NA,25:27,NA), ncol=2))
filterLiColDeList(lst1, useLines=2:3)
filterLiColDeList(lst1, useLines="allNA", ref=3)
```
### Replacements In List
The function `listBatchReplace()` works similar to _sub()_ and allows to search & replace exact matches to a character string along all elements of a list.
```{r replInList1, echo=TRUE}
(lst1 <- list(aa=1:4, bb=c("abc","efg","abhh","effge"), cc=c("abdc","efg","efgh")))
listBatchReplace(lst1, search="efg", repl="EFG", silent=FALSE)
```
### Organize Values Into List And Sort By Names
Named numeric or character vectors can be organized into lists using `listGroupsByNames()`,
based on their names (only the part before any extensions starting with a point gets considered).
Of course, other separators may be defined using the argument _sep_.
```{r listGroupsByNames, echo=TRUE}
ser1 <- 1:7; names(ser1) <- c("AA","BB","AA.1","CC","AA.b","BB.e","A")
listGroupsByNames(ser1)
```
If no names are present, the content of the vector itself will be used as name :
```{r listGroupsByNames2, echo=TRUE}
listGroupsByNames((1:10)/5)
```
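As mentioned, other separators may be specified via the argument _sep_. A minimal sketch (the names below are constructed just for illustration):
```{r listGroupsByNames3, echo=TRUE}
ser2 <- 1:4; names(ser2) <- c("AA_1","AA_2","BB_1","CC")
listGroupsByNames(ser2, sep="_")
```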
### Batch-filter List-Elements
In the view of object-oriented programming several methods produce results integrated into lists or S3-objects (eg
[limma](https://bioconductor.org/packages/release/bioc/html/limma.html)).
The function `filterList()` aims at facilitating the filtering of all elements of lists or S3-objects.
List-elements with an inappropriate number of lines will be ignored.
```{r filterList, echo=TRUE}
set.seed(2020); dat1 <- round(runif(80),2)
list1 <- list(m1=matrix(dat1[1:40], ncol=8), m2=matrix(dat1[41:80], ncol=8), other=letters[1:8])
rownames(list1$m1) <- rownames(list1$m2) <- paste0("line",1:5)
# Note: the list-element list1$other has a length different to that of filt. Thus, it won't get filtered.
filterList(list1, list1$m1[,1] >0.4) # filter according to 1st column of $m1 ...
filterList(list1, list1$m1 >0.4)
```
### Transform Columns Of Matrix To List Of Vectors
On some occasions it may be useful to separate the columns of a matrix into separate vectors inside a list.
This can be done using `matr2list()`:
```{r matr2list, echo=TRUE}
(mat1 <- matrix(1:12, ncol=3, dimnames=list(letters[1:4],LETTERS[1:3])))
str(matr2list(mat1))
```
## Working With Arrays {#WorkingWithArrays}
Let's get started with a little toy-array:
```{r array0, echo=TRUE}
(arr1 <- array(c(6:4,4:24), dim=c(4,3,2), dimnames=list(c(LETTERS[1:4]),
paste("col",1:3,sep=""),c("ch1","ch2"))))
```
### CV (Coefficient Of Variance) With Arrays
Now we can obtain the CV (coefficient of variance) by splitting along the 3rd dimension (ie this is equivalent to an _apply_ along the 3rd dimension) using `arrayCV()`:
```{r arrayCV1, echo=TRUE}
arrayCV(arr1)
# this is equivalent to
cbind(rowCVs(arr1[,,1]), rowCVs(arr1[,,2]))
```
Similarly we can split along any other dimension, eg the 2nd dimension :
```{r arrayCV2, echo=TRUE}
arrayCV(arr1, byDim=2)
```
### Slice 3-dim Array In List Of Matrixes (Or Arrays)
This procedure is similar to (re-)organizing an initial array into clusters, here we split along a user-defined factor/vector.
If a clustering-algorithm produces the cluster assignments, this function can be used to organize the input data accordingly using `cutArrayInCluLike()`.
```{r cutArrayInCluLike, echo=TRUE}
cutArrayInCluLike(arr1, cluOrg=c(2,1,2,1))
```
Let's cut by filtering along the 3rd dimension for all lines where column 'col2' is >7, and then display only the content of columns 'col1' and 'col2' (using `filt3dimArr()`):
```{r filt3dimArr, echo=TRUE}
filt3dimArr(arr1, displCrit=c("col1","col2"), filtCrit="col2", filtVal=7, filtTy=">")
```
## Working With Redundant Data {#WorkingWithRedundantData}
_Semantics_ : Please note that there are two ways of interpreting the term '**unique**' :
* In regular understanding one describes this way an event which occurs only once, and thus does not occur/happen anywhere else.
* The command `unique()` will eliminate redundant entries to obtain a shorter 'unique' output vector, ie in the resultant vector all values/content (values) occur only once.
However, from the result of _unique()_ you cannot tell any more which ones were not unique initially !
In some applications (eg proteomics) initial identifiers (IDs) may occur multiple times in the data and we frequently need to identify events/values that occur only once, as the first meaning of '_unique_'.
This package provides (additional) functions to easily distinguish values occurring just once (ie _unique_) from those occurring multiple times. Furthermore, there are functions to rename/remove/combine replicated elements, eg `correctToUnique()` or `nonAmbiguousNum()`, so that no elements or lines of data get lost.
### Identify What Is Repeated (and Where Repeated Do Occur)
```{r repeated1, echo=TRUE}
## some text toy data
tr <- c("li0","n",NA,NA, rep(c("li2","li3"),2), rep("n",4))
```
The function _table()_ (from the package _base_) is very useful to get some insights when working with smaller objects, but may be slow to handle very large objects.
As mentioned, _unique()_ will make everything unique, and afterwards you won't know any more who was unique in the first place !
The function `duplicated()` (also from package _base_) helps us find out which entries are repeated.
```{r repeated2, echo=TRUE}
table(tr)
unique(tr)
duplicated(tr, fromLast=FALSE)
```
```{r repeated3, echo=TRUE}
aa <- c(11:16,NA,14:12,NA,14)
names(aa) <- letters[1:length(aa)]
aa
```
`findRepeated()` (from this package) will return the position/index (and content/value) of repeated elements. However, the output in the form of a list is not very convenient to the human reader.
```{r findRepeated, echo=TRUE}
findRepeated(aa)
```
`firstOfRepeated()` tells the index of the first instance of repeated elements, which elements you need to make the vector 'unique', and which elements get stripped off when making unique.
Please note that _NA_ (no matter if they occur once or multiple times) are automatically included in the part suggested to be removed.
```{r firstOfRepeated, echo=TRUE}
firstOfRepeated(aa)
aa[firstOfRepeated(aa)$indUniq] # only unique with their names
unique(aa) # unique() does not return any names !
```
### Correct Vector To Unique (While Maintaining The Original Vector Length)
If necessary, a counter can be added to non-unique entries using `correctToUnique()`; this way no individual values get eliminated, and the length and order of the resultant object stay the same.
This is of importance when assigning rownames to a data.frame : assigning redundant values/text as rownames of a data.frame will result in an error !
```{r correctToUnique1, echo=TRUE}
correctToUnique(aa)
correctToUnique(aa, sep=".", NAenum=FALSE) # keep NAs (ie without transforming to character)
```
You see from the last example above that this function has an argument for controlling the enumeration of elements.
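To illustrate the rownames use-case mentioned above, here a minimal sketch (the data.frame _df0_ is a hypothetical example):
```{r correctToUniqueRownames, echo=TRUE}
df0 <- data.frame(x=seq_along(aa))
try(rownames(df0) <- aa)               # fails : duplicate (and NA) row-names are not allowed
rownames(df0) <- correctToUnique(aa)   # works : all names are now unique
head(df0)
```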
### Mark Any Duplicated (ie Ambiguous) Elements by Changing Their Names (and Separate from Unique)
First, the truly unique values are reported and then the first occurrence of repeated elements is given; _NA_ instances get ignored.
This can be done using `nonAmbiguousNum()` which maintains the length of the initial character vector.
```{r nonAmbiguousNum, echo=TRUE}
unique(aa) # names are lost
nonAmbiguousNum(aa)
nonAmbiguousNum(aa, uniq=FALSE, asLi=TRUE) # separate in list unique and repeated
```
### Compare Multiple Vectors And Sort By Number Of Common/Repeated Values/Words
The main aim of the function `sortByNRepeated()` is to allow comparing multiple vectors for common values/words, providing an output sorted by the number of repeats.
Suppose 3 persons are asked which cities they want to visit.
Then we would like to count the most frequently cited cities.
Here we consider individual choices as equally ranked.
By default intra-repeats are eliminated.
```{r sortByNRepeated, echo=TRUE}
cities <- c("Bangkok","London","Paris", "Singapore","New York City", "Istambul","Delhi","Rome","Dubai")
sortByNRepeated(x=cities[c(1:4)], y=cities[c(2:3,5:8)])
## or (unlimited) multiple inputs via list
choices1 <- list(Mary=cities[c(1:4)], Olivia=cities[c(2:3,5:8)], Paul=cities[c(5:3,9,5)]) # Note : Paul cited NYC twice !
table(unlist(choices1))
sortByNRepeated(choices1)
sortByNRepeated(choices1, filterIntraRep=FALSE) # without correcting multiple citation of NYC by Paul
```
### Combine Multiple Matrixes Where Some Column-Names Are The Same
Here, it is supposed that you want to join 2 or more matrixes describing different properties of the same collection of individuals (as rows).
Common column-names are interpreted to mean that their respective information should be combined (either as average or as sum).
This can be done using `cbindNR()` :
```{r cbindNR, echo=TRUE}
## First we'll make some toy data :
(ma1 <- matrix(1:6, ncol=3, dimnames=list(1:2,LETTERS[3:1])))
(ma2 <- matrix(11:16, ncol=3, dimnames=list(1:2,LETTERS[3:5])))
## now we can join 2 or more matrixes
cbindNR(ma1, ma2, summarizeAs="mean") # average of both columns 'C'
```
### Filter Matrix To Keep Only First Of Repeated Lines
This resembles the functioning of _unique()_, but applies to a user-specified column of the matrix.
```{r firstLineOfDat, echo=TRUE}
(mat1 <- matrix(c(1:6, rep(1:3,1:3)), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2])))
```
The function `firstLineOfDat()` allows to access/extract the first line of repeated instances.
```{r firstLineOfDat2, echo=TRUE}
firstLineOfDat(mat1, refCol=2)
```
The function `firstOfRepLines()` used in the next example was rather designed for dealing with character input; it allows concatenating all columns and removing redundant lines.
```{r firstOfRepLines, echo=TRUE}
mat2 <- matrix(c("e","n","a","n","z","z","n","z","z","b",
"","n","c","n","","","n","","","z"), ncol=2)
firstOfRepLines(mat2, out="conc")
# or as index :
firstOfRepLines(mat2)
```
### Filter To Unique Column-Content Of Matrix, Add Counter And Concatenated Information
```{r nonredDataFrame, echo=TRUE}
(df1 <- data.frame(cbind(xA=letters[1:5], xB=c("h","h","f","e","f"), xC=LETTERS[1:5])))
```
The function `nonredDataFrame()` offers to include a counter of redundant instances encountered (for the 1st column specified) :
```{r nonredDataFrame2, echo=TRUE}
nonredDataFrame(df1, useCol=c("xB","xC"))
# without counter or concatenating
df1[which(!duplicated(df1[,2])),]
# or
df1[firstOfRepLines(df1,useCol=2),]
```
### Get First Of Repeated By Column
```{r get1stOfRepeatedByCol, echo=TRUE}
mat2 <- cbind(no=as.character(1:20), seq=sample(LETTERS[1:15], 20, repl=TRUE),
ty=sample(c("full","Nter","inter"),20,repl=TRUE), ambig=rep(NA,20), seqNa=1:20)
(mat2uniq <- get1stOfRepeatedByCol(mat2, sortBy="seq", sortSupl="ty"))
# the values from column 'seq' are indeed unique
table(mat2uniq[,"seq"])
# This will return all first repeated (may be >1) but without further sorting
# along column 'ty' nor marking in column 'ambig'.
mat2[which(duplicated(mat2[,2],fromLast=FALSE)),]
```
### Transform (ambiguous) Matrix To Non-ambiguous Matrix (In Respect To Given Column)
```{r nonAmbiguousMat, echo=TRUE}
nonAmbiguousMat(mat1,by=2)
```
Here is another example; ambiguous entries will be marked by an '_' :
```{r nonAmbiguousMat2, echo=TRUE}
set.seed(2017); mat3 <- matrix(c(1:100,round(rnorm(200),2)), ncol=3,
dimnames=list(1:100,LETTERS[1:3]));
head(mat3U <- nonAmbiguousMat(mat3, by="B", na="_", uniqO=FALSE), n=15)
head(get1stOfRepeatedByCol(mat3, sortB="B", sortS="B"))
```
### Combine Replicates From List To Matrix
```{r combineReplFromListToMatr, echo=TRUE}
lst2 <- list(aa_1x=matrix(1:12, nrow=4, byrow=TRUE), ab_2x=matrix(24:13, nrow=4, byrow=TRUE))
combineReplFromListToMatr(lst2)
```
### Combine Redundant Lines From List with (Multiple) Matrix According to Reference
The function `combineRedundLinesInList()` provides help for combining/summarizing lines of numeric data which may be summarized according to a reference vector or matrix (part of the same input-list).
Initial data and reference will be aligned based on rownames and the content of the reference (or the column specified by _refColNa_).
```{r combineRedundLinesInListAcRef, echo=TRUE}
x1 <- list(quant=matrix(11:34, ncol=3, dimnames=list(letters[8:1], LETTERS[11:13])),
annot=matrix(paste0(LETTERS[c(1:4,6,3:5)],LETTERS[c(1:4,6,3:5)]), ncol=1,
dimnames=list(paste(letters[1:8]),"xx")) )
combineRedundLinesInList(lst=x1, refNa="annot", datNa="quant", refColNa="xx")
```
### Non-redundant Lines Of Matrix
```{r nonRedundLines, echo=TRUE}
mat4 <- matrix(rep(c(1,1:3,3,1),2), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2]))
nonRedundLines(mat4)
```
### Filter For Unique Elements /2
```{r filtSizeUniq, echo=TRUE}
# input: c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), filtUn=TRUE, minSi=NULL)
# here a,b,c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), ref=c(letters[c(1:26,1:3)],
"dd","dd","bb","ddd"), filtUn=TRUE, minSi=NULL)
```
### Make Non-redundant Matrix
```{r makeNRedMatr, echo=TRUE}
t3 <- data.frame(ref=rep(11:15,3), tx=letters[1:15],
matrix(round(runif(30,-3,2),1), ncol=2), stringsAsFactors=FALSE)
# First we split the data.frame into a list
by(t3,t3[,1],function(x) x)
t(sapply(by(t3,t3[,1],function(x) x), summarizeCols, me="maxAbsOfRef"))
(xt3 <- makeNRedMatr(t3, summ="mean", iniID="ref"))
(xt3 <- makeNRedMatr(t3, summ=unlist(list(X1="maxAbsOfRef")), iniID="ref"))
```
### Combine/Reduce Redundant Lines Based On Specified Column
```{r combineRedBasedOnCol, echo=TRUE}
matr <- matrix(c(letters[1:6],"h","h","f","e",LETTERS[1:5]), ncol=3,
dimnames=list(letters[11:15],c("xA","xB","xC")))
combineRedBasedOnCol(matr, colN="xB")
combineRedBasedOnCol(rbind(matr[1,],matr), colN="xB")
```
### Convert Matrix (eg With Redundant) Row-Names To data.frame
```{r convMatr2df, echo=TRUE}
x <- 1
dat1 <- matrix(1:10, ncol=2)
rownames(dat1) <- letters[c(1:3,2,5)]
## as.data.frame(dat1) ... would result in an error
convMatr2df(dat1)
convMatr2df(data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3))
tmp <- data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
tmp <- data.frame(a=as.character((1:3)/2), b=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
```
### Find And Combine Points Located Very Close In X/Y Space
```{r combineOverlapInfo, echo=TRUE}
set.seed(2013)
datT2 <- matrix(round(rnorm(200)+3,1), ncol=2, dimnames=list(paste("li",1:100,sep=""),
letters[23:24]))
# (mimic) some short and longer names for each line
inf2 <- cbind(sh=paste(rep(letters[1:4],each=26), rep(letters,4),1:(26*4),sep=""),
lo=paste(rep(LETTERS[1:4],each=26), rep(LETTERS,4), 1:(26*4), ",",
rep(letters[sample.int(26)],4), rep(letters[sample.int(26)],4), sep=""))[1:100,]
## We'll use this to test :
head(datT2, n=10)
## let's assign to each pair of x & y values a 'cluster' (column _clu_, the column _combInf_ tells us which lines/indexes are in this cluster)
head(combineOverlapInfo(datT2, disThr=0.03), n=10)
## it is also possible to rather display names (eg gene or protein-names) instead of index values
head(combineOverlapInfo(datT2, suplI=inf2[,2], disThr=0.03), n=10)
```
### Bin And Summarize Values According To Their Names
```{r getValuesByUnique, echo=TRUE}
dat <- 11:19
names(dat) <- letters[c(6:3,2:4,8,3)]
## Here the names are not unique.
## Thus, the values can be binned by their (non-unique) names and a representative value calculated.
## Let's make a 'datUniq' with the mean of each group of values :
datUniq <- round(tapply(dat, names(dat), mean),1)
## now we propagate the mean values to the full vector
getValuesByUnique(dat, datUniq)
cbind(ini=dat,firstOfRep=getValuesByUnique(dat, datUniq),
indexUniq=getValuesByUnique(dat, datUniq, asIn=TRUE))
```
### Regrouping Simultaneously by Two Factors
For example, if you wish to create group-labels considering the eye- and hair-color of a small group of students (supposing a sort of controlled vocabulary was used),
the function `combineByEitherFactor()` will help. So basically, this is an empiric segmentation-approach for two categorical variables.
Please note that with large data-sets and very dispersed data this approach will not provide great results.
In the example below we'll attempt to 'cluster' according to columns _nn_ and _qq_, the resultant cluster number can be found in column _grp_.
```{r combineByEitherFactor, echo=TRUE}
nn <- rep(c("a","e","b","c","d","g","f"),c(3,1,2,2,1,2,1))
qq <- rep(c("m","n","p","o","q"),c(2,1,1,4,4))
nq <- cbind(nn,qq)[c(4,2,9,11,6,10,7,3,5,1,12,8),]
## Here we consider 2 columns 'nn' and 'qq' when trying to regroup common values
## (eg value 'a' from column 'nn' and value 'o' from 'qq')
combineByEitherFactor(nq, 1, 2, nBy=FALSE)
```
The argument _nBy_ simply allows adding an additional column with the group/cluster-number.
```{r combineByEitherFactor2, echo=TRUE}
## the same, but including n by group/cluster
combineByEitherFactor(nq, 1, 2, nBy=TRUE)
## Not running further iterations works faster, but you may not reach 'convergence' immediately
combineByEitherFactor(nq,1, 2, nBy=FALSE)
```
```{r combineByEitherFactor3, echo=TRUE}
## another example
mm <- rep(c("a","b","c","d","e"), c(3,4,2,3,1))
pp <- rep(c("m","n","o","p","q"), c(2,2,2,2,5))
combineByEitherFactor(cbind(mm,pp), 1, 2, con=FALSE, nBy=TRUE)
```
### Batch Replacing Of Values Or Character-Strings
The function `multiCharReplace()` facilitates multiple replacements in a vector, matrix or data.frame.
```{r multiCharReplace1, echo=TRUE}
# replace character content
x1 <- c("ab","bc","cd","efg","ghj")
multiCharReplace(x1, cbind(old=c("bc","efg"), new=c("BBCC","EF")))
# works also on matrix and/or to replace numeric content :
x3 <- matrix(11:16, ncol=2)
multiCharReplace(x3, cbind(12:13,112:113))
```
Sometimes data get imported using different encodings for what should be interpreted as _FALSE_ and _TRUE_ :
```{r multiCharReplace2, echo=TRUE}
# replace and return logical vector
x2 <- c("High","n/a","High","High","Low")
multiCharReplace(x2,cbind(old=c("n/a","Low","High"), new=c(NA,FALSE,TRUE)), convTo="logical")
```
### Multi-to-multi Matching Of (Concatenated) Terms
The function allows splitting (if necessary, using _strsplit()_) two vectors and comparing each isolated tag (eg identifier) from the 1st vector/object against each isolated tag from the 2nd vector/object. This runs like a loop of one-to-many comparisons. The basic output is a list with indexes of which element of the 1st vector/object has matches in the 2nd vector/object. Since this is not convenient to the human reader, tabular output can be created, too.
```{r multiMatch1, echo=TRUE}
aa <- c("m","k","j; aa","m; aa; bb; o","n; dd","aa","cc")
bb <- c("aa","dd","aa; bb; q","p; cc")
## result as list of indexes
(bOnA <- multiMatch(aa, bb, method="asIndex")) # match bb on aa
## more convenient to the human reader
(bOnA <- multiMatch(aa, bb)) # match bb on aa
(bOnA <- multiMatch(aa, bb, method="matchedL")) # match bb on aa
```
### Comparing Global Patterns
In most programming languages it is fairly easy to compare _exact_ content of character vectors or factors with unordered levels.
However, sometimes - due to semantic issues - some people may call a color 'purple' while others call it 'violet'.
Thus, without using controlled vocabulary the _exact_ terms may vary.
Here, let's address the case where no dictionaries of controlled vocabulary are available for substituting equivalent terms.
Thus, we'll compare 4 vectors of equal length and check if the words/letters used could be substituted to result in the first vector.
Vectors _aa_ and _ab_ have the same global pattern, ie after repeating a word twice it moves to another word.
Vectors _ac_ and _ad_ have different general patterns, either with alternating words or falling back to a word previously used.
Based on and extending a post on stackoverflow [https://stackoverflow.com/questions/71353218/extracting-flexible-general-patterns/](https://stackoverflow.com/questions/71353218/extracting-flexible-general-patterns/) :
```{r compGlobPat1, echo=TRUE}
aa <- letters[rep(c(3:1,4), each=2)]
ab <- letters[rep(c(5,8:6), each=2)] # 'same general' pattern to aa
ac <- letters[c(1:2,1:3,3:4,4)] # NOT 'same general' pattern to any other
ad <- letters[c(6:8,8:6,7:6)] # NOT 'same general' pattern to any other
```
The basic pattern can be extracted by combining match() and unique():
```{r compGlobPat2, echo=TRUE}
## get global patterns
cbind(aa= match(aa, unique(aa)),
ab= match(ab, unique(ab)),
ac= match(ac, unique(ac)),
ad= match(ad, unique(ad)) )
```
Let's make a data.frame with the annotation toy-data from above.
Each line is supposed to represent a sample, and the columns show different aspects of annotation.
```{r compGlobPat3, echo=TRUE}
bb <- data.frame(ind=1:length(aa), a=aa, b=ab, c=ac, d=ad)
```
Via the function `replicateStructure()` it is possible to compare annotation given as different columns for equivalent global patterns.
By default, this function excludes all columns not designating any replicates, like the numbers in the first column ($ind).
Also, it will try to find the column with the median number of levels, when comparing to all other columns.
The output is a list with *\$col* indicating which column(s) may be used, *\$lev* for the corresponding global pattern, *\$meth* for the method finally used and
_\$allCols_ for documenting the global pattern in each column (whether it was selected or not).
```{r compGlobPat4, echo=TRUE}
replicateStructure(bb)
```
Besides, it is also possible to combine all columns if one considers that they contribute complementary substructures of the overall annotation.
```{r compGlobPat5, echo=TRUE}
replicateStructure(bb, method="combAll")
```
However, when combining multiple columns it may happen (like in the example above) that finally no lines remain that are considered replicates.
This can also be found when one column describes the groups and another gives the order of the replicates therein.
However, for calling a (standard) statistical test it may be necessary to exclude these replicate-numbers to designate the groups of replicates.
To overcome the problem of losing the understanding of the replicate-structure when combining all factors, it is possible to look for non-orthogonal structures,
ie to try excluding columns which (after combining) would suggest no replicates.
See the example below :
```{r compGlobPat6, echo=TRUE}
replicateStructure(bb, method="combNonOrth")
```
## Search For Similar (Numeric) Values {#SearchForSimilarNumericValues}
This section addresses values that are not truly _identical_ but may differ only in the very last digit(s)
and thus may, in a pragmatic view, be considered and treated as 'about the same'.
The simplest approach would be to round values and then look for identical values.
The functions presented here (like `checkSimValueInSer()`) offer this type of search in a convenient way.
Of course, the user must define a threshold for how similar values may be retained as positive (in the logical vector returned).
With the function _checkSimValueInSer()_ this threshold must be given as [ppm](https://simple.wikipedia.org/wiki/Parts_per_million) (parts per million).
```{r checkSimValueInSer, echo=TRUE}
va1 <- c(4:7,7,7,7,7,8:10) + (1:11)/28600
checkSimValueInSer(va1, ppm=5)
data.frame(va=sort(va1), simil=checkSimValueInSer(va1))
```
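For comparison, the naive approach mentioned above (round, then look for identical values) can be written in a few lines of base R; a minimal sketch :
```{r roundDuplicated, echo=TRUE}
## naive approach : round to 3 digits, then flag all values occurring more than once
ro1 <- round(va1, 3)
ro1 %in% ro1[duplicated(ro1)]
```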
### Find Similar Numeric Values Of Two Columns Of A Matrix
The search for similar values may be performed as absolute distance or as 'ppm' (as it is eg usual in proteomics when comparing measured and theoretically expected mass).
```{r findCloseMatch1, echo=TRUE}
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,15.9,13.5,15.7,14.1,5)
(cloMa <- findCloseMatch(x=aA, y=cC, com="diff", lim=0.5, sor=FALSE))
```
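The same comparison may also be run with a relative ('ppm') threshold instead of an absolute distance; a quick sketch (using a deliberately wide window of 5e4 ppm, ie 5 percent) :
```{r findCloseMatchPpm, echo=TRUE}
## relative ('ppm') distance threshold instead of absolute difference
findCloseMatch(x=aA, y=cC, com="ppm", lim=5e4)
```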
The result of _findCloseMatch()_ is a list organized by each 'x', telling all instances of 'y' found within the distance tolerance given by _lim_.
Using `closeMatchMatrix()` the result obtained above, can be presented in a more convenient format for the human eye.
```{r closeMatchMatrix1, echo=TRUE}
# all matches (of 2d arg) to/within limit for each of 1st arg ('x'); 'y' ..to 2nd arg = cC
# first let's display only one single closest/best hit
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=TRUE)) #
```
Using the argument _limitToBest=FALSE_ we can display all distances within the limits imposed; some values/points may occur multiple times.
For example, value number 4 of 'cC' (=12.5) or value number 3 of 'aA' (=13) now occur multiple times...
```{r closeMatchMatrix2, echo=TRUE}
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=FALSE,origN=TRUE)) #
(maAa <- closeMatchMatrix(cloMa, cbind(valA=81:87, aA), cbind(valC=91:99, cC), colM=2,
colP=2, lim=FALSE))
(maAa <- closeMatchMatrix(cloMa, cbind(aA,valA=81:87), cC, lim=FALSE, deb=TRUE)) #
a2 <- aA; names(a2) <- letters[1:length(a2)]; c2 <- cC; names(c2) <- letters[10 +1:length(c2)]
(cloM2 <- findCloseMatch(x=a2, y=c2, com="diff", lim=0.5, sor=FALSE))
(maA2 <- closeMatchMatrix(cloM2, predM=cbind(valA=81:87, a2),
measM=cbind(valC=91:99, c2), colM=2, colP=2, lim=FALSE, asData=TRUE))
(maA2 <- closeMatchMatrix(cloM2, cbind(id=names(a2), valA=81:87,a2), cbind(id=names(c2),
valC=91:99,c2), colM=3, colP=3, lim=FALSE, deb=FALSE))
```
### Find Similar Numeric Values From Two Vectors/Matrixes
For comparing two sets of data one may use `findSimilFrom2sets()`.
```{r findSimilFrom2sets, echo=TRUE}
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,12.6,15.9,14.1)
aZ <- matrix(c(aA,aA+20), ncol=2, dimnames=list(letters[1:length(aA)],c("aaA","aZ")))
cZ <- matrix(c(cC,cC+20), ncol=2, dimnames=list(letters[1:length(cC)],c("ccC","cZ")))
findCloseMatch(cC, aA, com="diff", lim=0.5, sor=FALSE)
findSimilFrom2sets(aA, cC)
findSimilFrom2sets(cC, aA)
findSimilFrom2sets(aA, cC, best=FALSE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=5e4, deb=TRUE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=FALSE)
# below: find fewer 'best matches' since search window larger (ie more good hits compete !)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=TRUE)
```
### Fuse Previously Identified Pairs To 'Clusters'
When you have already identified the closest neighbour of a set of values, you may want to
re-organize/fuse such pairs to a given number of total clusters (using `fusePairs()`).
```{r fusePairs, echo=TRUE}
(daPa <- matrix(c(1:5,8,2:6,9), ncol=2))
fusePairs(daPa, maxFuse=4)
```
### Eliminate Close (Overlapping) Points (In Bivariate x & y Space)
When visualizing larger data-sets in an x&y space one may find many points overlapping when their values are almost the same.
The function `elimCloseCoord()` aims to reduce a bivariate data-set to 'non-overlapping' points, somehow similar to human perception.
```{r elimCloseCoord1, echo=TRUE}
da1 <- matrix(c(rep(0:4,5),0.01,1.1,2.04,3.07,4.5), ncol=2); da1[,1] <- da1[,1]*99; head(da1)
elimCloseCoord(da1)
```
### Mode Of (Continuous) Data
Looking for the _mode_ is rather easy with counting data, the result of _table()_ will get you there quickly.
However, with continuous data the mode may be more tricky to define and identify.
Intuitively, most people consider the mode as the peak of a density estimation (which remains to be defined and estimated).
With continuous data the most frequent (exact) value may be quite different/distant to the most dense region of the data.
The function `stableMode()` presented here has different modes of operation; at this point there is no clear rule which mode may perform most satisfactorily in different situations.
```{r stableMode, echo=TRUE}
set.seed(2012); dat <- round(c(rnorm(120,0,1.2), rnorm(80,0.8,0.6), rnorm(25,-0.6,0.05), runif(200)),3)
dat <- dat[which(dat > -2 & dat <2)]
stableMode(dat)
```
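For comparison, with discrete counting-type data the classical mode can be obtained via _table()_, as mentioned above; a minimal base-R sketch (here after coarse rounding) :
```{r naiveTableMode, echo=TRUE}
## naive mode via table() : the most frequent (rounded) value
tab <- table(round(dat, 1))
as.numeric(names(tab)[which.max(tab)])
```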
Now we can try to show on a plot :
```{r stableMode2, fig.height=8, fig.width=9, fig.align="center", echo=TRUE}
layout(1:2)
plot(1:length(dat), sort(dat), type="l", main="Sorted Values", xlab="rank", las=1)
abline(h=stableMode(dat, silent=TRUE), lty=2,col=2)
legend("topleft",c("stableMode"), text.col=2, col=2, lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
plot(density(dat, kernel="gaussian", adjust=0.7), xlab="Value of dat", main="Density Estimate Plot")
useCol <- c("red","green","blue","grey55")
legend("topleft",c("dens","binning","BBmisc","allModes"), text.col=useCol, col=useCol,
lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
abline(v=stableMode(dat, method="dens", silent=TRUE), lty=2, col="red", lwd=2)
abline(v=stableMode(dat, method="binning", silent=TRUE), lty=2, col="green")
abline(v=stableMode(dat, method="BBmisc", silent=TRUE), lty=2, col="blue")
abline(v=stableMode(dat, method="allModes"), lty=2, col="grey55")
```
Please note that plotting data modelled via a kernel function (as above) also relies on strong hypotheses which may not be well justified in a number of cases !
For this reason, the _sorted values_ were plotted, too.
As you can see from the example above, looking for the most frequent exact value may not be a perfect choice for continuous data.
In this example the method _'allModes'_ (ie the multiple instances of most frequent exact values) gave partially usable results (dashed grey lines), due to the rounding to 3 digits.
As you can see in the example above, the method _'allModes'_ may give multiple ties !
More rounding will make the data more discrete and ultimately resemble counting data; however, with rounding some of the finer resolution/details will get lost.
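To illustrate the effect of rounding just mentioned, one may simply round the data more strongly before searching all ties; a quick sketch :
```{r stableModeRounded, echo=TRUE}
## stronger rounding makes the data more discrete, typically giving fewer ties
stableMode(round(dat, 1), method="allModes")
```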
### Most Frequently Occurring Value (traditional mode)
The function `stableMode()` can also be used to locate _the_ most frequently occurring exact value of numeric or character vectors.
As we just saw at the end of the previous example, the argument _method="allModes"_ allows finding all ties (if present).
```{r stableMode3, echo=TRUE}
set.seed(2021)
x <- sample(letters, 50000, replace=TRUE)
stableMode(x, method="mode")
stableMode(x, method="allModes")
```
## Text-Manipulations {#Text-Manipulations}
There are several packages offering interesting functions for manipulating text. Here are a few functions to complement these.
### Trimming Redundant Text
Automatic annotation has the tendency to concatenate many parameters into a single name.
The function `trimRedundText()` was designed to allow trimming redundant text from left and/or right side of a character-vector
(when the same portion of text appears in _each_ element).
However, as in some cases (like the first element of the example below) nothing would remain, it is possible to define a _minimum_ width for the remaining/resulting text.
```{r trimRedundText1, echo=TRUE}
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1)
```
### Extract Common Part Of Text
The original idea was to do something resembling the inverse process of trimming redundant text (example above), but this time to discard the variable text.
In the end this is not as trivial when 'common' or 'redundant' text is not at the beginning or end of a chain of characters.
In particular with very large text this is an active field of research (eg for sequence alignment).
The function presented here is a very light-weight solution designed for smaller and simple settings, like inspecting column-names.
Furthermore, the function `keepCommonText()` only reports the first (longest) hit.
So, when there are multiple conserved 'words' of equal length, only the first of them will be identified.
When setting the argument 'hiResol=FALSE' this function has an option to decrease the resolution of searching, which in turn increases the speed, however at the cost of possibly missing the optimal solution.
In this case the resulting chain of characters should be inspected to see if it can be further extended/optimized.
With terminal common text :
```{r keepCommonText1, echo=TRUE}
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1, side="left") # remove redundant
keepCommonText(txt1, side="terminal") # keep redundant
keepCommonText(txt1, side="center") # computationally easier
```
With internal common text:
```{r keepCommonText2, echo=TRUE}
txt2 <- c("abcd_abc_kjh", "bcd_abc123", "cd_abc_po")
keepCommonText(txt2, side="center")
```
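As mentioned above, the argument _hiResol=FALSE_ trades precision for speed; a quick sketch (the result may be shorter than the optimum and should be inspected) :
```{r keepCommonText3, echo=TRUE}
## faster, lower-resolution search; may miss the longest common part
keepCommonText(txt2, side="center", hiResol=FALSE)
```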
### Manipulating Enumerator-Extensions
Human operators may have many ways to write enumerators like 'xx_sample_1', 'xx_Sample_2', 'xx_s3', 'xx_4', etc.
Many times you may find such text as names or column-names for the measures underneath.
The functions presented below will work only if _consistent enumerators_, ie (text +) digit-character(s), are at the end of all character-strings to be treated.
Please note that with large vectors testing/checking a larger panel of enumerator-abbreviations may result in slower performance.
In cases of such larger data-sets it may be more effective to first study the data and then run simple substitutions using _sub()_ targeted for this very case.
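For instance, if inspection shows that all names end with '_' followed by digits, such a targeted base-R substitution might look like this minimal sketch (the sample-names below are made up) :
```{r subEnumeratorSketch, echo=TRUE}
## hypothetical sample-names, all ending with '_' plus digit(s)
xx0 <- c("sampA_1","sampB_2","sampC_3")
sub("_[[:digit:]]+$", "", xx0)    # strip the terminal enumerator
```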
#### Remove/Modify Enumerators
The aim of this function consists in identifying a _common_ pattern for terminal enumerators (ie at the end of words/character strings) and subsequently modifying or removing them.
As separator-symbols and separator-words are given independently, all combinations thereof may be tested.
Furthermore, the user has the choice to (automatically) test all truncated versions of separator-words (eg _Sam_ instead of _Sample_).
As a basic setting `rmEnumeratorName()` allows identifying and then modifying a _common_ terminal enumerator from all elements of a character string :
```{r rmEnumeratorName1, echo=TRUE}
xx <- c("hg_Re1","hjRe2_Re2","hk-Re3_Re33")
rmEnumeratorName(xx)
rmEnumeratorName(xx, newSep="--")
rmEnumeratorName(xx, incl="anyCase")
```
Furthermore, this function allows scanning a matrix of text-data and performing similar operations on the _first_ column found containing a _common_ terminal enumerator.
```{r rmEnumeratorName2, echo=TRUE}
xy <- cbind(a=11:13, b=c("11#11","2_No2","333_samp333"), c=xx)
rmEnumeratorName(xy)
rmEnumeratorName(xy,incl=c("anyCase","trim2","rmEnumL"))
```
If you wish to remove/substitute multiple types of enumerators, the function `rmEnumeratorName()` must be run independently, see the last example below.
```{r rmEnumeratorName3, echo=TRUE}
xz <- cbind(a=11:13, b=c("23#11","4#2","567#333"), c=xx)
apply(xz, 2, rmEnumeratorName, sepEnum=c("","_"), newSep="_", silent=TRUE)
```
#### Unify Enumerators
The (slightly older) function `unifyEnumerator()` offers fewer options; in particular, the potential separator-words must be given explicitly, only lower/upper-case may be kept flexible.
```{r unifyEnumerator1, echo=TRUE}
unifyEnumerator(c("ab-1","ab-2","c-3"))
unifyEnumerator(c("ab-R1","ab-R2","c-R3"))
unifyEnumerator(c("ab-1","c3-2","dR3"), stringentMatch=FALSE)
```
### Adjust Decimal Prefixes And Extract Numeric+Unit Part
The function `adjustUnitPrefix()` provides help extracting the numeric part of character vectors and allows adjusting all values to a single, common decimal prefix.
This can be used to convert a vector of mixed prefixes like 'z','a','f','p','n','u' and 'm' (note: the 'u' is used for 'micro').
The output is a numeric vector with numeric+unit as names.
```{r adjustUnitPrefix1, echo=TRUE}
adjustUnitPrefix(c("10.psec","2 fsec"), unit="sec")
```
In the example below you can see that additional text (to the right of the digit+unit) gets stripped off.
```{r adjustUnitPrefix2, echo=TRUE}
adjustUnitPrefix(c("10.psec abc","2 fsec etc"), unit="sec")
```
### Merging Multiple Named Vectors To Matrix
The function `mergeVectors()` allows merging multiple named vectors (each element needs to be named).
Basically, all elements carrying the same name across different input-vectors will be aligned in the same column of the output (input-vectors appear as lines).
Unlike _merge()_, which allows merging only 2 data.frames, here multiple vectors may be merged at once.
```{r mergeVectors1, echo=TRUE}
x1 <- c(a=1, b=11, c=21)
x2 <- c(b=12, c=22, a=2)
x3 <- c(a=3, d=43)
mergeVectors(vect1=x1, vect2=x2, vect3=x3)
```
```{r mergeVectors2, echo=TRUE}
mergeVectors(vect1=x1, vect2=x2, vect3=x3, inclInfo=TRUE) # return list with additional info
```
In the example below we'll add another vector _without_ named elements.
As you can see, a message tells that this vector has been ignored/omitted.
```{r mergeVectors3, echo=TRUE}
x4 <- 41:44 # no names - not conform for merging and will be ignored
mergeVectors(x1, x2, x3, x4)
```
### Match All Lines of Matrix To Reference
This function allows adjusting the order of lines of a matrix `mat` to a reference character-vector `ref`,
even when initial direct matching of character-strings using `match` is not possible/successful.
In this case, various variants of using `grep` will be used to see if unambiguous matching of characteristic parts of the text is possible.
All columns of `mat` will be tested and the column giving the best results will be used.
```{r matchMatrixLinesToRef1, echo=TRUE}
## Note : columns b and e allow non-ambiguous match, not all elements of e are present in a
mat0 <- cbind(a=c("mvvk","axxd","bxxd","vv"),b=c("iwwy","iyyu","kvvh","gxx"), c=rep(9,4),
d=c("hgf","hgf","vxc","nvnn"), e=c("_vv_","_ww_","_xx_","_yy_"))
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[,5])
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[1:3,5], inclInfo=TRUE)
matchMatrixLinesToRef(mat0[,-2], ref=mat0[,2], inclInfo=TRUE) # needs 'reverse grep'
```
### Order Matrix According To Reference
The function `orderMatrToRef()` has the aim of facilitating bringing a matrix of text/data into the order of a given reference (character vector).
This function will try all columns of the input-matrix to see which one gives the best coverage/highest number of matches to the reference.
If no hits are found, this function will try by partial matching (using _grep()_) all entries of the reference and vice-versa all entries of the matrix.
```{r orderMatrToRef1, echo=TRUE}
mat1 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], rep(1:5)), ncol=3)
orderMatrToRef(mat1, paste0(letters[c(3,4,5,3,4)],c(1,3,5,2,4)))
mat2 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3:5 )), ncol=3)
orderMatrToRef(mat2, paste0(letters[c(3,4,5,3,4)],c(1,3,5,1,4)))
mat3 <- matrix(paste0(letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3,3,5 )), ncol=3)
orderMatrToRef(mat3, paste0("__",letters[c(3,4,5,3,4)],c(1,3,5,1,3)))
```
### Value Matching With Option For Concatenated Terms
Sometimes we need to match terms in concatenated tables.
The function `concatMatch()` was designed to behave similarly to _match()_ but also allows searching among concatenated terms and applying some further text-simplifications.
```{r concatMatch1, echo=TRUE}
## simple example without concatenations or text-extensions
x0 <- c("ZZ","YY","AA","BB","DD","CC","D")
tab0 <- c("AA","BB,E","CC","FF,U")
match(x0, tab0)
concatMatch(x0, tab0) # same result as match(), but with names
## now let's construct something similar but with concatenations and text-extensions
x1 <- c("ZZ","YY","AA","BB-2","DD","CCdef","Dxy") # modif of single ID (no concat)
tab1 <- c("AA","WW,Vde,BB-5,E","CCab","FF,Uef")
match(x1, tab1) # match finds only the 'simplest' case (ie "AA")
concatMatch(x1, tab1) # finds all hits as in example above
x2 <- c("ZZ,Z","YY,Y","AA,Z,Y","BB-2","DD","X,CCdef","Dxy") # conatenated in 'x'
tab2 <- c("AA","WW,Vde,BB-5,E","CCab,WW","FF,UU")
concatMatch(x2, tab2) # concatenation in both 'x' and 'table'
```
### Check for (Strict) Order
The function `checkStrictOrder()` was designed to scan each line of a (numeric) input matrix for up-, down- or equal-development, ie the change to the next value on the right.
For example, when working with a matrix of 4 columns, one can look 3 times at the neighbouring value to the right (in the same line), thus the output will mention 3 events (for each line).
If _all counts_ are 'up' and 0 counts are 'down' or 'eq', the line follows a permanent increase (not necessarily linear), etc.
In some automated procedures (where the number of columns of the initial input may vary) it may be easier to test if any 0 occurs.
For this reason the argument _invertCount_ was introduced; in this case a line with a '0' occurring characterizes a constant behaviour (for the respective column).
```{r checkStrictOrder1, echo=TRUE}
set.seed(2005); mat1 <- rbind(matrix(round(runif(40),1),nc=4), rep(1,4))
head(mat1)
checkStrictOrder(mat1); mat1[which(checkStrictOrder(mat1)[,2]==0),]
```
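As described above, with the argument _invertCount_ a '0' in the output characterizes constant behaviour in the respective direction; a quick sketch :
```{r checkStrictOrder2, echo=TRUE}
## inverted counts : a 0 now flags constant behaviour (for the respective column)
head(checkStrictOrder(mat1, invertCount=TRUE))
```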
A slightly more general way of testing can be done using `checkGrpOrder()`. Here, simply a logical value will be produced for each line of input, indicating if there is constant behaviour.
When the argument _revRank=TRUE_ (default) constant up- or constant down-characteristics will be tested.
```{r checkGrpOrder1, echo=TRUE}
head(mat1)
checkGrpOrder(mat1)
checkGrpOrder(mat1, revRank=FALSE) # only constant 'up' tested
```
## Working With Regressions {#WorkingWithRegressions}
### Best Starting Point For Linear Regressions (Start of linearity)
In many types of measurements the very low level measures are delicate.
Especially when the readout starts with a baseline signal before increasing amounts of the analyte start producing a linear relationship.
In such cases some of the very lowest levels of the analyte are masked by the (random) baseline signal.
The function `linModelSelect()` presented here allows omitting some of the lowest analyte measures to focus on the linear part of the dose-response relationship.
```{r linModelSelect1, echo=TRUE}
li1 <- rep(c(4,3,3:6), each=3) + round(runif(18)/5,2)
names(li1) <- paste0(rep(letters[1:5], each=3), rep(1:3,6))
li2 <- rep(c(6,3:7), each=3) + round(runif(18)/5, 2)
dat2 <- rbind(P1=li1, P2=li2)
exp2 <- rep(c(11:16), each=3)
exp4 <- rep(c(3,10,30,100,300,1000), each=3)
## Check & plot for linear model
linModelSelect("P1", dat2, expect=exp2)
linModelSelect("P2", dat2, expect=exp2)
```
This function was designed for use with rather small data-sets with no (or very few) measures of base-line.
When larger panels of data are available, it may be better to first define a confidence interval for the base-line measurement
and then only to consider points outside this confidence interval for regressing dose-response relationships
(see also [Detection limit](https://en.wikipedia.org/wiki/Detection_limit)).
### High Throughput Testing For Linear Regressions
Once we have run multiple linear regressions on different parts of the data we might want to compare them in a single plot.
Below, we construct 10 series of data that get modeled the same way; ideally one would obtain a slope close to 1.0.
We still allow omitting some starting points, if the resulting model would fit better.
```{r plotLinModelCoef1, echo=TRUE}
set.seed(2020)
x1 <- matrix(rep(c(2,2:5),each=20) + runif(100) +rep(c(0,0.5,2:3,5),20),
byrow=FALSE, ncol=10, dimnames=list(LETTERS[1:10],NULL))
## just the 1st regression :
summary(lm(b~a, data=data.frame(b=x1[,1], a=rep(1:5,each=2))))
## all regressions
x1.lmSum <- t(sapply(lapply(rownames(x1), linModelSelect, dat=x1,
expect=rep(1:5,each=2), silent=TRUE, plotGraph=FALSE),
function(x) c(x$coef[2,c(4,1)], startFr=x$startLev)))
x1.lmSum <- cbind(x1.lmSum, medQuantity=apply(x1,1,median))
x1.lmSum[,1] <- log10(x1.lmSum[,1])
head(x1.lmSum)
```
Now we can try to plot :
```{r plotLinModelCoef2, echo=TRUE}
wrGraphOK <- requireNamespace("wrGraph", quietly=TRUE) # check if package is available
if(wrGraphOK) wrGraph::plotW2Leg(x1.lmSum, useCol=c("Pr(>|t|)","Estimate","medQuantity","startFr"),
legendloc="topleft", txtLegend="start at")
```
## Combinatorics Issues {#CombinatoricsIssues}
### All Pairwise Ratios
`ratioAllComb()` calculates all possible pairwise ratios between all individual values of x and y.
```{r ratioAllComb0, echo=TRUE}
set.seed(2014); ra1 <- c(rnorm(9,2,1), runif(8,1,2))
```
Let's assume there are 2 parts of 'x' for which we would like to know the representative ratio :
The ratio of medians does not well reflect the typical ratio (if each element has the same chance to be picked).
```{r ratioAllComb1, echo=TRUE}
median(ra1[1:9]) / median(ra1[10:17])
```
Instead, we'll build all possible ratios and summarize them.
```{r ratioAllComb2, echo=TRUE}
summary( ratioAllComb(ra1[1:9], ra1[10:17]))
boxplot(list(norm=ra1[1:9], unif=ra1[10:17], rat=ratioAllComb(ra1[1:9],ra1[10:17])))
```
### Count Frequency Of Terms Combined From Different Drawings (combineAsN)
The main idea of this function is to count frequency of terms when combining different drawings.
Suppose you are asking students for their preferred hobbies.
Now, you want to know how many terms will occur in common in groups of 3 students.
In the example below, simple letters are shown instead of names of hobbies ...
In its simplest way of use, `combineAsN()` does something similar to _table()_ :
Here we're looking at the full combinatorics of making groups of _nCombin_ students and counting the frequency of terms found 3 times identical, 2 times or only once (ie not cited by the others).
In case multiple groups of _nCombin_ students can be formed, the average of the counts, the standard error of the mean (sem), the 95% confidence interval (CI) and the sd are given to summarize the results.
```{r combineAsN1, echo=TRUE}
tm1 <- list(a1=LETTERS[1:7], a2=LETTERS[3:9], a3=LETTERS[6:10], a4=LETTERS[8:12])
combineAsN(tm1, nCombin=3, lev=gl(1,4))[,1,]
```
One may imagine that different locations/cities/countries will give different results.
Thus, we'll declare the different origins/locations using the _lev_ argument.
Now, this function focuses (by default) on combinations of students from _nCombin_ different origins/locations and
counts how many hobbies were mentioned as all different ('sing', ie number of hobbies only one student mentioned),
single repeat ('doub') or three times repeated ('trip'), plus minimum twice or 'any' (ie number of hobbies cited no matter how many repeats).
The output is an array; the 3rd dimension contains the counts, followed by sem, CI and sd.
```{r combineAsN2, echo=TRUE}
## different levels/groups in list-elements
tm4 <- list(a1=LETTERS[1:15], a2=LETTERS[3:16], a3=LETTERS[6:17], a4=LETTERS[8:19],
b1=LETTERS[5:19], b2=LETTERS[7:20], b3=LETTERS[11:24], b4=LETTERS[13:25], c1=LETTERS[17:26],
d1=LETTERS[4:12], d2=LETTERS[5:11], d3=LETTERS[6:12], e1=LETTERS[7:10])
te4 <- combineAsN(tm4, nCombin=4, lev=substr(names(tm4),1,1))
str(te4)
te4[,,1] # the counts part only
```
## Import/Export
### Batch-Reading Of CSV Files
Some software produces a series of csv files, where a large experiment/data-set gets recorded as multiple files.
The function `readCsvBatch()` was designed for reading multiple csv files of exactly the same layout and to join their content.
As output a list with the content of each file can be produced (one matrix per file), or the data may be fused into an array, as shown below.
```{r readCsvBatch, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("pl01_1.csv","pl01_2.csv","pl02_1.csv","pl02_2.csv")
datAll <- readCsvBatch(fiNa, path1, silent=TRUE)
str(datAll)
```
When setting the first argument _fileNames_ to _NULL_, you can read all files of a given path.
```{r readCsvBatch2, echo=TRUE}
## batch reading of all csv files in specified path :
datAll2 <- readCsvBatch(fileNames=NULL, path=path1, silent=TRUE)
str(datAll2)
```
### Batch-Reading Of Tabulated Files
The function `readTabulatedBatch()` allows fast batch reading of tabulated files.
All files specified (or all files from a given directory) will be read into separate data.frames of a list.
Default options are US-style comma and automatic testing for a header in case the package _data.table_ is available (otherwise: no header).
Furthermore, it is possible to designate a given (numeric) column and directly filter for all lines passing a given threshold, allowing to get smaller objects.
```{r readTabulatedBatch1, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("a1.txt","a2.txt")
allTxt <- readTabulatedBatch(fiNa, path1)
str(allTxt)
```
### Reading Incomplete Tables
Sometimes we may get confronted with data which look like 'incomplete' tables.
In such cases some rows do not contain as many elements/columns as other rows.
Files with this type of data may pose a problem for `read.table()` (from the _utils_ package).
In some cases using the argument _fill=TRUE_ may allow to overcome this problem.
The function _readVarColumns()_ (from this package) was designed to provide better help in such odd cases.
Basically, each line is read and parsed separately, the user should check/decide on the separator to be used.
The example below lists people's names in different locations, some locations have more persons ...
Sometimes exporting such data will generate shorter lines in locations with fewer elements (here 'London') and no additional separators will get added (to mark all empty fields) towards the end.
The function `readVarColumns()` (from this package) provides help to read such data, if the content (and separators) of the last columns are missing.
```{r readVarColumns, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- "Names1.tsv"
datAll <- readVarColumns(fiName=file.path(path1,fiNa), sep="\t")
str(datAll)
```
In this example _readVarColumns()_ gives a warning (and column-names are not recognized); if you use the argument _header=TRUE_ you'll get an error and nothing gets read.
### Converting Url For Reading Tabulated Data From GitHub
[GitHub](https://github.com/) allows sharing code and (to a lower degree) data.
In order to properly read tabulated (txt, tsv or csv) data directly from a given url, the user should switch to the 'Raw' view.
The function `gitDataUrl()` allows to conveniently switch any url (on git) to the format of the 'Raw' view, suitable for directly reading the data using _read.delim()_, _read.table()_ or _read.csv()_ etc.
```{r readGit1, echo=TRUE}
## An example url with tabulated data :
url1 <- "https://github.com/bigbio/proteomics-metadata-standard/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv"
gitDataUrl(url1)
```
The example below shows how this is used in the function _readSampleMetaData()_ in [wrProteo](https://CRAN.R-project.org/package=wrProteo).
```{r readGit2, echo=TRUE}
dataPxd <- try(read.delim(gitDataUrl(url1), sep='\t', header=TRUE))
str(dataPxd)
```
---
## Normalization {#Normalization}
The main reason of normalization is to remove variability in the data which is not directly linked to the (original/biological) concept of a given experiment.
High throughput data from real world measurements may easily contain various deformations due to technical reasons, eg slight temperature variations, electromagnetic interference, instability of reagents etc.
In particular, transferring constant amounts of liquids/reagents in highly repeated steps over large experiments is often also very challenging, small variations of the amounts of liquid (or similar) are typically addressed by normalization. However, applying aggressive normalization to the data also brings considerable risk of starting to loose some of the effects one intended to study.
At some point it may rather be better to eliminate a few samples or branches of an experiment to avoid too invasive intervention. This shows that quality control can be tightly linked to decisions about data-normalization.
In conclusion, normalization may be far more challenging than simply running some algorithms.
In general, the user has to assume/define some hypothesis to justify intervention.
Sometimes specific elements of an experiment are known to be not affected and can therefore be used to normalize the rest.
Eg, if you observe growth of trees in a forest, big blocks of rock on the floor are assumed not to change their location.
So one could use them as alignment-marks to superpose pictures taken at slightly different positions.
The hypothesis of no global changes is very common : During the course of many biological experiments (eg change of nutrient) one
assumes that only a small portion of the elements measured (eg the abundance of all different gene-products) do change,
since many processes of a living cell like growth, replication and interaction with neighbour-cells are assumed not to be affected.
So, if one assumes that there are no global changes one normalizes the input-data in a way that the average or median across each experiment will give the same value.
In analogy, if one takes photographs on a partially cloudy day, most cameras will adjust light settings (sun or clouds) so that global luminosity stays the same.
However, if too many of the measured elements are affected, this normalization approach will lead to (additional) loss of information.
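As a minimal base-R illustration of this 'no global changes' hypothesis, one could shift each column to a common median (the dedicated function `normalizeThis()` presented further below offers this and more) :
```{r medianShiftSketch, echo=TRUE}
## minimal sketch : shift all columns to the same (global) median
m0 <- matrix(round(rnorm(60, rep(c(10,12,14), each=20), 1), 2), ncol=3)
m1 <- sweep(m0, 2, apply(m0, 2, median) - median(m0))
apply(m1, 2, median)    # all columns now share the same median
```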
It is _essential_ to understand the type of deformation(s) data may suffer from in order to choose the appropriate approaches for normalization.
Of course, graphical representations ([PCA](https://en.wikipedia.org/wiki/Principal_component_analysis), [MA-plots](https://en.wikipedia.org/wiki/MA_plot), etc) are extremely important for identifying abnormalities and potential problems.
The package [wrGraph](https://CRAN.R-project.org/package=wrGraph) offers also complementary options useful in the context of normalization.
Again, graphical representation(s) of the data help to visualize how different normalization procedures affect outcomes.
Before jumping into normalization it may be quite useful to _filter_ the data first.
The overall idea is that most high-throughput experiments do produce some non-meaningful data (artefacts) and it may be wise to remove such 'bad' data
first, as they may affect normalization (in particular _extreme values_).
A special case of problematic data concerns _NA_-values.
### Filter Lines Of Matrix To Reduce Content Of NAs
Frequent _NA_-values may represent another potential issue. With NA-values there is no general optimal advice.
To get started, you should try to investigate how and why NA-values occurred to check if there is a special 'meaning' to them.
For example, on some measurement systems values below detection limit may be simply reported as NAs.
If the lines of your data represent different features quantified (eg proteins), then lines with mostly NA-values represent features
that may not be well exploited anyway. Therefore many times one tries to filter away lines of 'bad' data.
Of course, if there is a column (sample) with an extremely high content of NAs, one should also investigate what might be particular
with this column (sample), to see if one might be better off eliminating the entire column.
Please note, that imputing _NA_-values represents another option instead of filtering and removing, multiple other packages address this in detail, too.
All decisions of which approach to use should be data-driven.
#### Filter For Each Group Of Columns For Sufficient Data As Non-NA
The function `presenceGrpFilt()` allows filtering lines for sufficient presence of data (ie non-NA values) within each group of columns (eg replicates), as shown below.
```{r presenceGrpFilt1, echo=TRUE}
dat1 <- matrix(1:56,ncol=7)
dat1[c(2,3,4,5,6,10,12,18,19,20,22,23,26,27,28,30,31,34,38,39,50,54)] <- NA
grp1 <- gl(3,3)[-(3:4)]
dat1
## now let's filter
presenceGrpFilt(dat1, gr=grp1, presThr=0.75) # stringent
presenceGrpFilt(dat1, gr=grp1, presThr=0.25) # less stringent
```
#### Filter As Separate Pairwise Groups Of Samples
If you want to use your data in a pair-wise view (like running t-tests on each line) the function `presenceFilt()`
allows eliminating lines containing too many _NA_-values for each pair-wise combination of the groups/levels.
```{r presenceFilt, echo=TRUE}
presenceFilt(dat1, gr=grp1, maxGr=1, ratM=0.1)
presenceFilt(dat1, gr=grp1, maxGr=2, rat=0.5)
```
#### Cleaning Replicates
This procedure aims to remove (by setting to _NA_) the most extreme of noisy replicates.
Thus, it is assumed that all columns of the input matrix (or data.frame) are replicates of the other columns.
The _nOutl_ most distant points are identified and will be set to _NA_.
```{r cleanReplicates, echo=TRUE}
(mat3 <- matrix(c(19,20,30,40, 18,19,28,39, 16,14,35,41, 17,20,30,40), ncol=4))
cleanReplicates(mat3, nOutl=1)
cleanReplicates(mat3, nOutl=3)
```
### The Function normalizeThis()
In biological high-throughput data columns typically represent different samples, which may be organized as replicates.
During high-throughput experiments thousands of (independent) elements are measured (eg abundance of gene-products), they are represented by rows.
As real-world experiments are not always as perfect as we may think, small changes in the signal measured may easily happen.
Thus, the aim of normalizing is to remove or reduce any trace/variability in the data not related to the original experiment but due to imperfections during detection.
Note, that some experiments may produce a considerable amount of missing data (NAs) which require special attention (dedicated developments exist in other R-packages eg in [wrProteo](https://CRAN.R-project.org/package=wrProteo)).
My general advice is to first carefully look where such missing data is observed and to pay attention to replicate measurements
where a given element once was measured with a real numeric value and once as missing information (NA).
```{r normalizeThis0, echo=TRUE}
set.seed(2015); rand1 <- round(runif(300) +rnorm(300,0,2),3)
dat1 <- cbind(ser1=round(100:1 +rand1[1:100]), ser2=round(1.2*(100:1 +rand1[101:200]) -2),
ser3=round((100:1 +rand1[201:300])^1.2-3))
dat1 <- cbind(dat1, ser4=round(dat1[,1]^seq(2,5,length.out=100) +rand1[11:110],1))
## Let's introduce some NAs
dat1[dat1 <1] <- NA
## Let's get a quick overview of the data
summary(dat1)
## some selected lines (indeed, the 4th column appears always much higher)
dat1[c(1:5,50:54,95:100),]
```
Our toy data may be normalized by a number of different criteria.
In real applications the nature of the data and the type of deformation detected/expected will largely help
deciding which normalization might be the 'best' choice. Here we'll try first normalizing by the mean,
ie all columns will be forced to end up with the same column-mean.
The trimmed mean does not consider values at extremes (as outliers are frequently artefacts and display extreme values).
When restricting even stronger which values to consider one will eventually end up with the median (3rd method used below).
```{r normalizeThis1, echo=TRUE}
no1 <- normalizeThis(dat1, refGrp=1:3, meth="mean")
no2 <- normalizeThis(dat1, refGrp=1:3, meth="trimMean", trim=0.4)
no3 <- normalizeThis(dat1, refGrp=1:3, meth="median")
no4 <- normalizeThis(dat1, refGrp=1:3, meth="slope", quantFa=c(0.2,0.8))
```
It is suggested to verify normalization results by plots.
Note, that [Box plots](https://en.wikipedia.org/wiki/Box_plot) may not be appropriate in some cases (eg multimodal distributions),
for displaying more details you may consider using [Violin-Plots](https://en.wikipedia.org/wiki/Violin_plot) from packages [vioplot](https://CRAN.R-project.org/package=vioplot) or [wrGraph](https://CRAN.R-project.org/package=wrGraph), another option might be a (cumulated) frequency plot (eg in package [wrGraph](https://CRAN.R-project.org/package=wrGraph)).
```{r normalizeThis_plot1, echo=FALSE,eval=TRUE}
boxplot(dat1, main="raw data", las=1)
```
You can see clearly, that the 4th data-set has a problem of range. So we'll see if some proportional normalization
may help to make it more comparable to the other ones.
```{r normalizeThis_plot2, echo=FALSE,eval=TRUE}
layout(matrix(1:4, ncol=2))
boxplot(no1, main="mean normalization", las=1)
boxplot(no2, main="trimMean normalization", las=1)
boxplot(no3, main="median normalization", las=1)
boxplot(no4, main="slope normalization", las=1)
```
### Normalize By Rows
The standard approach for normalizing relies on considering all columns as collections of data whose distribution is not supposed to change.
In some cases/projects we may want to formulate a much more 'aggressive' hypothesis : we consider the content of all columns strictly as the same.
For example this may be the case when comparing with technical replicates only.
In such cases one may use the function `rowNormalize()` which tries to find the average or mean optimal within-line normalization factor.
Besides, an additional mode of operation for _sparse data_ has been added :
Basically, once a row contains just one NA, this row can't be used any more to derive a normalization factor for all rows.
Thus, with many NA-values the number of 'complete' rows will be low or even 0, rendering this approach inefficient or impossible.
Once the content of NA-values is above a customizable threshold, the data will be broken in smaller subsets with fewer groups of fewer columns,
thus increasing the chances of finding 'complete' subsets of data which will be normalized first and added to other subsets in later steps.
This approach relies on the **hypothesis** that *all data in a given line should be (approximately) the same value* !
Thus, this procedure is particularly well adapted to the case when _all_ samples are multiple replicate measurements of the _same_ sample.
```{r rowNormalize1, echo=TRUE}
set.seed(2); AA <- matrix(rbinom(110, 10, 0.05), nrow=10)
AA[,4:5] <- AA[,4:5] *rep(4:3, each=nrow(AA))
AA1 <- rowNormalize(AA)
round(AA1, 2)
```
Now, let's make this sparse and try normalizing:
```{r rowNormalize2, echo=TRUE}
AC <- AA
AC[which(AC <1)] <- NA
(AC1 <- rowNormalize(AC))
```
Like with _normalizeThis()_ we can define some reference-lines (only these lines will be considered to determine the normalization-factors) :
```{r rowNormalize3, echo=TRUE}
(AC3 <- rowNormalize(AC, refLines=1:5, omitNonAlignable=TRUE))
```
Please note, that the iterative procedure for _sparse data_ may consume large amounts of computational resources, in particular when
a small number of subgroups has been selected.
### Matrix Coordinates Of Values/Points According To Filtering
Sometimes one needs to obtain the coordinates of values/points of a matrix according to a given filtering condition.
The standard approach using _which()_ gives only a _linearized_ index but not the row/column position (the linearized index is, however, sufficient for replacing indexed values).
If you need to know the true row/column indexes, you may use `coordOfFilt()`.
```{r coordOfFilt1, echo=TRUE}
set.seed(2021); ma1 <- matrix(sample.int(n=40, size=27, replace=TRUE), ncol=9)
## let's test which values are >37
which(ma1 >37) # doesn't tell which row & col
coordOfFilt(ma1, ma1 >37)
```
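For comparison, a similar row/column output can also be obtained in base R via the argument _arr.ind_ of _which()_ :
```{r coordOfFilt2, echo=TRUE}
## base-R analogue : array indices instead of linearized ones
which(ma1 >37, arr.ind=TRUE)
```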
## Statistical Testing {#StatisticalTesting}
### Normal Random Number Generation with Close Fit to Expected mean and sd
When creating random values to an expected _mean_ and _sd_, the results obtained using the standard function `rnorm()`
may deviate somehow from the expected mean and sd, in particular with low _n_.
To still produce random values fitting closely to the expected _mean_ and _sd_ you may use the function `rnormW()`.
The case of _n=2_ is quite simple, with only one possible result.
In other cases (_n>2_), there will be a random initiation which can be fixed using the argument _seed_.
```{r rnormW1, echo=TRUE}
## some sample data :
x1 <- (11:16)[-5]
mean(x1); sd(x1)
```
```{r rnormW2, echo=TRUE}
## the standard way for generating normal random values
ra1 <- rnorm(n=length(x1), mean=mean(x1), sd=sd(x1))
## In particular with low n, the random values deviate somehow from expected mean and sd :
mean(ra1) -mean(x1)
sd(ra1) -sd(x1)
```
```{r rnormW3, echo=TRUE}
## random numbers with close fit to expected mean and sd :
ra2 <- rnormW(length(x1), mean(x1), sd(x1))
mean(ra2) -mean(x1)
sd(ra2) -sd(x1) # much closer to expected value
```
Thus, the second data-set fits, even with low _n_, very well to the global characteristics defined/expected.
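As mentioned above, with _n=2_ there is only one possible solution (up to the order of the two values), so no random seed matters; a quick sketch :
```{r rnormW4, echo=TRUE}
## with n=2 the result is fully determined by the requested mean and sd
rnormW(2, mean=10, sd=1)
```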
### Moderated Pair-Wise t-Test from limma
If you are not familiar with the way data is handled in the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html)
and you would like to use some of the tools for running moderated t-tests therein, this will provide easy access using `moderTest2grp()` :
```{r moderTest2grp, echo=TRUE}
set.seed(2017); t8 <- matrix(round(rnorm(1600,10,0.4),2), ncol=8,
dimnames=list(paste("l",1:200), c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2]+3 # augment lines 3:6 for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6]+3 # augment lines 5:8 for AA2&BB2 (c,d,g,h should be found)
t4 <- log2(t8[,1:4]/t8[,5:8])
fit4 <- moderTest2grp(t4, gl(2,2))
## now we'll use limma's topTable() function to look at the 'best' results
if("list" %in% mode(fit4)) { # if you have limma installed we can look further
library(limma)
topTable(fit4, coef=1,n=5) # effect for 3,4,7,8
fit4in <- moderTest2grp(t4, gl(2,2), testO="<")
if("list" %in% mode(fit4in)) topTable(fit4in, coef=1,n=5) }
```
### Multiple Moderated Pair-Wise t-Tests From limma
If you want to make multiple pair-wise comparisons using `moderTestXgrp()` :
```{r moderTestXgrp, echo=TRUE}
grp <- factor(rep(LETTERS[c(3,1,4)], c(2,3,3)))
set.seed(2017); t8 <- matrix(round(rnorm(208*8,10,0.4),2), ncol=8,
dimnames=list(paste(letters[], rep(1:8,each=26),sep=""), paste(grp,c(1:2,1:3,1:3),sep="")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f)
t8[5:8,c(1:2,6:8)] <- t8[5:8,c(1:2,6:8)] -1.5 # lower lines
t8[6:7,3:5] <- t8[6:7,3:5] +2.2 # augment lines
## expect to find C/A in c,d,g, (h)
## expect to find C/D in c,d,e,f
## expect to find A/D in f,g,(h)
test8 <- moderTestXgrp(t8, grp)
head(test8$p.value, n=8)
```
### Transform p-values To Local False Discovery Rate (lfdr)
To get an introduction into local false discovery rate estimations you may read [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209).
A convenient way to get lfdr values calculated by the package [fdrtool](https://CRAN.R-project.org/package=fdrtool) is available via the function `pVal2lfdr()`.
Note, that the toy-example used below is too small for estimating meaningful lfdr values.
For this reason the function _fdrtool()_ from package [fdrtool](https://CRAN.R-project.org/package=fdrtool) will issue warnings.
```{r pVal2lfdr, echo=TRUE}
set.seed(2017); t8 <- matrix(round(rnorm(160,10,0.4),2), ncol=8, dimnames=list(letters[1:20],
c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f) for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6] +3 # augment lines 5:8 (e-h) for AA2&BB2 (c,d,g,h should be found)
head(pVal2lfdr(apply(t8, 1, function(x) t.test(x[1:4], x[5:8])$p.value)))
```
### Confidence Intervals (under Normal Distribution)
The [confidence interval (CI)](https://en.wikipedia.org/wiki/Confidence_interval) is a common way of describing the uncertainty of measured or estimated values.
The function `confInt()` allows calculating the confidence interval of the mean (using the functions _qt()_ and _sd()_) under
a given [significance level (alpha)](https://en.wikipedia.org/wiki/Statistical_significance),
assuming that the [Normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) is valid.
```{r fcCI, echo=TRUE}
set.seed(2022); ran <- rnorm(50)
confInt(ran, alpha=0.05)
## plot points and confidence interval of mean
plot(ran, jitter(rep(1, length(ran))), ylim=c(0.95, 1.05), xlab="random variable 'ran'",main="Points and Confidence Interval of Mean (alpha=0.05)", ylab="", las=1)
points(mean(ran), 0.97, pch=3, col=4) # mean
lines(mean(ran) +c(-1, 1) *confInt(ran, 0.05), c(0.97, 0.97), lwd=4, col=4) # CI
legend("topleft","95% conficence interval of mean", text.col=4,col=4,lty=1,lwd=1,seg.len=1.2,cex=0.9,xjust=0,yjust=0.5)
```
### Extract Groups Of Replicates From Pair-Wise Column-Names
When running multiple pairwise tests (using *moderTestXgrp()*) the column-names are concatenated group-names.
To get the index of which group has been used in which pair-wise set you may use the function `matchSampToPairw()`, as shown below.
```{r matchSampToPairw, echo=TRUE}
## make example if limma is not installed
if(!requireNamespace("limma", quietly=TRUE)) test8 <- list(FDR=matrix(1, nrow=2, ncol=3, dimnames=list(NULL,c("A-C","A-D","C-D"))))
matchSampToPairw(unique(grp), colnames(test8$FDR))
```
### Extract Numeric Part Of Column-Names
When running multiple pairwise tests (using *moderTestXgrp()*) the results will be in adjacent columns and the group-names reflected in the column-names.
In the case measurements from multiple levels of a given variable are compared, it is useful to extract the numeric part; the function `numPairDeColNames()` provides support to do so.
When extracting just the numeric part, unit names will get lost, though. Note, if units used are not constant (eg seconds and milliseconds mixed) the extracted numeric values do not reflect the real quantitative context any more.
```{r pairWiseConc1, echo=TRUE}
mat1 <- matrix(1:8, nrow=2, dimnames=list(NULL, paste0(1:4,"-",6:9)))
numPairDeColNames(mat1)
```
### Automatic Determination Of Replicate Structure Based On Meta-Data
In order to run statistical testing the user must know which sample should be considered a replicate of which other sample(s).
The function `replicateStructure()` aims to provide help by checking all columns of a matrix of meta-data with the aim of identifying the replicate-status.
To do so, all columns are examined for how many groups of replicates they may designate. Depending on the argument _method_, various options for choosing automatically exist :
By default, the column with the median number of groups (not counting all-different or all-same columns) will be selected.
When using _method="combAll"_ (ie combining all columns that are neither all-different nor all-same), there is a risk that all lines (samples) will be considered different and no replicates remain.
To avoid this situation the argument _method_ can be set to _"combNonOrth"_.
Then, it will be checked if adding more columns would lead to a complete loss of replicates, and if so, the concerned columns will be omitted.
```{r replicateStructure1, echo=TRUE}
## column a is all different, b is groups of 2,
## c & d are groups of 2 nut NOT 'same general' pattern as b
strX <- data.frame(a=letters[18:11], b=letters[rep(c(3:1,4), each=2)],
c=letters[rep(c(5,8:6), each=2)], d=letters[c(1:2,1:3,3:4,4)],
e=letters[rep(c(4,8,4,7),each=2)], f=rep("z",8) )
strX
replicateStructure(strX[,1:2])
replicateStructure(strX[,1:4], method="combAll")
replicateStructure(strX[,1:4], method="combAll", exclNoRepl=FALSE)
replicateStructure(strX[,1:4], method="combNonOrth", exclNoRepl=TRUE)
replicateStructure(strX, method="lowest")
```
## Working With Clustering {#WorkingWithClustering}
Multiple concepts for clustering have been developed; most of them allow extracting a vector with the cluster-numbers.
Here some functions helping to work with the output of such clustering results are presented.
### Prepare Data For Clustering
The way data is prepared for clustering may be as important as the choice of the actual clustering-algorithm ...
Many clustering algorithms are available in R (eg see also [CRAN Task View: Cluster Analysis & Finite Mixture Models](https://CRAN.R-project.org/view=Cluster)), many of them require the input data to be standardized.
The regular way of standardizing sets all elements to mean=0 and sd=1.
To do so, the function `scale()` may be used.
```{r std1, echo=TRUE}
dat <- matrix(2*round(runif(100),2), ncol=4)
mean(dat); sd(dat)
datS <- scale(dat)
apply(datS, 2, sd)
# each column was treated separately
mean(datS); sd(datS); range(datS)
# the mean is almost 0.0 and the sd almost 1.0
datB <- scale(dat, center=TRUE, scale=FALSE)
mean(datB); sd(datB); range(datB) # mean is almost 0
```
However, if you want to standardize the entire data-set as a whole, and not each column separately, you may use `standardW()`.
Thus, relative differences visible within a line will be conserved.
Furthermore, in case of 3-dim arrays, this function returns also the same dimensions as the input.
```{r std2, echo=TRUE}
datS2 <- standardW(dat)
apply(datS2, 2, sd)
summary(datS2)
mean(datS2); sd(datS2)
datS3 <- standardW(dat, byColumn=TRUE)
apply(datS3, 2, sd)
summary(datS3)
mean(datS3); sd(datS3)
```
Sometimes it is sufficient to only set the minimum and maximum to a given range.
```{r scale1, echo=TRUE}
datR2 <- apply(dat, 2, scaleXY, 1, 100)
summary(datR2); sd(datR2)
```
### Characterize Clustering Results
Here a very basic clustering example...
```{r clu01, echo=TRUE}
nGr <- 3
irKm <- stats::kmeans(iris[,1:4], nGr, nstart=nGr*4) # no need to standardize
table(irKm$cluster, iris$Species)
#wrGraph::plotPCAw(t(as.matrix(iris[,1:4])), sampleGrp=irKm,colBase=irKm$cluster,useSymb=as.numeric(as.factor(iris$Species)))
```
Using the function `reorgByCluNo()` we can now 'apply' the clustering result to the initial data to obtain other information.
```{r clu02, echo=TRUE}
## sort results by cluster number
head(reorgByCluNo(iris[,-5], irKm$cluster))
tail(reorgByCluNo(iris[,-5], irKm$cluster))
```
Let's calculate the median and sd values for each cluster:
```{r clu03, echo=TRUE}
## median and sd per cluster
ir2 <- reorgByCluNo(iris[,-5], irKm$cluster, addInfo=FALSE, retList=TRUE)
```
```{r clu04, echo=TRUE}
sapply(ir2, function(x) apply(x, 2, median))
```
```{r clu05, echo=TRUE}
sapply(ir2, colSds)
```
Besides, we have already seen the function `cutArrayInCluLike()` in the section [Working with Arrays](#WorkingWithArrays).
## Tree-Like Structures {#TreeLikeStructures}
### Filter Lists Of Connected Nodes, Extension Of Networks As 'Sandwich'
When interrogating network-databases (like String for proteins or the coexpressionDB for gene co-expression) typically a (semi-)quantitative
value is supplied with the connection of node 'A' to node 'B'.
In many cases, it may be useful to filter the initial query-output to retain only strong interactions.
Furthermore, it may be of interest to expand such networks by nodes allowing to (further) inter-connect the initial query-nodes
(so-called 'sandwich' nodes, as they are in the middle of initial nodes); for such nodes a separate (eg even more stringent) threshold can be applied.
Here let's suppose nodes have 3-digit names (ie numbers). 7 nodes of an initial query gave 1 to 7 connected nodes,
the results are presented as a list of data.frames where the 1st column is the connected node and the 2nd column the quality score of the connection (edge).
Furthermore, let's assume that here lower scores are better.
```{r filterNetw0, echo=TRUE}
lst2 <- list('121'=data.frame(ID=as.character(c(141,221,228,229,449)),11:15),
'131'=data.frame(ID=as.character(c(228,331,332,333,339)),11:15),
'141'=data.frame(ID=as.character(c(121,151,229,339,441,442,449)),c(11:17)),
'151'=data.frame(ID=as.character(c(449,141,551,552)),11:14),
'161'=data.frame(ID=as.character(171),11),
'171'=data.frame(ID=as.character(161),11),
'181'=data.frame(ID=as.character(881:882),11:12) )
```
Now, we'd like to keep the core network consisting of all (directly) interconnected nodes with scores below 20 :
```{r filterNetw1, echo=TRUE}
(nw1 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=FALSE))
```
In the resulting output the 1st column now represents the query-nodes, the 2nd column all connected nodes based on filtering scores for edges,
and the 3rd column the score for the edges.
Let's also remove all nodes not connected to a backbone at least 3 nodes long, ie remove orphan pairs of nodes :
```{r filterNetw2, echo=TRUE}
(nw2 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=TRUE))
```
If we want to expand this network by nodes allowing to further interconnect the nodes from above,
we can add all 'sandwich' nodes (let's use a threshold of inferior/equal to 14 which will use only the better 'sandwich'-edges) :
```{r filterNetw3, echo=TRUE}
(nw3 <- filterNetw(lst2, limInt=20, sandwLim=14, remOrphans=TRUE))
```
### Convert Collection Of Pairs Of Nodes To Propensity Matrix
Many times networks get created from pairs of nodes. One way to represent the full network is via propensity matrices.
Several advanced tools and packages rather accept such propensity matrices as input.
Here, it is assumed that each line of the input represents a separate pair of nodes connected by an edge.
```{r propMatr1, echo=TRUE}
pairs3L <- matrix(LETTERS[c(1,3,3, 2,2,1)], ncol=2) # loop of 3
(netw13pr <- pairsAsPropensMatr(pairs3L)) # as prop matr
```
### Characterize Individual Contribution Of Single Edges In Tree-Structures
```{r contribToContigPerFrag, echo=TRUE}
path1 <- matrix(c(17,19,18,17, 4,4,2,3), ncol=2,
dimnames=list(c("A/B/C/D","A/B/G/D","A/H","A/H/I"), c("sumLen","n")))
contribToContigPerFrag(path1)
```
### Count Same Start- And End- Sites Of Edges (Or Fragments)
If you have a set of fragments from a common ancestor and the fragment's start- and end-sites
are marked by index-positions (integers), you can make a simple graphical display :
```{r simpleFragFig, echo=TRUE}
frag1 <- cbind(beg=c(2,3,7,13,13,15,7,9,7, 3,3,5), end=c(6,12,8,18,20,20,19,12,12, 4,5,7))
rownames(frag1) <- letters[1:nrow(frag1)]
simpleFragFig(frag1)
```
Now we can make a matrix telling if some fragments do start or end at exactly the same position.
```{r countSameStartEnd, echo=TRUE}
countSameStartEnd(frag1)
```
## Support for Graphical Output {#SupportForGraphicalOutput}
### Convenient Paste-Collapse
The function `pasteC()` allows adding quotes and separating the last element by specific text (eg 'and').
```{r pasteC, echo=TRUE}
pasteC(1:4)
pasteC(letters[1:4],quoteC="'")
```
### Transform Numeric Values to Color-Gradient
By default most color-gradients end with a color very close to the beginning.
```{r color-gradient1, echo=TRUE}
set.seed(2015); dat1 <- round(runif(15),2)
plot(1:15, dat1, pch=16, cex=2, las=1, col=colorAccording2(dat1),
main="Color gradient according to value in y")
# Here we modify the span of the color gradient
plot(1:15, dat1, pch=16, cex=2, las=1,
col=colorAccording2(dat1, nStartO=0, nEndO=4, revCol=TRUE), main="blue to red")
# It is also possible to work with scales of transparency
plot(1:9, pch=3, las=1)
points(1:9, 1:9, col=transpGraySca(st=0, en=0.8, nSt=9,trans=0.3), cex=42, pch=16)
```
### Assign New Transparency To Given Colors
For this purpose you may use `convColorToTransp`.
```{r convColorToTransp, fig.height=6, fig.width=3, echo=TRUE}
col0 <- c("#998FCC","#5AC3BA","#CBD34E","#FF7D73")
col1 <- convColorToTransp(col0,alph=0.7)
layout(1:2)
pie(rep(1,length(col0)), col=col0, main="no transparency")
pie(rep(1,length(col1)), col=col1, main="new transparency")
```
### Print Matrix-Content As Plot
There are many ways of creating reports. If you simply want to combine a few plots into a pdf, the function `tableToPlot()`
may be helpful to add a small table (eg an overview of points/samples/files used in other plots of the same pdf).
This function prints tables in the current graphical output/window (which may be a pdf-device).
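Below a minimal sketch (not evaluated here); note that passing the table as 1st argument is an assumption made for illustration, please check the function's help-page for the exact arguments.
```{r tableToPlot1, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : combine a plot and a small overview-table in one pdf
## NOTE : passing the table as 1st argument is an assumption, see ?tableToPlot
pdf("myReport.pdf")
plot(1:10, las=1)               # some plot ...
tableToPlot(head(iris[,1:3]))   # ... plus a small table on its own page
dev.off()
```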
## Other Convenience Functions {#OtherConvenienceFunctions}
### Writing Compact Dates (more options ...)
Many times it may be useful to add the date to filenames when saving data or plots as files.
The built-in functions _date()_, _Sys.Date()_ and _Sys.Time()_ are a good way to start.
Generally I like to use abbreviated month-names, since the order of writing day and month differs between Europe and the USA;
spelling out the month (instead of writing its number) may thus help avoid mis-interpreting dates.
For example, 2021-03-05 means March 5th in Europe, while in other places it means May 3rd.
The R-functions mentioned above use local language settings, so I wrote the function `sysDate` to
produce compact versions of the current date, **independent of local language settings** (or locale-specific, if you prefer;
note that in some languages - like French - the first 3 letters of the month may give ambiguous results !)
and to avoid white space ' ' (which I prefer to avoid in file-names).
Please look at the function's help-page for all available options.
```{r sysDate1, echo=TRUE}
## To get started
Sys.Date()
## Compact English names (in European order), no matter what your local settings are :
sysDate()
```
The table below shows a number of options to write the date in English or using local month-names :
```{r DateTab, echo=TRUE}
tabD <- cbind(paste0("univ",1:6), c(sysDate(style="univ1"), sysDate(style="univ2"),
sysDate(style="univ3"), sysDate(style="univ4"), as.character(sysDate(style="univ5")),
sysDate(style="univ6")), paste0(" local",1:6),
c(sysDate(style="local1"), sysDate(style="local2"), sysDate(style="local3"),
sysDate(style="local4"), sysDate(style="local5"), sysDate(style="local6")))
knitr::kable(tabD, caption="Various ways of writing current date")
```
## Session-Info
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/wrMisc/inst/doc/wrMiscVignette1.Rmd
---
title: "Getting started with wrMisc"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrMiscVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains a collection of various (low-level) tools which may be of general interest.
These functions were accumulated over a number of years of data-wrangling when treating high-throughput data from biomedical applications.
Besides, these functions are further used/integrated in more specialized functions dedicated to specific applications in the packages [wrProteo](https://CRAN.R-project.org/package=wrProteo), [wrGraph](https://CRAN.R-project.org/package=wrGraph) or [wrTopDownFrag](https://CRAN.R-project.org/package=wrTopDownFrag).
All these packages are available on [CRAN](https://cran.r-project.org/).
If you are not familiar with [R](https://www.r-project.org/) you may find many introductory documents on the official R-site
in [contributed documents](https://cran.r-project.org/other-docs.html) or under [Documentation/Manuals](https://cran.r-project.org/manuals.html).
Of course, numerous other documents/sites with tutorials and courses exist, too.
### Dependencies and Compilation
One of the aims was to write a package easy to install, with low system requirements and few obligatory dependencies.
All code is written in pure R and does not need any special compilers.
The number of obligatory dependencies was kept to a minimum.
Most of the additional packages used in some of the functions were declared as 'suggested' (ie not obligatory), to allow installation of _wrMisc_ even if some of these additional packages can't be installed/compiled by the user's instance.
When a feature/function of one of the 'suggested' packages is about to be used, its presence/installation will be checked and,
only if found missing, the user will be prompted with a message inviting to install the specific package(s) before using these specific functions.
This helps avoid being unable to install this package at all when some of its dependencies fail to get installed themselves.
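A sketch of this type of conditional check (using _requireNamespace()_, as is common practice for 'suggested' packages) :
```{r suggestedCheck, echo=TRUE, eval=FALSE}
## sketch (not run) of the conditional use of a 'suggested' package
if(requireNamespace("limma", quietly=TRUE)) {
  ## ... here the limma-dependent code would get executed
} else message("Please install the package 'limma' first !")
```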
### Installation And Loading
To get started, we need to install (if not yet installed) and load the package "[wrMisc](https://CRAN.R-project.org/package=wrMisc)" available from [CRAN](https://cran.r-project.org/).
```{r setup0, include=FALSE, echo=FALSE, messages=FALSE, warnings=FALSE}
suppressPackageStartupMessages({
library(wrMisc)
})
```
```{r install, echo=TRUE, eval=FALSE}
## If not already installed, you'll have to install the package first.
## This is the basic installation command in R
install.packages("wrMisc")
```
Since the functions illustrated in this vignette require a number of the _suggested_ packages,
let's check if they are installed and add them (via a small function), if not yet installed.
```{r install2, echo=TRUE, eval=FALSE}
packages <- c("knitr", "rmarkdown", "BiocManager", "kableExtra", "boot", "data.tree", "data.table",
"fdrtool", "RColorBrewer", "Rcpp", "wrMisc", "wrGraph", "wrProteo")
checkInstallPkg <- function(pkg) { # install function
if(!requireNamespace(pkg, quietly=TRUE)) install.packages(pkg) }
## install if not yet present
sapply(packages, checkInstallPkg)
```
Finally, this package also uses the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html)
which has to be installed differently (see also help on [Bioconductor](https://bioconductor.org)):
```{r install3, echo=TRUE, eval=FALSE}
## Installation of limma
BiocManager::install("limma")
```
This vignette is also accessible from R command-line or on CRAN at [wrMisc](https://CRAN.R-project.org/package=wrMisc):
```{r install4, echo=TRUE, eval=FALSE}
## Now you can open this vignette out of R:
vignette("wrMiscVignette1", package="wrMisc")
```
Before using the functions of this package, we actually need to load the package first (best on a fresh R-session):
```{r setup1}
library("wrMisc")
library("knitr")
## This is 'wrMisc' version number :
packageVersion("wrMisc")
```
## Speed Optimized Functions In The Package wrMisc
In high-throughput experiments in biology (like transcriptomics, proteomics etc...) many different features get measured a number of times (on different samples, like patients or the evolution of a disease). The resulting data typically contain many (independent) rows (eg >1000 different genes or proteins whose abundance was measured) and much fewer columns that may get further organized in groups of replicates.
As R is a versatile language, multiple options exist for assessing the global characteristics of such data, some being more efficient from a computational point of view.
In order to allow fast treatment of very large data-sets some tools have been re-designed for optimal performance.
### Assessing Basic Information About Variability (for matrix)
Many measurement techniques applied in a high-throughput manner suffer from limited precision.
This means, the same measurement taken twice in a row (ie repeated on the same subject) will very likely not give an identical result.
For this reason it is common practice to make replicate measurements to i) estimate mean (ie representative) values and ii) assess the factors contributing to the variability observed.
Briefly, technical replicates represent the case where multiple read-outs of the very same sample are generated and the resulting variability is associated to technical issues during the process of taking measures. Biological replicates represent independent samples and therefore reflect the variability a given parameter may have in a certain population of individuals.
With the tools presented here, both technical and biological replicates can be dealt with.
In several cases the interpretation of the resulting numbers should consider the experimental setup, though.
Let's make a simple matrix as toy data:
```{r basicVariability, echo=TRUE}
grp1 <- rep(LETTERS[1:3], c(3,4,3))
sampNa1 <- paste0(grp1, c(1:3,1:4,1:3))
set.seed(2016); dat1 <- matrix(round(c(runif(50000) +rep(1:1000,50)),3),
ncol=10, dimnames=list(NULL,sampNa1))
dim(dat1)
head(dat1)
```
Now let's estimate the standard deviation _(sd)_ for every row:
```{r sdForEachRow, echo=TRUE}
head(rowSds(dat1))
system.time(sd1 <- rowSds(dat1))
system.time(sd2 <- apply(dat1, 1, sd))
```
On most systems the equivalent calculation using *apply()* will run much slower compared to `rowSds`.
Note, there is a minor issue with rounding :
```{r usingApply, echo=TRUE}
table(round(sd1, 13)==round(sd2, 13))
```
Similarly we can easily calculate the CV (coefficient of variation, ie sd / mean, see also [CV](https://en.wikipedia.org/wiki/Coefficient_of_variation)) for every row using `rowCVs` :
```{r calculateRowCV, echo=TRUE}
system.time(cv1 <- rowCVs(dat1))
system.time(cv2 <- apply(dat1, 1, sd) / rowMeans(dat1))
# typically the calculation using rowCVs is much faster
head(cv1)
# results from the 'conventional' way
head(cv2)
```
Note, these calculations will be very efficient as long as the number of rows is much higher (>>) than the number of columns.
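To illustrate, one may transpose the toy-data (giving 10 rows and 5000 columns) and compare again; with only few rows the _apply()_ loop has little work to do, so the difference should be much less pronounced :
```{r rowSdsWide, echo=TRUE}
## the same comparison on a wide matrix (few rows, many columns)
datW <- t(dat1)
system.time(sdW1 <- rowSds(datW))
system.time(sdW2 <- apply(datW, 1, sd))
```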
### Data Organized In (Sub-)Groups As Sets Of Columns
Now, let's assume our data contains 3 initial samples measured as several replicates (already defined in _grp1_).
Similarly, we can also calculate the sd or CV for each line while splitting into groups of replicates (functions `rowGrpMeans`, `rowGrpSds` and `rowGrpCV`):
```{r rowGrpMeans1, echo=TRUE}
# we already defined the grouping :
grp1
## the mean for each group and row
system.time(mean1Gr <- rowGrpMeans(dat1, grp1))
```
```{r sdOrCVbyGrp, echo=TRUE}
## Now the sd for each row and group
system.time(sd1Gr <- rowGrpSds(dat1, grp1))
# will give us a matrix with the sd for each group & line
head(sd1Gr)
# Let's check the results of the first line :
sd1Gr[1,] == c(sd(dat1[1,1:3]), sd(dat1[1,4:7]), sd(dat1[1,8:10]))
# The CV :
system.time(cv1Gr <- rowGrpCV(dat1, grp1))
head(cv1Gr)
```
#### Counting Number Of NAs Per Row And Group Of Columns
Some data, like with quantitative proteomics measures, may contain an elevated number of _NAs_ (see also the package [wrProteo](https://CRAN.R-project.org/package=wrProteo) for further options for dealing with such data).
Furthermore, many other packages on CRAN and Bioconductor cover this topic, see also the [missing data task-view](https://CRAN.R-project.org/view=MissingData) on CRAN.
Similarly to above, there is an easy way to count the number of _NAs_ and get an overview of how NAs are distributed.
Let's assume we have measures from 3 groups/samples with 4 replicates each :
```{r rowGrpNA1, echo=TRUE}
mat2 <- c(22.2, 22.5, 22.2, 22.2, 21.5, 22.0, 22.1, 21.7, 21.5, 22, 22.2, 22.7,
NA, NA, NA, NA, NA, NA, NA, 21.2, NA, NA, NA, NA,
NA, 22.6, 23.2, 23.2, 22.4, 22.8, 22.8, NA, 23.3, 23.2, NA, 23.7,
NA, 23.0, 23.1, 23.0, 23.2, 23.2, NA, 23.3, NA, NA, 23.3, 23.8)
mat2 <- matrix(mat2, ncol=12, byrow=TRUE)
## The definition of the groups (ie replicates)
gr4 <- gl(3, 4, labels=LETTERS[1:3])
```
Now we can easily count the number of NAs per row and set of replicates.
```{r rowGrpNA2, echo=TRUE}
rowGrpNA(mat2,gr4)
```
### Fast NA-omit For Very Large Objects
The function _na.omit()_ from the package _stats_ also keeps a trace of all omitted instances.
This can be penalizing in terms of memory usage when handling very large vectors with a high content of NAs (eg >10000 NAs).
If you don't need to document precisely which elements got eliminated, the function `naOmit()` may offer
smoother functioning for very large objects.
```{r naOmit, echo=TRUE}
aA <- c(11:13,NA,10,NA)
str(naOmit(aA))
# the 'classical' na.omit also stores which elements were NA
str(na.omit(aA))
```
### Minimum Distance/Difference Between Values
If you need to find the closest neighbour(s) of a numeric vector, the function `minDiff()` will tell you the
distance ("dif","ppm" or "ratio") and index ("best") of the closest neighbour.
In case of multiple shortest distances the index of the first one is reported, and the column "nbest" will display a value of >1.
```{r minDiff, echo=TRUE}
set.seed(2017); aa <- 10 *c(0.1 +round(runif(20),2), 0.53, 0.53)
head(aa)
minDiff(aa,ppm=FALSE)
```
When you look at the first line, the value of 10.2 has one single closest value which is 10.4,
which is located in line number 19 (the column 'best' gives the index of the best).
Line number 19 points back to line number 1.
You can see that some elements (like 5.7) occur multiple times (lines no 3 and 9); multiple occurrences are counted in the column _ncur_.
This is why the column _nbest_ for line 15 (_value_ =6.0) indicates that its closest value appears twice.
## Working With Lists (And Lists Of Lists) {#WorkingWithLists}
### Partial unlist
When input from different places gets collected and combined into a list, this may give a collection of different types of data.
The function `partUnlist()` will preserve multi-column elements as they are (and just bring down one level):
```{r partUnlist_1, echo=TRUE}
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
partUnlist(bb)
partUnlist(lapply(bb,.asDF2))
```
This won't be possible using _unlist()_.
```{r unlist_1, echo=TRUE}
head(unlist(bb, recursive=FALSE))
```
To make such data uniform, ie to obtain a list with one column only for each list-element, the function `asSepList()` provides help :
```{r asSepList, echo=TRUE}
bb <- list(fa=gl(2,2), ve=31:33, L2=matrix(21:28,ncol=2), li=list(li1=11:14,li2=data.frame(41:44)))
asSepList(bb)
```
### Appending/Combining Lists
Separate lists may be combined using the _append()_ command, which also allows treating simple vectors.
```{r lappend1, echo=TRUE}
li1 <- list(a=1, b=2, c=3)
li2 <- list(A=11, b=2, C=13)
append(li1, li2)
```
However, this way there is no checking whether some of the list-elements are present in both lists and would thus appear twice.
The function `appendNR()` checks whether some list-elements would appear twice and thus allows avoiding such duplicate entries.
```{r lappend2, echo=TRUE}
appendNR(li1, li2)
```
### rbind On Lists
When a matrix (or data.frame) gets split into a list, like in the example using _by()_, as a reverse-function such lists can get joined using `lrbind()` in an _rbind_-like fashion.
```{r lrbind, echo=TRUE}
dat2 <- matrix(11:34, ncol=3, dimnames=list(letters[1:8], colnames=LETTERS[1:3]))
lst2 <- by(dat2, rep(1:3,c(3,2,3)), as.matrix)
lst2
# join list-elements (back) into single matrix
lrbind(lst2)
```
### Merge Multiple Matrices From List
When combining different datasets the function `mergeMatrixList()` allows merging multiple matrices (or data.frames) into a single matrix.
Two types of mode of operation are available : i) Returning only the common/shared elements (as defined by the rownames), this is default _mode='intersect'_ ;
alternatively one may ii) fuse/merge all matrices together without any loss of data (using _mode='union'_, additional _NA_s may appear when a given rowname is absent in one of the input matrices).
Furthermore, one may specifically select which columns should be used for fusing using the argument _useColumn_.
```{r mergeMatrixList, echo=TRUE}
mat1 <- matrix(11:18, ncol=2, dimnames=list(letters[3:6],LETTERS[1:2]))
mat2 <- matrix(21:28, ncol=2, dimnames=list(letters[2:5],LETTERS[3:4]))
mat3 <- matrix(31:38, ncol=2, dimnames=list(letters[c(1,3:4,3)],LETTERS[4:5]))
#
mergeMatrixList(list(mat1, mat2), useColumn="all")
# with custom names for the individual matrices
mergeMatrixList(list(m1=mat1, m2=mat2, mat3), mode="union", useColumn=2)
```
Similarly, separate entries may be merged using `mergeMatrices()` :
```{r mergeMatrices, echo=TRUE}
mergeMatrices(mat1, mat2)
mergeMatrices(mat1, mat2, mat3, mode="union", useColumn=2)
## custom names for matrix-origin
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=2)
## flexible/custom selection of columns
mergeMatrices(m1=mat1, m2=mat2, mat3, mode="union", useColumn=list(1,1:2,2))
```
### Fuse Content Of List-Elements With Redundant (Duplicated) Names
When list-elements have the same name, their content (of named numeric or character vectors)
may get fused using `fuseCommonListElem()` according to the names of the list-elements :
```{r fuseCommonListElem, echo=TRUE}
val1 <- 10 +1:26
names(val1) <- letters
(lst1 <- list(c=val1[3:6], a=val1[1:3], b=val1[2:3] ,a=val1[12], c=val1[13]))
## here the names 'a' and 'c' appear twice :
names(lst1)
## now, let's fuse all 'a' and 'c'
fuseCommonListElem(lst1)
```
### Filtering Lines And/Or Columns For All List-Elements Of Same Size
In a number of cases the information in various list-elements is somehow related.
Eg, in S3-objects produced by [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), or data produced using [wrProteo](https://CRAN.R-project.org/package=wrProteo) several instances of matrix or data.frame refer to data that are related.
Some matrixes may contain abundance data (or weights, etc) while another matrix or data.frame may contain the annotation information related to each line of the abundance data.
So if one wants to filter the data, ie remove some lines, this should be done in the same way with all related list-elements.
This way one may maintain a convenient 1:1 matching of lines.
The function `filterLiColDeList()` searches if other list-elements have suitable dimensions and will then run the same filtering as in the 'target' list-element.
In consequence this can be used with the output of wrProteo to remove simultaneously the same lines and/or columns.
```{r listBatchReplace1, echo=TRUE}
lst1 <- list(m1=matrix(11:18, ncol=2), m2=matrix(21:30, ncol=2), indR=31:34,
m3=matrix(c(21:23,NA,25:27,NA), ncol=2))
filterLiColDeList(lst1, useLines=2:3)
filterLiColDeList(lst1, useLines="allNA", ref=3)
```
### Replacements In List
The function `listBatchReplace()` works similar to _sub()_ and allows to search & replace exact matches to a character string along all elements of a list.
```{r replInList1, echo=TRUE}
(lst1 <- list(aa=1:4, bb=c("abc","efg","abhh","effge"), cc=c("abdc","efg","efgh")))
listBatchReplace(lst1, search="efg", repl="EFG", silent=FALSE)
```
### Organize Values Into List And Sort By Names
Named numeric or character vectors can be organized into lists using `listGroupsByNames()`,
based on their names (only the part before any extensions starting with a point gets considered).
Of course, other separators may be defined using the argument _sep_.
```{r listGroupsByNames, echo=TRUE}
ser1 <- 1:7; names(ser1) <- c("AA","BB","AA.1","CC","AA.b","BB.e","A")
listGroupsByNames(ser1)
```
If no names are present, the content of the vector itself will be used as name :
```{r listGroupsByNames2, echo=TRUE}
listGroupsByNames((1:10)/5)
```
### Batch-filter List-Elements
In the view of object-oriented programming several methods produce results integrated into lists or S3-objects (eg
[limma](https://bioconductor.org/packages/release/bioc/html/limma.html)).
The function `filterList()` aims at facilitating the filtering of all elements of lists or S3-objects.
List-elements with inappropriate number of lines will be ignored.
```{r filterList, echo=TRUE}
set.seed(2020); dat1 <- round(runif(80),2)
list1 <- list(m1=matrix(dat1[1:40], ncol=8), m2=matrix(dat1[41:80], ncol=8), other=letters[1:8])
rownames(list1$m1) <- rownames(list1$m2) <- paste0("line",1:5)
# Note: the list-element list1$other has a length different to that of filt. Thus, it won't get filtered.
filterList(list1, list1$m1[,1] >0.4) # filter according to 1st column of $m1 ...
filterList(list1, list1$m1 >0.4)
```
### Transform Columns Of Matrix To List Of Vectors
On some occasions it may be useful to separate the columns of a matrix into separate vectors inside a list.
This can be done using `matr2list()`:
```{r matr2list, echo=TRUE}
(mat1 <- matrix(1:12, ncol=3, dimnames=list(letters[1:4],LETTERS[1:3])))
str(matr2list(mat1))
```
## Working With Arrays {#WorkingWithArrays}
Let's get started with a little toy-array:
```{r array0, echo=TRUE}
(arr1 <- array(c(6:4,4:24), dim=c(4,3,2), dimnames=list(c(LETTERS[1:4]),
paste("col",1:3,sep=""),c("ch1","ch2"))))
```
### CV (Coefficient Of Variation) With Arrays
Now we can obtain the CV (coefficient of variation) by splitting along the 3rd dimension (ie this is equivalent to an _apply_ along the 3rd dimension) using `arrayCV()`:
```{r arrayCV1, echo=TRUE}
arrayCV(arr1)
# this is equivalent to
cbind(rowCVs(arr1[,,1]), rowCVs(arr1[,,2]))
```
Similarly we can split along any other dimension, eg the 2nd dimension :
```{r arrayCV2, echo=TRUE}
arrayCV(arr1, byDim=2)
```
### Slice 3-dim Array In List Of Matrixes (Or Arrays)
This procedure is similar to (re-)organizing an initial array into clusters, here we split along a user-defined factor/vector.
If a clustering-algorithm produces the cluster assignments, this function can be used to organize the input data accordingly using `cutArrayInCluLike()`.
```{r cutArrayInCluLike, echo=TRUE}
cutArrayInCluLike(arr1, cluOrg=c(2,1,2,1))
```
Let's cut by filtering along the 3rd dimension for all lines where column 'col2' is >7, and then display only the content of columns 'col1' and 'col2' (using `filt3dimArr()`):
```{r filt3dimArr, echo=TRUE}
filt3dimArr(arr1, displCrit=c("col1","col2"), filtCrit="col2", filtVal=7, filtTy=">")
```
## Working With Redundant Data {#WorkingWithRedundantData}
_Semantics_ : Please note, that there are two ways of interpreting the term '**unique**' :
* In regular understanding one describes this way an event which occurs only once, and thus does not occur/happen anywhere else.
* The command `unique()` will eliminate redundant entries to obtain a shorter 'unique' output vector, ie in the resultant vector all values/content occur only once.
However, from the result of _unique()_ you cannot tell any more which ones were not unique initially !
In some applications (eg proteomics) initial identifiers (IDs) may occur multiple times in the data and we frequently need to identify events/values that occur only once, as the first meaning of '_unique_'.
This package provides (additional) functions to easily distinguish values occurring just once (ie _unique_) from those occurring multiple times. Furthermore, there are functions to rename/remove/combine replicated elements, eg `correctToUnique()` or `nonAmbiguousNum()`, so that no elements or lines of data get lost.
### Identify What Is Repeated (and Where Repeated Do Occur)
```{r repeated1, echo=TRUE}
## some text toy data
tr <- c("li0","n",NA,NA, rep(c("li2","li3"),2), rep("n",4))
```
The function _table()_ (from the package _base_) is very useful to get some insights when working with smaller objects, but may be slow to handle very large objects.
As mentioned, _unique()_ will make everything unique, and afterwards you won't know any more who was unique in the first place !
The function `duplicated()` (also from package base) helps us getting the information who is repeated.
```{r repeated2, echo=TRUE}
table(tr)
unique(tr)
duplicated(tr, fromLast=FALSE)
```
```{r repeated3, echo=TRUE}
aa <- c(11:16,NA,14:12,NA,14)
names(aa) <- letters[1:length(aa)]
aa
```
`findRepeated()` (from this package) will return the position/index (and content/value) of repeated elements. However, the output in form of a list is not very convenient to the human reader.
```{r findRepeated, echo=TRUE}
findRepeated(aa)
```
`firstOfRepeated()` tells the index of the first instance of repeated elements, which elements you need to make the vector 'unique', and which elements get stripped off when making unique.
Please note, that _NA_s (no matter if they occur once or more times) are automatically included in the part suggested to be removed.
```{r firstOfRepeated, echo=TRUE}
firstOfRepeated(aa)
aa[firstOfRepeated(aa)$indUniq] # only unique with their names
unique(aa) # unique() does not return any names !
```
### Correct Vector To Unique (While Maintaining The Original Vector Length)
If necessary, a counter can be added to non-unique entries, thus no individual values get eliminated and the length and order of the resultant object maintains the same using `correctToUnique()`.
This is of importance when assigning rownames to a data.frame : Assigning redundant values/text as rownames of a data.frame will result in an error !
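A quick demonstration of this point (wrapped in _try()_ so that this vignette still builds) :
```{r rownamesDemo, echo=TRUE}
df0 <- data.frame(a=1:2)
try(rownames(df0) <- c("x","x"))   # duplicate 'row.names' are not allowed
```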
```{r correctToUnique1, echo=TRUE}
correctToUnique(aa)
correctToUnique(aa, sep=".", NAenum=FALSE) # keep NAs (ie without transforming to character)
```
You see from the last example above, that this function has an argument for controlling enumerating elements.
### Mark Any Duplicated (ie Ambiguous) Elements by Changing Their Names (and Separate from Unique)
First, the truly unique values are reported and then the first occurrence of repeated elements is given; _NA_ instances get ignored.
This can be done using `nonAmbiguousNum()` which maintains the length of the initial character vector.
```{r nonAmbiguousNum, echo=TRUE}
unique(aa) # names are lost
nonAmbiguousNum(aa)
nonAmbiguousNum(aa, uniq=FALSE, asLi=TRUE) # separate in list unique and repeated
```
### Compare Multiple Vectors And Sort By Number Of Common/Repeated Values/Words
The main aim of the function `sortByNRepeated()` is allowing to compare multiple vectors for common values/words and providing an output sorted by number of repeats.
Suppose 3 persons are asked which cities they wanted to visit.
Then we would like to count the most frequently cited cities.
Here we consider individual choices as equally ranked.
By default intra-repeats are eliminated.
```{r sortByNRepeated, echo=TRUE}
cities <- c("Bangkok","London","Paris", "Singapore","New York City", "Istambul","Delhi","Rome","Dubai")
sortByNRepeated(x=cities[c(1:4)], y=cities[c(2:3,5:8)])
## or (unlimited) multiple inputs via list
choices1 <- list(Mary=cities[c(1:4)], Olivia=cities[c(2:3,5:8)], Paul=cities[c(5:3,9,5)]) # Note : Paul cited NYC twice !
table(unlist(choices1))
sortByNRepeated(choices1)
sortByNRepeated(choices1, filterIntraRep=FALSE) # without correcting multiple citation of NYC by Paul
```
### Combine Multiple Matrixes Where Some Column-Names Are The Same
Here, it is supposed that you want to join 2 or more matrixes describing different properties of the same collection of individuals (as rows).
Common column-names are interpreted that their respective information should be combined (either as average or as sum).
This can be done using `cbindNR()` :
```{r cbindNR, echo=TRUE}
## First we'll make some toy data :
(ma1 <- matrix(1:6, ncol=3, dimnames=list(1:2,LETTERS[3:1])))
(ma2 <- matrix(11:16, ncol=3, dimnames=list(1:2,LETTERS[3:5])))
## now we can join 2 or more matrixes
cbindNR(ma1, ma2, summarizeAs="mean") # average of both columns 'C'
```
### Filter Matrix To Keep Only First Of Repeated Lines
This resembles the functioning of _unique()_, but applies to a user-specified column of the matrix.
```{r firstLineOfDat, echo=TRUE}
(mat1 <- matrix(c(1:6, rep(1:3,1:3)), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2])))
```
The function `firstLineOfDat()` allows to access/extract the first line of repeated instances.
```{r firstLineOfDat2, echo=TRUE}
firstLineOfDat(mat1, refCol=2)
```
The function `firstOfRepLines()` used below was rather designed for dealing with character input; it allows concatenating all columns and removing redundant lines.
```{r firstOfRepLines, echo=TRUE}
mat2 <- matrix(c("e","n","a","n","z","z","n","z","z","b",
"","n","c","n","","","n","","","z"), ncol=2)
firstOfRepLines(mat2, out="conc")
# or as index :
firstOfRepLines(mat2)
```
### Filter To Unique Column-Content Of Matrix, Add Counter And Concatenated Information
```{r nonredDataFrame, echo=TRUE}
(df1 <- data.frame(cbind(xA=letters[1:5], xB=c("h","h","f","e","f"), xC=LETTERS[1:5])))
```
The function `nonredDataFrame()` offers to include a counter of redundant instances encountered (for 1st column specified) :
```{r nonredDataFrame2, echo=TRUE}
nonredDataFrame(df1, useCol=c("xB","xC"))
# without counter or concatenating
df1[which(!duplicated(df1[,2])),]
# or
df1[firstOfRepLines(df1,useCol=2),]
```
### Get First Of Repeated By Column
```{r get1stOfRepeatedByCol, echo=TRUE}
mat2 <- cbind(no=as.character(1:20), seq=sample(LETTERS[1:15], 20, repl=TRUE),
ty=sample(c("full","Nter","inter"),20,repl=TRUE), ambig=rep(NA,20), seqNa=1:20)
(mat2uniq <- get1stOfRepeatedByCol(mat2, sortBy="seq", sortSupl="ty"))
# the values from column 'seq' are indeed unique
table(mat2uniq[,"seq"])
# This will return all first repeated (may be >1) but without further sorting
# along column 'ty' nor marking in column 'ambig'.
mat2[which(duplicated(mat2[,2],fromLast=FALSE)),]
```
### Transform (ambiguous) Matrix To Non-ambiguous Matrix (With Respect To Given Column)
```{r nonAmbiguousMat, echo=TRUE}
nonAmbiguousMat(mat1,by=2)
```
Here another example; ambiguous entries will be marked by an '_' :
```{r nonAmbiguousMat2, echo=TRUE}
set.seed(2017); mat3 <- matrix(c(1:100,round(rnorm(200),2)), ncol=3,
dimnames=list(1:100,LETTERS[1:3]));
head(mat3U <- nonAmbiguousMat(mat3, by="B", na="_", uniqO=FALSE), n=15)
head(get1stOfRepeatedByCol(mat3, sortB="B", sortS="B"))
```
### Combine Replicates From List To Matrix
```{r combineReplFromListToMatr, echo=TRUE}
lst2 <- list(aa_1x=matrix(1:12, nrow=4, byrow=TRUE), ab_2x=matrix(24:13, nrow=4, byrow=TRUE))
combineReplFromListToMatr(lst2)
```
### Combine Redundant Lines From List with (Multiple) Matrix According to Reference
The function `combineRedundLinesInList()` provides help for combining/summarizing lines of numeric data which may be summarized according to a reference vector or matrix (part of the same input-list).
Initial data and reference will be aligned based on rownames and the content of the reference (or the column specified by `refColNa`).
```{r combineRedundLinesInListAcRef, echo=TRUE}
x1 <- list(quant=matrix(11:34, ncol=3, dimnames=list(letters[8:1], LETTERS[11:13])),
annot=matrix(paste0(LETTERS[c(1:4,6,3:5)],LETTERS[c(1:4,6,3:5)]), ncol=1,
dimnames=list(paste(letters[1:8]),"xx")) )
combineRedundLinesInList(lst=x1, refNa="annot", datNa="quant", refColNa="xx")
```
### Non-redundant Lines Of Matrix
```{r nonRedundLines, echo=TRUE}
mat4 <- matrix(rep(c(1,1:3,3,1),2), ncol=2, dimnames=list(letters[1:6],LETTERS[1:2]))
nonRedundLines(mat4)
```
### Filter For Unique Elements /2
```{r filtSizeUniq, echo=TRUE}
# input: c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), filtUn=TRUE, minSi=NULL)
# here a,b,c and dd are repeated :
filtSizeUniq(list(A="a", B=c("b","bb","c"), D=c("dd","d","ddd","c")), ref=c(letters[c(1:26,1:3)],
"dd","dd","bb","ddd"), filtUn=TRUE, minSi=NULL)
```
### Make Non-redundant Matrix
```{r makeNRedMatr, echo=TRUE}
t3 <- data.frame(ref=rep(11:15,3), tx=letters[1:15],
matrix(round(runif(30,-3,2),1), nc=2), stringsAsFactors=FALSE)
# First we split the data.frame in list
by(t3,t3[,1],function(x) x)
t(sapply(by(t3,t3[,1],function(x) x), summarizeCols, me="maxAbsOfRef"))
(xt3 <- makeNRedMatr(t3, summ="mean", iniID="ref"))
(xt3 <- makeNRedMatr(t3, summ=unlist(list(X1="maxAbsOfRef")), iniID="ref"))
```
### Combine/Reduce Redundant Lines Based On Specified Column
```{r combineRedBasedOnCol, echo=TRUE}
matr <- matrix(c(letters[1:6],"h","h","f","e",LETTERS[1:5]), ncol=3,
dimnames=list(letters[11:15],c("xA","xB","xC")))
combineRedBasedOnCol(matr, colN="xB")
combineRedBasedOnCol(rbind(matr[1,],matr), colN="xB")
```
### Convert Matrix (eg With Redundant) Row-Names To data.frame
```{r convMatr2df, echo=TRUE}
x <- 1
dat1 <- matrix(1:10, ncol=2)
rownames(dat1) <- letters[c(1:3,2,5)]
## as.data.frame(dat1) ... would result in an error
convMatr2df(dat1)
convMatr2df(data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3))
tmp <- data.frame(a=as.character((1:3)/2), b=LETTERS[1:3], c=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
tmp <- data.frame(a=as.character((1:3)/2), b=1:3, stringsAsFactors=FALSE)
convMatr2df(tmp)
```
### Find And Combine Points Located Very Close In X/Y Space
```{r combineOverlapInfo, echo=TRUE}
set.seed(2013)
datT2 <- matrix(round(rnorm(200)+3,1), ncol=2, dimnames=list(paste("li",1:100,sep=""),
letters[23:24]))
# (mimick) some short and longer names for each line
inf2 <- cbind(sh=paste(rep(letters[1:4],each=26), rep(letters,4),1:(26*4),sep=""),
lo=paste(rep(LETTERS[1:4],each=26), rep(LETTERS,4), 1:(26*4), ",",
rep(letters[sample.int(26)],4), rep(letters[sample.int(26)],4), sep=""))[1:100,]
## We'll use this to test :
head(datT2, n=10)
## let's assign to each pair of x & y values a 'cluster' (column _clu_, the column _combInf_ tells us which lines/indexes are in this cluster)
head(combineOverlapInfo(datT2, disThr=0.03), n=10)
## it is also possible to rather display names (eg gene or protein-names) instead of index values
head(combineOverlapInfo(datT2, suplI=inf2[,2], disThr=0.03), n=10)
```
### Bin And Summarize Values According To Their Names
```{r getValuesByUnique, echo=TRUE}
dat <- 11:19
names(dat) <- letters[c(6:3,2:4,8,3)]
## Here the names are not unique.
## Thus, the values can be binned by their (non-unique) names and a representative values calculated.
## Let's make a 'datUniq' with the mean of each group of values :
datUniq <- round(tapply(dat, names(dat), mean),1)
## now we propagate the mean values to the full vector
getValuesByUnique(dat, datUniq)
cbind(ini=dat,firstOfRep=getValuesByUnique(dat, datUniq),
indexUniq=getValuesByUnique(dat, datUniq, asIn=TRUE))
```
### Regrouping Simultaneously By Two Factors
For example, if you wish to create group-labels considering the eye- and hair-color of a small group of students (supposing a sort of controlled vocabulary was used),
the function `combineByEitherFactor()` will help. So basically, this is an empiric segmentation-approach for two categorical variables.
Please note, that with large data-sets and very dispersed data this approach will not provide great results.
In the example below we'll attempt to 'cluster' according to columns _nn_ and _qq_, the resultant cluster number can be found in column _grp_.
```{r combineByEitherFactor, echo=TRUE}
nn <- rep(c("a","e","b","c","d","g","f"),c(3,1,2,2,1,2,1))
qq <- rep(c("m","n","p","o","q"),c(2,1,1,4,4))
nq <- cbind(nn,qq)[c(4,2,9,11,6,10,7,3,5,1,12,8),]
## Here we consider 2 columns 'nn' and 'qq' when trying to regroup common values
## (eg value 'a' from column 'nn' and value 'o' from 'qq')
combineByEitherFactor(nq, 1, 2, nBy=FALSE)
```
The argument _nBy_ simply allows adding an additional column with the group/cluster-number.
```{r combineByEitherFactor2, echo=TRUE}
## the same, but including n by group/cluster
combineByEitherFactor(nq, 1, 2, nBy=TRUE)
## Not running further iterations works faster, but you may not reach 'convergence' immediately
combineByEitherFactor(nq,1, 2, nBy=FALSE)
```
```{r combineByEitherFactor3, echo=TRUE}
## another example
mm <- rep(c("a","b","c","d","e"), c(3,4,2,3,1))
pp <- rep(c("m","n","o","p","q"), c(2,2,2,2,5))
combineByEitherFactor(cbind(mm,pp), 1, 2, con=FALSE, nBy=TRUE)
```
### Batch Replacing Of Values Or Character-Strings
The function `multiCharReplace()` facilitates multiple replacements in a vector, matrix or data.frame.
```{r multiCharReplace1, echo=TRUE}
# replace character content
x1 <- c("ab","bc","cd","efg","ghj")
multiCharReplace(x1, cbind(old=c("bc","efg"), new=c("BBCC","EF")))
# works also on matrix and/or to replace numeric content :
x3 <- matrix(11:16, ncol=2)
multiCharReplace(x3, cbind(12:13,112:113))
```
Sometimes data get imported using different encoding for what should be interpreted as _FALSE_ and _TRUE_ :
```{r multiCharReplace2, echo=TRUE}
# replace and return logical vector
x2 <- c("High","n/a","High","High","Low")
multiCharReplace(x2,cbind(old=c("n/a","Low","High"), new=c(NA,FALSE,TRUE)), convTo="logical")
```
### Multi-to-multi Matching Of (Concatenated) Terms
The function allows splitting (if necessary, using _strsplit()_) two vectors and comparing each isolated tag (eg identifier) from the 1st vector/object against each isolated tag from the second vector/object. This runs like a loop of one-to-many comparisons. The basic output is a list with indexes showing which element of the 1st vector/object has matches in the 2nd vector/object. Since this is not convenient to the human reader, tabular output can be created, too.
```{r multiMatch1, echo=TRUE}
aa <- c("m","k","j; aa","m; aa; bb; o","n; dd","aa","cc")
bb <- c("aa","dd","aa; bb; q","p; cc")
## result as list of indexes
(bOnA <- multiMatch(aa, bb, method="asIndex")) # match bb on aa
## more convenient to the human reader
(bOnA <- multiMatch(aa, bb)) # match bb on aa
(bOnA <- multiMatch(aa, bb, method="matchedL")) # match bb on aa
```
### Comparing Global Patterns
In most programming languages it is fairly easy to compare _exact_ content of character vectors or factors with unordered levels.
However, sometimes - due to semantic issues - some people may call a color 'purple' while others call it 'violet'.
Thus, without using controlled vocabulary the _exact_ terms may vary.
Here, let's address the case where no dictionaries of controlled vocabulary are available for substituting equivalent terms.
Thus, we'll compare 4 vectors of equal length and check if the words/letters used could be substituted to result in the first vector.
Vectors _aa_ and _ab_ have the same global pattern, ie after repeating a word twice it moves to another word.
Vectors _ac_ and _ad_ have different general patterns, either with alternating words or falling back to a word previously used.
Based on (and extending) a post on stackoverflow [https://stackoverflow.com/questions/71353218/extracting-flexible-general-patterns/](https://stackoverflow.com/questions/71353218/extracting-flexible-general-patterns/) :
```{r compGlobPat1, echo=TRUE}
aa <- letters[rep(c(3:1,4), each=2)]
ab <- letters[rep(c(5,8:6), each=2)] # 'same general' pattern to aa
ac <- letters[c(1:2,1:3,3:4,4)] # NOT 'same general' pattern to any other
ad <- letters[c(6:8,8:6,7:6)] # NOT 'same general' pattern to any other
```
The basic pattern can be extracted combining match() and unique():
```{r compGlobPat2, echo=TRUE}
## get global patterns
cbind(aa= match(aa, unique(aa)),
ab= match(ab, unique(ab)),
ac= match(ac, unique(ac)),
ad= match(ad, unique(ad)) )
```
Let's make a data.frame with the annotation toy-data from above.
Each line is supposed to represent a sample, and the columns show different aspects of annotation.
```{r compGlobPat3, echo=TRUE}
bb <- data.frame(ind=1:length(aa), a=aa, b=ab, c=ac, d=ad)
```
Via the function `replicateStructure()` is it possible to compare annotation as different columns for equivalent global patterns.
By default, this function excludes all columns not designating any replicates, like the numbers in the first column ($ind).
Also it will try to find the column with the median number of levels, when comparing to all other columns.
The output is a list with *\$col* indicating which column(s) may be used, *\$lev* for the corresponding global pattern, *\$meth* for the method finally used and
_\$allCols_ for documenting the global pattern in each column (whether it was selected or not).
```{r compGlobPat4, echo=TRUE}
replicateStructure(bb)
```
Besides, it is also possible to combine all columns if one considers that they contribute complementary substructures of the overall annotation.
```{r compGlobPat5, echo=TRUE}
replicateStructure(bb, method="combAll")
```
However, when combining multiple columns it may happen -like in the example above- that finally no lines remain being considered as replicates.
This can also be found when one column describes the groups and another gives the order of the replicates therein.
However, for calling a (standard) statistical test it may be necessary to exclude these replicate-numbers in order to designate the groups of replicates.
To overcome the problem of losing the understanding of the replicate-structure when combining all factors, it is possible to look for non-orthogonal structures,
ie to try excluding columns which (after combining) would suggest no replicates.
See the example below :
```{r compGlobPat6, echo=TRUE}
replicateStructure(bb, method="combNonOrth")
```
## Search For Similar (Numeric) Values {#SearchForSimilarNumericValues}
This section addresses values that are not truly _identical_ but may differ only in the very last digit(s)
and thus may, from a pragmatic point of view, get considered and treated as 'about the same'.
The simplest approach would be to round values and then look for identical values.
The functions presented here (like `checkSimValueInSer()`) offer this type of search in a convenient way.
Of course, the user must define a threshold for how similar values may be retained as positive (in the logical vector returned).
With the function _checkSimValueInSer()_ this threshold must be given as [ppm](https://simple.wikipedia.org/wiki/Parts_per_million) (parts per million).
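As a reminder, a difference expressed in ppm is simply the relative difference multiplied by 10^6; the small helper _ppmDiff()_ below is written here just for illustration and is not part of this package :
```{r ppmDiff1, echo=TRUE}
## ppm-difference of x relative to a reference value 'ref'
ppmDiff <- function(x, ref) 1e6 * (x - ref) / ref
ppmDiff(1000001, 1000000)    # ie 1 ppm
```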
```{r checkSimValueInSer, echo=TRUE}
va1 <- c(4:7,7,7,7,7,8:10) + (1:11)/28600
checkSimValueInSer(va1, ppm=5)
data.frame(va=sort(va1), simil=checkSimValueInSer(va1))
```
### Find Similar Numeric Values Of Two Columns Of A Matrix
The search for similar values may be performed as absolute distance or as 'ppm' (as is eg usual in proteomics when comparing measured and theoretically expected mass).
```{r findCloseMatch1, echo=TRUE}
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,15.9,13.5,15.7,14.1,5)
(cloMa <- findCloseMatch(x=aA, y=cC, com="diff", lim=0.5, sor=FALSE))
```
The result of _findCloseMatch()_ is a list organized by each 'x', telling all instances of 'y' found within the distance tolerance given by _lim_.
Using `closeMatchMatrix()` the result obtained above, can be presented in a more convenient format for the human eye.
```{r closeMatchMatrix1, echo=TRUE}
# all matches (of 2d arg) to/within limit for each of 1st arg ('x'); 'y' ..to 2nd arg = cC
# first let's display only one single closest/best hit
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=TRUE)) #
```
Using the argument _limitToBest=FALSE_ we can display all distances within the limits imposed, some values/points may occur multiple times.
For example, value number 4 of 'cC' (=12.5) or value number 3 of 'aA' (=13) now occur multiple times...
```{r closeMatchMatrix2, echo=TRUE}
(maAa <- closeMatchMatrix(cloMa, aA, cC, lim=FALSE,origN=TRUE)) #
(maAa <- closeMatchMatrix(cloMa, cbind(valA=81:87, aA), cbind(valC=91:99, cC), colM=2,
colP=2, lim=FALSE))
(maAa <- closeMatchMatrix(cloMa, cbind(aA,valA=81:87), cC, lim=FALSE, deb=TRUE)) #
a2 <- aA; names(a2) <- letters[1:length(a2)]; c2 <- cC; names(c2) <- letters[10 +1:length(c2)]
(cloM2 <- findCloseMatch(x=a2, y=c2, com="diff", lim=0.5, sor=FALSE))
(maA2 <- closeMatchMatrix(cloM2, predM=cbind(valA=81:87, a2),
measM=cbind(valC=91:99, c2), colM=2, colP=2, lim=FALSE, asData=TRUE))
(maA2 <- closeMatchMatrix(cloM2, cbind(id=names(a2), valA=81:87,a2), cbind(id=names(c2),
valC=91:99,c2), colM=3, colP=3, lim=FALSE, deb=FALSE))
```
### Find Similar Numeric Values From Two Vectors/Matrixes
For comparing two sets of data one may use `findSimilFrom2sets()`.
```{r findSimilFrom2sets, echo=TRUE}
aA <- c(11:17); bB <- c(12.001,13.999); cC <- c(16.2,8,9,12.5,12.6,15.9,14.1)
aZ <- matrix(c(aA,aA+20), ncol=2, dimnames=list(letters[1:length(aA)],c("aaA","aZ")))
cZ <- matrix(c(cC,cC+20), ncol=2, dimnames=list(letters[1:length(cC)],c("ccC","cZ")))
findCloseMatch(cC, aA, com="diff", lim=0.5, sor=FALSE)
findSimilFrom2sets(aA, cC)
findSimilFrom2sets(cC, aA)
findSimilFrom2sets(aA, cC, best=FALSE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=5e4, deb=TRUE)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=FALSE)
# below: find fewer 'best matches' since search window larger (ie more good hits compete !)
findSimilFrom2sets(aA, cC, comp="ppm", lim=9e4, bestO=TRUE)
```
### Fuse Previously Identified Pairs To 'Clusters'
When you have already identified the closest neighbour of a set of values, you may want to
re-organize/fuse such pairs to a given number of total clusters (using `fusePairs()`).
```{r fusePairs, echo=TRUE}
(daPa <- matrix(c(1:5,8,2:6,9), ncol=2))
fusePairs(daPa, maxFuse=4)
```
### Eliminate Close (Overlapping) Points (In Bivariate x & y Space)
When visualizing larger data-sets in an x&y space one may find many points overlapping when their values are almost the same.
The function `elimCloseCoord()` aims to reduce a bivariate data-set to 'non-overlapping' points, somehow similar to human perception.
```{r elimCloseCoord1, echo=TRUE}
da1 <- matrix(c(rep(0:4,5),0.01,1.1,2.04,3.07,4.5), ncol=2); da1[,1] <- da1[,1]*99; head(da1)
elimCloseCoord(da1)
```
### Mode Of (Continuous) Data
Looking for the _mode_ is rather easy with counting data; the result of _table()_ will get you there quickly.
However, with continuous data the mode may be more tricky to define and identify.
Intuitively most people consider the mode as the peak of a density estimation (which remains to be defined and estimated).
With continuous data the most frequent (exact) value may be quite different/distant to the most dense region of the data.
The function `stableMode()` presented here has different modes of operation; at this point there is no clear rule which mode may perform most satisfactorily in different situations.
```{r stableMode, echo=TRUE}
set.seed(2012); dat <- round(c(rnorm(120,0,1.2), rnorm(80,0.8,0.6), rnorm(25,-0.6,0.05), runif(200)),3)
dat <- dat[which(dat > -2 & dat <2)]
stableMode(dat)
```
Now we can try to show on a plot :
```{r stableMode2, fig.height=8, fig.width=9, fig.align="center", echo=TRUE}
layout(1:2)
plot(1:length(dat), sort(dat), type="l", main="Sorted Values", xlab="rank", las=1)
abline(h=stableMode(dat, silent=TRUE), lty=2,col=2)
legend("topleft",c("stableMode"), text.col=2, col=2, lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
plot(density(dat, kernel="gaussian", adjust=0.7), xlab="Value of dat", main="Density Estimate Plot")
useCol <- c("red","green","blue","grey55")
legend("topleft",c("dens","binning","BBmisc","allModes"), text.col=useCol, col=useCol,
lty=2, lwd=1, seg.len=1.2, cex=0.8, xjust=0, yjust=0.5)
abline(v=stableMode(dat, method="dens", silent=TRUE), lty=2, col="red", lwd=2)
abline(v=stableMode(dat, method="binning", silent=TRUE), lty=2, col="green")
abline(v=stableMode(dat, method="BBmisc", silent=TRUE), lty=2, col="blue")
abline(v=stableMode(dat, method="allModes"), lty=2, col="grey55")
```
Please note, that plotting data modelled via a kernel function (as above) also relies on strong hypotheses which may not be well justified in a number of cases !
For this reason, the _sorted values_ were plotted, too.
As you can see from the example above, looking for the most frequent exact value may not be a perfect choice for continuous data.
In this example the method _'allModes'_ (ie the multiple instances of most frequent exact values) gave partially usable results (dashed grey lines), due to the rounding to 3 digits.
As you can see in the example above, the method _'allModes'_ may give multiple ties !
More rounding will make the data more discrete and ultimately resemble counting data. However, with rounding some of the finer resolution/details will get lost.
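To illustrate, one may round the toy-data more strongly and again look for the most frequent exact values :
```{r stableModeRound, echo=TRUE}
## with stronger rounding fewer distinct values exist, which may change the ties found
stableMode(round(dat, 1), method="allModes")
```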
### Most Frequently Occurring Value (traditional mode)
The function `stableMode()` can also be used to locate _the_ most frequently occurring exact value of numeric or character vectors.
As we just saw at the end of the previous example, the argument _method="allModes"_ allows finding all ties (if present).
```{r stableMode3, echo=TRUE}
set.seed(2021)
x <- sample(letters, 50000, replace=TRUE)
stableMode(x, method="mode")
stableMode(x, method="allModes")
```
## Text-Manipulations {#Text-Manipulations}
There are several packages offering interesting functions for manipulating text. Here are a few functions to complement these.
### Trimming Redundant Text
Automatic annotation has the tendency to concatenate many parameters into a single name.
The function `trimRedundText()` was designed to allow trimming redundant text from left and/or right side of a character-vector
(when the same portion of text appears in _each_ element).
However, as in some cases (like the first element of the example below) nothing would remain, it is possible to define a _minimum_ width for the remaining/resulting text.
```{r trimRedundText1, echo=TRUE}
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1)
```
### Extract Common Part Of Text
The original idea was to do something resembling the inverse process of trimming redundant text (example above), but this time to discard the variable text.
In the end this is not as trivial when 'common' or 'redundant' text is not at the beginning or end of a chain of characters.
In particular with very large text this is an active field of research (eg for sequence alignment).
The function presented here is a very light-weight solution designed for smaller and simple settings, like inspecting column-names.
Furthermore, the function `keepCommonText()` only reports the first (longest) hit.
So, when there are multiple conserved 'words' of equal length, only the first of them will be identified.
When setting the argument 'hiResol=FALSE' this function has an option to decrease the resolution of searching, which in turn increases the speed, however at the cost of possibly missing the optimal solution.
In this case the resultant chain of characters should be inspected to see if it can be further extended/optimized.
With terminal common text :
```{r keepCommonText1, echo=TRUE}
txt1 <- c("abcd","abcde","abcdefg","abcdE",NA,"abcdEF")
trimRedundText(txt1, side="left") # remove redundant
keepCommonText(txt1, side="terminal") # keep redundant
keepCommonText(txt1, side="center") # computationally easier
```
With internal common text :
```{r keepCommonText2, echo=TRUE}
txt2 <- c("abcd_abc_kjh", "bcd_abc123", "cd_abc_po")
keepCommonText(txt2, side="center")
```
### Manipulating Enumerator-Extensions
Human operators may have many ways to write enumerators like 'xx_sample_1', 'xx_Sample_2', 'xx_s3', 'xx_4', etc.
Many times you may find such text as names or column-names for measures underneath.
The functions presented below will work only if _consistent enumerators_, ie (text +) digit-character(s), are at the end of all character-strings to be treated.
Please note, that with large vectors testing/checking a larger panel of enumerator-abbreviations may result in slower performance.
In cases of such larger data-sets it may be more effective to first study the data and then run simple substitutions using _sub()_ targeted for this very case, as sketched below.
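For example, such a targeted substitution could look like the following minimal base-R sketch (the regular expression and the example vector are illustrative assumptions covering the enumerator-styles mentioned above) :
```{r subEnumerator1, echo=TRUE}
## strip terminal enumerators of the forms '_sample_1', '_Sample_2', '_s3' or '_4'
xe <- c("xx_sample_1", "xx_Sample_2", "xx_s3", "xx_4")
sub("_([sS]ample_|[sS])?[0-9]+$", "", xe)
```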
#### Remove/Modify Enumerators
The aim of this function consists in identifying a _common_ pattern for terminal enumerators (ie at the end of words/character-strings) and subsequently modifying or removing them.
As separator-symbols and separator-words are given independently, all combinations thereof may be tested.
Furthermore, the user has the choice to (automatically) include all truncated versions of separator-words (eg _Sam_ instead of _Sample_).
As basic setting `rmEnumeratorName()` allows to identify and then modify a _common_ terminal enumerator from all elements of a character string :
```{r rmEnumeratorName1, echo=TRUE}
xx <- c("hg_Re1","hjRe2_Re2","hk-Re3_Re33")
rmEnumeratorName(xx)
rmEnumeratorName(xx, newSep="--")
rmEnumeratorName(xx, incl="anyCase")
```
Furthermore, this function allows scanning a matrix of text-data and performing similar operations on the _first_ column found containing a _common_ terminal enumerator.
```{r rmEnumeratorName2, echo=TRUE}
xy <- cbind(a=11:13, b=c("11#11","2_No2","333_samp333"), c=xx)
rmEnumeratorName(xy)
rmEnumeratorName(xy,incl=c("anyCase","trim2","rmEnumL"))
```
If you wish to remove/substitute multiple types of enumerators, the function `rmEnumeratorName()` must be run independently, see the last example below.
```{r rmEnumeratorName3, echo=TRUE}
xz <- cbind(a=11:13, b=c("23#11","4#2","567#333"), c=xx)
apply(xz, 2, rmEnumeratorName, sepEnum=c("","_"), newSep="_", silent=TRUE)
```
#### Unify Enumerators
The (slightly older) function `unifyEnumerator()` offers fewer options; in particular, the potential separator-words must be given explicitly, only lower/upper-case may be kept flexible.
```{r unifyEnumerator1, echo=TRUE}
unifyEnumerator(c("ab-1","ab-2","c-3"))
unifyEnumerator(c("ab-R1","ab-R2","c-R3"))
unifyEnumerator(c("ab-1","c3-2","dR3"), stringentMatch=FALSE)
```
### Adjust Decimal Prefixes And Extract Numeric+Unit Part
The function `adjustUnitPrefix()` provides help extracting the numeric part of character vectors and allows adjusting all values to a single common decimal prefix.
This can be used to convert a vector of mixed prefixes like 'z','a','f','p','n','u' and 'm' (note: the 'u' is used for 'micro').
The output is a numeric vector with numeric+unit as names.
```{r adjustUnitPrefix1, echo=TRUE}
adjustUnitPrefix(c("10.psec","2 fsec"), unit="sec")
```
In the example below you can see that additional text (to the right of the digit+unit) gets stripped off.
```{r adjustUnitPrefix2, echo=TRUE}
adjustUnitPrefix(c("10.psec abc","2 fsec etc"), unit="sec")
```
### Merging Multiple Named Vectors To Matrix
The function `mergeVectors()` allows merging multiple named vectors (each element needs to be named).
Basically, all elements carrying the same name across different input-vectors will be aligned in the same column of the output (input-vectors appear as lines).
Different to _merge()_, which allows merging only 2 data.frames, here multiple vectors may be merged at once.
```{r mergeVectors1, echo=TRUE}
x1 <- c(a=1, b=11, c=21)
x2 <- c(b=12, c=22, a=2)
x3 <- c(a=3, d=43)
mergeVectors(vect1=x1, vect2=x2, vect3=x3)
```
```{r mergeVectors2, echo=TRUE}
mergeVectors(vect1=x1, vect2=x2, vect3=x3, inclInfo=TRUE) # return list with additional info
```
In the example below we'll add another vector _without_ named elements.
As you can see, a message tells that this vector has been ignored/omitted.
```{r mergeVectors3, echo=TRUE}
x4 <- 41:44 # no names - not conforming for merging and will be ignored
mergeVectors(x1, x2, x3, x4)
```
### Match All Lines Of Matrix To Reference
The function `matchMatrixLinesToRef()` allows adjusting the order of lines of a matrix `mat` to a reference character-vector `ref`,
even when initial direct matching of character-strings using `match()` is not possible/successful.
In this case, various variants of using `grep()` will be used to see if unambiguous matching of characteristic parts of the text is possible.
All columns of `mat` will be tested, and the column giving the best results will be used.
```{r matchMatrixLinesToRef1, echo=TRUE}
## Note : columns b and e allow non-ambiguous match, not all elements of e are present in a
mat0 <- cbind(a=c("mvvk","axxd","bxxd","vv"),b=c("iwwy","iyyu","kvvh","gxx"), c=rep(9,4),
d=c("hgf","hgf","vxc","nvnn"), e=c("_vv_","_ww_","_xx_","_yy_"))
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[,5])
matchMatrixLinesToRef(mat0[,1:4], ref=mat0[1:3,5], inclInfo=TRUE)
matchMatrixLinesToRef(mat0[,-2], ref=mat0[,2], inclInfo=TRUE) # needs 'reverse grep'
```
### Order Matrix According To Reference
The function `orderMatrToRef()` aims at facilitating bringing a matrix of text/data into the order of a given reference (character vector).
This function will try all columns of the input-matrix to see which one gives the best coverage/highest number of matches to the reference.
If no hits are found, this function will try partial matching (using _grep()_) of all entries of the reference and vice-versa of all entries of the matrix.
```{r orderMatrToRef1, echo=TRUE}
mat1 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], rep(1:5)), ncol=3)
orderMatrToRef(mat1, paste0(letters[c(3,4,5,3,4)],c(1,3,5,2,4)))
mat2 <- matrix(paste0("__",letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3:5 )), ncol=3)
orderMatrToRef(mat2, paste0(letters[c(3,4,5,3,4)],c(1,3,5,1,4)))
mat3 <- matrix(paste0(letters[rep(c(1,1,2,2,3),3) +rep(0:2,each=5)], c(rep(1:5,2),1,1,3,3,5 )), ncol=3)
orderMatrToRef(mat3, paste0("__",letters[c(3,4,5,3,4)],c(1,3,5,1,3)))
```
### Value Matching With Option For Concatenated Terms
Sometimes we need to match terms in concatenated tables.
The function `concatMatch()` was designed to behave similarly to _match()_, but also allows searching among concatenated terms and applying some further text-simplifications.
```{r concatMatch1, echo=TRUE}
## simple example without concatenations or text-extensions
x0 <- c("ZZ","YY","AA","BB","DD","CC","D")
tab0 <- c("AA","BB,E","CC","FF,U")
match(x0, tab0)
concatMatch(x0, tab0) # same result as match(), but with names
## now let's construct something similar but with concatenations and text-extensions
x1 <- c("ZZ","YY","AA","BB-2","DD","CCdef","Dxy") # modif of single ID (no concat)
tab1 <- c("AA","WW,Vde,BB-5,E","CCab","FF,Uef")
match(x1, tab1) # match finds only the 'simplest' case (ie "AA")
concatMatch(x1, tab1) # finds all hits as in example above
x2 <- c("ZZ,Z","YY,Y","AA,Z,Y","BB-2","DD","X,CCdef","Dxy") # conatenated in 'x'
tab2 <- c("AA","WW,Vde,BB-5,E","CCab,WW","FF,UU")
concatMatch(x2, tab2) # concatenation in both 'x' and 'table'
```
### Check For (Strict) Order
The function `checkStrictOrder()` was designed to scan each line of a (numeric) input matrix for up-, down- or equal-development, ie the change to the next value on the right.
For example, when working with a matrix of 4 columns, one can look 3 times at the neighbour value following to the right (in the same line); thus the output will mention 3 events (for each line).
If _all counts_ are 'up' and 0 counts are 'down' or 'eq', the line follows a permanent increase (not necessarily linear), etc.
In some automated procedures (where the number of columns of the initial input may vary) it may be easier to test if any 0 occur.
For this reason the argument _invertCount_ was introduced; in this case a line with a '0' occurring characterizes a constant behaviour (for the respective column), as shown in the sketch following the example below.
```{r checkStrictOrder1, echo=TRUE}
set.seed(2005); mat1 <- rbind(matrix(round(runif(40),1),nc=4), rep(1,4))
head(mat1)
checkStrictOrder(mat1); mat1[which(checkStrictOrder(mat1)[,2]==0),]
```
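As mentioned above, the argument _invertCount_ changes the way of counting, so that automated procedures may simply test for the occurrence of a '0'; a minimal sketch contrasting both settings :
```{r checkStrictOrder2, echo=TRUE}
## contrast regular and inverted counts; in the inverted form a '0' flags consistent behaviour
checkStrictOrder(mat1, invertCount=FALSE)
checkStrictOrder(mat1, invertCount=TRUE)
```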
A slightly more general way of testing can be done using `checkGrpOrder()`. Here, simply a logical value will be produced for each line of input, indicating if there is constant behaviour.
When the argument _revRank=TRUE_ (default), constant up- or constant down-characteristics will be tested.
```{r checkGrpOrder1, echo=TRUE}
head(mat1)
checkGrpOrder(mat1)
checkGrpOrder(mat1, revRank=FALSE) # only constant 'up' tested
```
## Working With Regressions {#WorkingWithRegressions}
### Best Starting Point For Linear Regressions (Start of linearity)
In many types of measurements the very low-level measures are delicate.
Especially when the readout starts with a baseline signal before increasing amounts of the analyte start producing a linear relationship.
In such cases some of the very lowest levels of the analyte are masked by the (random) baseline signal.
The function `linModelSelect()` presented here allows omitting some of the lowest analyte measures to focus on the linear part of the dose-response relationship.
```{r linModelSelect1, echo=TRUE}
li1 <- rep(c(4,3,3:6), each=3) + round(runif(18)/5,2)
names(li1) <- paste0(rep(letters[1:5], each=3), rep(1:3,6))
li2 <- rep(c(6,3:7), each=3) + round(runif(18)/5, 2)
dat2 <- rbind(P1=li1, P2=li2)
exp2 <- rep(c(11:16), each=3)
exp4 <- rep(c(3,10,30,100,300,1000), each=3)
## Check & plot for linear model
linModelSelect("P1", dat2, expect=exp2)
linModelSelect("P2", dat2, expect=exp2)
```
This function was designed for use with rather small data-sets with no (or very few) measures of base-line.
When larger panels of data are available, it may be better to first define a confidence interval for the base-line measurement
and then only to consider points outside this confidence interval for regressing dose-response relationships
(see also [Detection limit](https://en.wikipedia.org/wiki/Detection_limit)).
### High Throughput Testing For Linear Regressions
Once we have run multiple linear regressions on different parts of the data, we might want to compare them in a single plot.
Below, we construct 10 series of data that get modeled the same way, ideally one would obtain a slope close to 1.0.
We still allow omitting some starting points, if the resulting model would fit better.
```{r plotLinModelCoef1, echo=TRUE}
set.seed(2020)
x1 <- matrix(rep(c(2,2:5),each=20) + runif(100) +rep(c(0,0.5,2:3,5),20),
byrow=FALSE, ncol=10, dimnames=list(LETTERS[1:10],NULL))
## just the 1st regression :
summary(lm(b~a, data=data.frame(b=x1[,1], a=rep(1:5,each=2))))
## all regressions
x1.lmSum <- t(sapply(lapply(rownames(x1), linModelSelect, dat=x1,
expect=rep(1:5,each=2), silent=TRUE, plotGraph=FALSE),
function(x) c(x$coef[2,c(4,1)], startFr=x$startLev)))
x1.lmSum <- cbind(x1.lmSum, medQuantity=apply(x1,1,median))
x1.lmSum[,1] <- log10(x1.lmSum[,1])
head(x1.lmSum)
```
Now we can try to plot :
```{r plotLinModelCoef2, echo=TRUE}
wrGraphOK <- requireNamespace("wrGraph", quietly=TRUE) # check if package is available
if(wrGraphOK) wrGraph::plotW2Leg(x1.lmSum, useCol=c("Pr(>|t|)","Estimate","medQuantity","startFr"),
legendloc="topleft", txtLegend="start at")
```
## Combinatorics Issues {#CombinatoricsIssues}
### All Pairwise Ratios
`ratioAllComb()` calculates all possible pairwise ratios between all individual values of x and y.
```{r ratioAllComb0, echo=TRUE}
set.seed(2014); ra1 <- c(rnorm(9,2,1), runif(8,1,2))
```
Let's assume there are 2 parts of 'x' for which we would like to know the representative ratio :
The ratio of medians does not well reflect the typical ratio (if each element has the same chance to be picked).
```{r ratioAllComb1, echo=TRUE}
median(ra1[1:9]) / median(ra1[10:17])
```
Instead, we'll build all possible ratios and summarize them.
```{r ratioAllComb2, echo=TRUE}
summary( ratioAllComb(ra1[1:9], ra1[10:17]))
boxplot(list(norm=ra1[1:9], unif=ra1[10:17], rat=ratioAllComb(ra1[1:9],ra1[10:17])))
```
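Conceptually this corresponds to the outer quotient of the two vectors; a quick base-R cross-check (same values, up to the order of elements) :
```{r ratioAllComb3, echo=TRUE}
## base-R equivalent of all pairwise ratios (up to ordering)
rat2 <- as.numeric(outer(ra1[1:9], ra1[10:17], "/"))
summary(rat2)
```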
### Count Frequency Of Terms Combined From Different Drawings (combineAsN)
The main idea of this function is to count frequency of terms when combining different drawings.
Suppose you are asking students for their preferred hobbies.
Now, you want to know how many terms will occur in common in groups of 3 students.
In the example below, simple letters are shown instead of names of hobbies ...
In its simplest way of use, `combineAsN()` does something similar to _table()_ :
Here we're looking at the full combinatorics of making groups of _nCombin_ students, and let's count the frequency of terms found 3 times identical, 2 times, or only once (ie not cited by the others).
In case multiple groups of _nCombin_ students can be formed, the average of the counts, standard error of the mean (sem), 95% confidence interval (CI) and sd are given to summarize the results.
```{r combineAsN1, echo=TRUE}
tm1 <- list(a1=LETTERS[1:7], a2=LETTERS[3:9], a3=LETTERS[6:10], a4=LETTERS[8:12])
combineAsN(tm1, nCombin=3, lev=gl(1,4))[,1,]
```
One may imagine that different locations/cities/countries will give different results.
Thus, we'll declare the different origins/locations using the _lev_ argument.
Now, this function focusses (by default) on combinations of students from _nCombin_ different origins/locations and
counts how many hobbies were mentioned as all different ('sing', ie number of hobbies only one student mentioned),
as single repeat ('doub') or three times repeated ('trip'), plus minimum twice or 'any' (ie number of hobbies cited, no matter how many repeats).
The output is an array; the 3rd dimension contains the counts, followed by sem, CI and sd.
```{r combineAsN2, echo=TRUE}
## different levels/groups in list-elements
tm4 <- list(a1=LETTERS[1:15], a2=LETTERS[3:16], a3=LETTERS[6:17], a4=LETTERS[8:19],
b1=LETTERS[5:19], b2=LETTERS[7:20], b3=LETTERS[11:24], b4=LETTERS[13:25], c1=LETTERS[17:26],
d1=LETTERS[4:12], d2=LETTERS[5:11], d3=LETTERS[6:12], e1=LETTERS[7:10])
te4 <- combineAsN(tm4, nCombin=4, lev=substr(names(tm4),1,1))
str(te4)
te4[,,1] # the counts part only
```
## Import/Export
### Batch-Reading Of CSV Files
Some software produces a series of csv files, where a large experiment/data-set gets recorded as multiple files.
The function `readCsvBatch()` was designed for reading multiple csv files of exactly the same layout and to join their content.
As output a list with the content of each file can be produced (one matrix per file), or the data may be fused into an array, as shown below.
```{r readCsvBatch, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("pl01_1.csv","pl01_2.csv","pl02_1.csv","pl02_2.csv")
datAll <- readCsvBatch(fiNa, path1, silent=TRUE)
str(datAll)
```
When setting the first argument _fileNames_ to _NULL_, you can read all files of a given path.
```{r readCsvBatch2, echo=TRUE}
## batch reading of all csv files in specified path :
datAll2 <- readCsvBatch(fileNames=NULL, path=path1, silent=TRUE)
str(datAll2)
```
### Batch-Reading Of Tabulated Files
The function `readTabulatedBatch()` allows fast batch reading of tabulated files.
All files specified (or all files from a given directory) will be read into separate data.frames of a list.
Default options are US-style comma (decimal point), and automatic testing for a header-line in case the package _data.table_ is available (otherwise : no header).
Furthermore it is possible to designate a given (numeric) column and directly filter for all lines passing a given threshold, allowing to obtain smaller objects.
```{r readTabulatedBatch1, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- c("a1.txt","a2.txt")
allTxt <- readTabulatedBatch(fiNa, path1)
str(allTxt)
```
### Reading Incomplete Tables
Sometimes we may get confronted with data which look like 'incomplete' tables.
In such cases some rows do not contain as many elements/columns as other rows.
Files with this type of data may pose a problem for `read.table()` (from the _utils_ package).
In some cases using the argument _fill=TRUE_ may allow to overcome this problem.
The function _readVarColumns()_ (from this package) was designed to provide better help in such odd cases.
Basically, each line is read and parsed separately, the user should check/decide on the separator to be used.
The example below lists people's names in different locations, some locations have more persons ...
Sometimes exporting such data will generate shorter lines in locations with fewer elements (here 'London') and no additional separators will get added (to mark all empty fields) towards the end.
The function `readVarColumns()` provides help to read such data, if the content (and separators) of the last columns are missing.
```{r readVarColumns, echo=TRUE}
path1 <- system.file("extdata", package="wrMisc")
fiNa <- "Names1.tsv"
datAll <- readVarColumns(fiName=file.path(path1,fiNa), sep="\t")
str(datAll)
```
In this example _readVarColumns()_ would give a warning (and column-names would not be recognized); if you use the argument _header=TRUE_ you'll get an error and nothing gets read.
### Converting Url For Reading Tabulated Data From GitHub
[GitHub](https://github.com/) allows sharing code and (to a lesser degree) data.
In order to properly read tabulated (txt, tsv or csv) data directly from a given url, the user should switch to the 'Raw' view.
The function `gitDataUrl()` allows to conveniently switch any url (on git) to the format from the 'Raw' view, suitable for directly reading the data using _read.delim()_, _read.table()_ or _read.csv()_ etc ...
```{r readGit1, echo=TRUE}
## An example url with tabulated data :
url1 <- "https://github.com/bigbio/proteomics-metadata-standard/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv"
gitDataUrl(url1)
```
The example below shows how this is used in the function _readSampleMetaData()_ in [wrProteo](https://CRAN.R-project.org/package=wrProteo).
```{r readGit2, echo=TRUE}
dataPxd <- try(read.delim(gitDataUrl(url1), sep='\t', header=TRUE))
str(dataPxd)
```
---
## Normalization {#Normalization}
The main reason for normalization is to remove variability in the data which is not directly linked to the (original/biological) concept of a given experiment.
High throughput data from real world measurements may easily contain various deformations due to technical reasons, eg slight temperature variations, electromagnetic interference, instability of reagents etc.
In particular, transferring constant amounts of liquids/reagents in highly repeated steps over large experiments is often very challenging; small variations of the amounts of liquid (or similar) are typically addressed by normalization. However, applying aggressive normalization to the data also brings a considerable risk of starting to lose some of the effects one intended to study.
At some point it may rather be better to eliminate a few samples or branches of an experiment to avoid too invasive intervention. This shows that quality control can be tightly linked to decisions about data-normalization.
In conclusion, normalization may be far more challenging than simply running some algorithms ...
In general, the user has to assume/define some hypothesis to justify intervention.
Sometimes specific elements of an experiment are known to be not affected and can therefore be used to normalize the rest.
Eg, if you observe growth of trees in a forest, big blocks of rock on the floor are assumed not to change their location.
So one could use them as alignment-marks to superpose pictures taken at slightly different positions.
The hypothesis of no global changes is very common : During the course of many biological experiments (eg change of nutrient) one
assumes that only a small portion of the elements measured (eg the abundance of all different gene-products) do change,
since many processes of a living cell like growth, replication and interaction with neighbour-cells are assumed not to be affected.
So, if one assumes that there are no global changes, one normalizes the input-data in a way that the average or median across each experiment will give the same value (see the minimal sketch below).
In analogy, if one takes photographs on a partially cloudy day, most cameras will adjust light settings (sun or clouds) so that global luminosity stays the same.
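In its simplest (additive) form this hypothesis translates to shifting each column so that all column-medians coincide; a minimal base-R sketch of this idea :
```{r medianCenterSketch, echo=TRUE}
## toy data : the 2nd column is systematically higher
ma0 <- matrix(c(1:6, 2*(1:6)), ncol=2)
## shift each column so that its median equals the global median
sweep(ma0, 2, apply(ma0, 2, median) - median(ma0), "-")
```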
However, if too many of the measured elements are affected, this normalization approach will lead to (additional) loss of information.
It is _essential_ to understand the type of deformation(s) data may suffer from in order to choose the appropriate approaches for normalization.
Of course, graphical representations ([PCA](https://en.wikipedia.org/wiki/Principal_component_analysis), [MA-plots](https://en.wikipedia.org/wiki/MA_plot), etc) are extremely important for identifying abnormalities and potential problems.
The package [wrGraph](https://CRAN.R-project.org/package=wrGraph) offers also complementary options useful in the context of normalization.
Again, graphical representation(s) of the data help to visualize how different normalization procedures affect outcomes.
Before jumping into normalization it may be quite useful to _filter_ the data first.
The overall idea is that most high-throughput experiments do produce some non-meaningful data (artefacts), and it may be wise to remove such 'bad' data
first, as they may affect normalization (in particular _extreme values_).
A special case of problematic data concerns _NA_-values.
### Filter Lines Of Matrix To Reduce Content Of NAs
Frequent _NA_-values may represent another potential issue. With NA-values there is no general optimal advice.
To get started, you should try to investigate how and why NA-values occurred to check if there is a special 'meaning' to them.
For example, on some measurement systems values below detection limit may be simply reported as NAs.
If the lines of your data represent different features quantified (eg proteins), then lines with mostly NA-values represent features
that may not be well exploited anyway. Therefore many times one tries to filter away lines of 'bad' data.
Of course, if there is a column (sample) with an extremely high content of NAs, one should also investigate what might be particular
with this column (sample), to see if one might be better off eliminating the entire column.
Please note, that imputing _NA_-values represents another option instead of filtering and removing, multiple other packages address this in detail, too.
All decisions of which approach to use should be data-driven.
#### Filter For Each Group Of Columns For Sufficient Data As Non-NA
The function `presenceGrpFilt()` allows filtering each group of columns (defined via the argument _gr_) for sufficient presence of data (ie values other than _NA_); the argument _presThr_ sets the required proportion, as illustrated below with a more and a less stringent threshold.
```{r presenceGrpFilt1, echo=TRUE}
dat1 <- matrix(1:56,ncol=7)
dat1[c(2,3,4,5,6,10,12,18,19,20,22,23,26,27,28,30,31,34,38,39,50,54)] <- NA
grp1 <- gl(3,3)[-(3:4)]
dat1
## now let's filter
presenceGrpFilt(dat1, gr=grp1, presThr=0.75) # stringent
presenceGrpFilt(dat1, gr=grp1, presThr=0.25) # less stringent
```
#### Filter As Separate Pairwise Groups Of Samples
If you want to use your data in a pair-wise view (like running t-tests on each line) the function `presenceFilt()`
allows eliminating lines containing too many _NA_-values for each pair-wise combination of the groups/levels.
```{r presenceFilt, echo=TRUE}
presenceFilt(dat1, gr=grp1, maxGr=1, ratM=0.1)
presenceFilt(dat1, gr=grp1, maxGr=2, rat=0.5)
```
#### Cleaning Replicates
This procedure aims to remove (by setting to _NA_) the most extreme of noisy replicates.
Thus, it is assumed that all columns of the input matrix (or data.frame) are replicates of the other columns.
The _nOutl_ most distant points are identified and will be set to _NA_.
```{r cleanReplicates, echo=TRUE}
(mat3 <- matrix(c(19,20,30,40, 18,19,28,39, 16,14,35,41, 17,20,30,40), ncol=4))
cleanReplicates(mat3, nOutl=1)
cleanReplicates(mat3, nOutl=3)
```
### The Function normalizeThis()
In biological high-throughput data columns typically represent different samples, which may be organized as replicates.
During high-throughput experiments thousands of (independent) elements are measured (eg abundance of gene-products); they are represented by rows.
As real-world experiments are not always as perfect as we may think, small changes in the signal measured may easily happen.
Thus, the aim of normalizing is to remove or reduce any trace/variability in the data not related to the original experiment but due to imperfections during detection.
Note, that some experiments may produce a considerable amount of missing data (NAs) which require special attention (dedicated developments exist in other R-packages eg in [wrProteo](https://CRAN.R-project.org/package=wrProteo)).
My general advice is to first carefully look where such missing data is observed and to pay attention to replicate measurements
where a given element once was measured with a real numeric value and once as missing information (NA).
```{r normalizeThis0, echo=TRUE}
set.seed(2015); rand1 <- round(runif(300) +rnorm(300,0,2),3)
dat1 <- cbind(ser1=round(100:1 +rand1[1:100]), ser2=round(1.2*(100:1 +rand1[101:200]) -2),
ser3=round((100:1 +rand1[201:300])^1.2-3))
dat1 <- cbind(dat1, ser4=round(dat1[,1]^seq(2,5,length.out=100) +rand1[11:110],1))
## Let's introduce some NAs
dat1[dat1 <1] <- NA
## Let's get a quick overview of the data
summary(dat1)
## some selected lines (indeed, the 4th column appears always much higher)
dat1[c(1:5,50:54,95:100),]
```
Our toy data may be normalized by a number of different criteria.
In real applications the nature of the data and the type of deformation detected/expected will largely help
deciding which normalization might be the 'best' choice. Here we'll try first normalizing by the mean,
ie all columns will be forced to end up with the same column-mean.
The trimmed mean does not consider values at extremes (as outliers are frequently artefacts and display extreme values).
When restricting even more strongly which values to consider, one will eventually end up with the median (the 3rd method used below).
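As a quick base-R illustration of why trimming makes the mean robust toward extreme values :
```{r trimmedMeanSketch, echo=TRUE}
x <- c(1:9, 120)                       # contains one extreme value
mean(x); mean(x, trim=0.2); median(x)  # the trimmed mean moves close to the median
```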
```{r normalizeThis1, echo=TRUE}
no1 <- normalizeThis(dat1, refGrp=1:3, meth="mean")
no2 <- normalizeThis(dat1, refGrp=1:3, meth="trimMean", trim=0.4)
no3 <- normalizeThis(dat1, refGrp=1:3, meth="median")
no4 <- normalizeThis(dat1, refGrp=1:3, meth="slope", quantFa=c(0.2,0.8))
```
It is suggested to verify normalization results by plots.
Note, that [Box plots](https://en.wikipedia.org/wiki/Box_plot) may not be appropriate in some cases (eg multimodal distributions),
for displaying more details you may consider using [Violin-Plots](https://en.wikipedia.org/wiki/Violin_plot) from packages [vioplot](https://CRAN.R-project.org/package=vioplot) or [wrGraph](https://CRAN.R-project.org/package=wrGraph), another option might be a (cumulated) frequency plot (eg in package [wrGraph](https://CRAN.R-project.org/package=wrGraph)).
```{r normalizeThis_plot1, echo=FALSE,eval=TRUE}
boxplot(dat1, main="raw data", las=1)
```
You can see clearly, that the 4th data-set has a problem of range. So we'll see if some proportional normalization
may help to make it more comparable to the other ones.
```{r normalizeThis_plot2, echo=FALSE,eval=TRUE}
layout(matrix(1:4, ncol=2))
boxplot(no1, main="mean normalization", las=1)
boxplot(no2, main="trimMean normalization", las=1)
boxplot(no3, main="median normalization", las=1)
boxplot(no4, main="slope normalization", las=1)
```
### Normalize By Rows
The standard approach for normalizing relies on considering all columns as collections of data whose distribution is not supposed to change.
In some cases/projects we may want to formulate a much more 'aggressive' hypothesis : We consider the content of all columns strictly as the same.
For example this may be the case when comparing with technical replicates only.
In such cases one may use the function `rowNormalize()` which tries to find the average or mean optimal within-line normalization factor.
Besides, an additional mode of operation for _sparse data_ has been added :
Basically, once a row contains just one NA, this row can't be used any more to derive a normalization factor for all rows.
Thus, with many NA-values the number of 'complete' rows will be low or even 0, rendering this approach inefficient or impossible.
Once the content of NA-values is above a customizable threshold, the data will be broken into smaller subsets with fewer groups of fewer columns,
thus increasing the chances of finding 'complete' subsets of data, which will be normalized first and added to the other subsets in later steps.
This approach relies on the **hypothesis** that *all data in a given line should be (approximately) the same value* !
Thus, this procedure is particularly well adapted to the case when _all_ samples are multiple replicate measurements of the _same_ sample.
```{r rowNormalize1, echo=TRUE}
set.seed(2); AA <- matrix(rbinom(110, 10, 0.05), nrow=10)
AA[,4:5] <- AA[,4:5] *rep(4:3, each=nrow(AA))
AA1 <- rowNormalize(AA)
round(AA1, 2)
```
Now, let's make this sparse and try normalizing:
```{r rowNormalize2, echo=TRUE}
AC <- AA
AC[which(AC <1)] <- NA
(AC1 <- rowNormalize(AC))
```
Like with _normalizeThis()_ we can define some reference-lines (only these lines will be considered to determine normalization-factors)
```{r rowNormalize3, echo=TRUE}
(AC3 <- rowNormalize(AC, refLines=1:5, omitNonAlignable=TRUE))
```
Please note, that the iterative procedure for _sparse data_ may consume large amounts of computational resources, in particular when
a small number of subgroups has been selected.
### Matrix Coordinates Of Values/Points According To Filtering
Sometimes one needs to obtain the coordinates of values/points of a matrix according to a given filtering condition.
The standard approach using _which()_ gives only a _linearized_ index but not row/column, which is sufficient for replacing indexed values.
If you need to know the true row/column indexes, you may use `coordOfFilt()`.
```{r coordOfFilt1, echo=TRUE}
set.seed(2021); ma1 <- matrix(sample.int(n=40, size=27, replace=TRUE), ncol=9)
## let's test which values are >37
which(ma1 >37) # doesn't tell which row & col
coordOfFilt(ma1, ma1 >37)
```
## Statistical Testing {#StatisticalTesting}
### Normal Random Number Generation with Close Fit to Expected mean and sd
When creating random values to a given expected _mean_ and _sd_, the results obtained using the standard function `rnorm()`
may deviate somewhat from the expected mean and sd, in particular with low _n_.
To still produce random values fitting closely to the expected _mean_ and _sd_ you may use the function `rnormW()`.
The case of _n=2_ is quite simple, with only one possible result.
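With the sample-sd definition used by `sd()`, these two values are mean ± sd/sqrt(2), which may be verified in base R :
```{r rnormW0, echo=TRUE}
## the unique solution (up to ordering) for n=2 : mean +/- sd/sqrt(2)
x0 <- 10 + c(-1, 1)/sqrt(2)
mean(x0); sd(x0)     # gives exactly mean=10 and sd=1
```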
In other cases (_n>2_), there will be a random initiation which can be fixed using the argument _seed_.
```{r rnormW1, echo=TRUE}
## some sample data :
x1 <- (11:16)[-5]
mean(x1); sd(x1)
```
```{r rnormW2, echo=TRUE}
## the standard way for generating normal random values
ra1 <- rnorm(n=length(x1), mean=mean(x1), sd=sd(x1))
## In particular with low n, the random values deviate somehow from expected mean and sd :
mean(ra1) -mean(x1)
sd(ra1) -sd(x1)
```
```{r rnormW3, echo=TRUE}
## random numbers with close fit to expected mean and sd :
ra2 <- rnormW(length(x1), mean(x1), sd(x1))
mean(ra2) -mean(x1)
sd(ra2) -sd(x1) # much closer to expected value
```
Thus, the second data-set fits, even with few _n_, very well to the globally defined/expected characteristics.
### Moderated Pair-Wise t-Test from limma
If you are not familiar with the way data is handled in the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html)
and you would like to use some of the tools for running moderated t-tests therein, this will provide easy access using `moderTest2grp()` :
```{r moderTest2grp, echo=TRUE}
set.seed(2017); t8 <- matrix(round(rnorm(1600,10,0.4),2), ncol=8,
dimnames=list(paste("l",1:200), c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2]+3 # augment lines 3:6 for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6]+3 # augment lines 5:8 for AA2&BB2 (c,d,g,h should be found)
t4 <- log2(t8[,1:4]/t8[,5:8])
fit4 <- moderTest2grp(t4, gl(2,2))
## now we'll use limma's topTable() function to look at the 'best' results
if("list" %in% mode(fit4)) { # if you have limma installed we can look further
library(limma)
topTable(fit4, coef=1,n=5) # effect for 3,4,7,8
fit4in <- moderTest2grp(t4, gl(2,2), testO="<")
if("list" %in% mode(fit4in)) topTable(fit4in, coef=1,n=5) }
```
### Multiple Moderated Pair-Wise t-Tests From limma
If you want to make multiple pair-wise comparisons using `moderTestXgrp()` :
```{r moderTestXgrp, echo=TRUE}
grp <- factor(rep(LETTERS[c(3,1,4)], c(2,3,3)))
set.seed(2017); t8 <- matrix(round(rnorm(208*8,10,0.4),2), ncol=8,
dimnames=list(paste(letters[], rep(1:8,each=26),sep=""), paste(grp,c(1:2,1:3,1:3),sep="")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f)
t8[5:8,c(1:2,6:8)] <- t8[5:8,c(1:2,6:8)] -1.5 # lower lines
t8[6:7,3:5] <- t8[6:7,3:5] +2.2 # augment lines
## expect to find C/A in c,d,g, (h)
## expect to find C/D in c,d,e,f
## expect to find A/D in f,g,(h)
test8 <- moderTestXgrp(t8, grp)
head(test8$p.value, n=8)
```
### Transform p-values To Local False Discovery Rate (lfdr)
To get an introduction into local false discovery rate estimations you may read [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209).
A convenient way to get lfdr values calculated by the package [fdrtool](https://CRAN.R-project.org/package=fdrtool) is available via the function `pVal2lfdr()`.
Note, that the toy-example used below is too small for estimating meaningful lfdr values.
For this reason the function _fdrtool()_ from package [fdrtool](https://CRAN.R-project.org/package=fdrtool) will issue warnings.
```{r pVal2lfdr, echo=TRUE}
set.seed(2017); t8 <- matrix(round(rnorm(160,10,0.4),2), ncol=8, dimnames=list(letters[1:20],
c("AA1","BB1","CC1","DD1","AA2","BB2","CC2","DD2")))
t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f) for AA1&BB1
t8[5:8,5:6] <- t8[5:8,5:6] +3 # augment lines 5:8 (e-h) for AA2&BB2 (c,d,g,h should be found)
head(pVal2lfdr(apply(t8, 1, function(x) t.test(x[1:4], x[5:8])$p.value)))
```
### Confidence Intervals (under Normal Distribution)
The [confidence interval (CI)](https://en.wikipedia.org/wiki/Confidence_interval) is a common way of describing the uncertainty of measured or estimated values.
The function `confInt()` allows calculating the confidence interval of the mean (using the functions _qt()_ and _sd()_) under
a given [significance level (alpha)](https://en.wikipedia.org/wiki/Statistical_significance),
assuming that the [Normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) is valid.
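The underlying classical formula, which `confInt()` is expected to follow (given that it relies on _qt()_ and _sd()_), can be written out directly in base R; here the half-width of the interval is calculated :
```{r confIntSketch, echo=TRUE}
## minimal sketch : CI half-width = qt(1 -alpha/2, df=n-1) * sd(x)/sqrt(n)
ciHalf <- function(x, alpha=0.05) qt(1 -alpha/2, df=length(x) -1) * sd(x)/sqrt(length(x))
ciHalf(rnorm(30))
```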
```{r fcCI, echo=TRUE}
set.seed(2022); ran <- rnorm(50)
confInt(ran, alpha=0.05)
## plot points and confidence interval of mean
plot(ran, jitter(rep(1, length(ran))), ylim=c(0.95, 1.05), xlab="random variable 'ran'",main="Points and Confidence Interval of Mean (alpha=0.05)", ylab="", las=1)
points(mean(ran), 0.97, pch=3, col=4) # mean
lines(mean(ran) +c(-1, 1) *confInt(ran, 0.05), c(0.97, 0.97), lwd=4, col=4) # CI
legend("topleft","95% conficence interval of mean", text.col=4,col=4,lty=1,lwd=1,seg.len=1.2,cex=0.9,xjust=0,yjust=0.5)
```
### Extract Groups Of Replicates From Pair-Wise Column-Names
When running multiple pairwise tests (using *moderTestXgrp()*) the column-names are concatenated group-names.
To get the index of which group has been used in which pair-wise set you may use the function `matchSampToPairw()`, as shown below.
```{r matchSampToPairw, echo=TRUE}
## make example if limma is not installed
if(!requireNamespace("limma", quietly=TRUE)) test8 <- list(FDR=matrix(1, nrow=2, ncol=3, dimnames=list(NULL,c("A-C","A-D","C-D"))))
matchSampToPairw(unique(grp), colnames(test8$FDR))
```
### Extract Numeric Part Of Column-Names
When running multiple pairwise tests (using *moderTestXgrp()*) the results will be in adjacent columns and the group-names reflected in the column-names.
In the case measurements from multiple levels of a given variable are compared, it is useful to extract the numeric part; the function `numPairDeColNames()` provides support to do so.
When extracting just the numeric part, unit names will get lost, though. Note, if units used are not constant (eg seconds and milliseconds mixed) the extracted numeric values do not reflect the real quantitative context any more.
```{r pairWiseConc1, echo=TRUE}
mat1 <- matrix(1:8, nrow=2, dimnames=list(NULL, paste0(1:4,"-",6:9)))
numPairDeColNames(mat1)
```
### Automatic Determination Of Replicate Structure Based On Meta-Data
In order to run statistical testing the user must know which samples should be considered replicates of each other.
The function `replicateStructure()` aims to provide help by checking all columns of a matrix of meta-data with the aim of identifying the replicate-status.
To do so, all columns are examined for how many groups of replicates they may design. Depending on the argument _method_, various options for choosing automatically exist :
The default _method="combAll"_ will select the column with the median number of groups (not counting all-different or all-same columns).
When using _method="combAll"_ (ie combine all columns that are neither all-different nor all-same), there is a risk that all lines (samples) will be considered different and no replicates remain.
To avoid this situation the argument _method_ can be set to _"combNonOrth"_.
Then, it will be checked if adding more columns would lead to complete loss of replicates, and -if so- the concerned columns will be omitted.
```{r replicateStructure1, echo=TRUE}
## column a is all different, b is groups of 2,
## c & d are groups of 2 nut NOT 'same general' pattern as b
strX <- data.frame(a=letters[18:11], b=letters[rep(c(3:1,4), each=2)],
c=letters[rep(c(5,8:6), each=2)], d=letters[c(1:2,1:3,3:4,4)],
e=letters[rep(c(4,8,4,7),each=2)], f=rep("z",8) )
strX
replicateStructure(strX[,1:2])
replicateStructure(strX[,1:4], method="combAll")
replicateStructure(strX[,1:4], method="combAll", exclNoRepl=FALSE)
replicateStructure(strX[,1:4], method="combNonOrth", exclNoRepl=TRUE)
replicateStructure(strX, method="lowest")
```
## Working With Clustering {#WorkingWithClustering}
Multiple concepts for clustering have been developed; most of them allow extracting a vector with the cluster-numbers.
Here some functions helping to work with the output of such clustering results are presented.
### Prepare Data For Clustering
The way how to prepare data for clustering may be as important as the choice of the actual clustering-algorithm ...
Many clustering algorithms are available in R (eg see also [CRAN Task View: Cluster Analysis & Finite Mixture Models](https://CRAN.R-project.org/view=Cluster)), many of them require the input data to be standardized.
The regular way of standardizing sets all elements to mean=0 and sd=1.
To do so, the function `scale()` may be used.
```{r std1, echo=TRUE}
dat <- matrix(2*round(runif(100),2), ncol=4)
mean(dat); sd(dat)
datS <- scale(dat)
apply(datS, 2, sd)
# each column was treated separately
mean(datS); sd(datS); range(datS)
# the mean is almost 0.0 and the sd almost 1.0
datB <- scale(dat, center=TRUE, scale=FALSE)
mean(datB); sd(datB); range(datB) # mean is almost 0
```
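As a cross-check, for a single vector `scale()` is equivalent to subtracting the mean and dividing by the standard deviation :
```{r scaleSketch, echo=TRUE}
## scale() corresponds to (x - mean(x))/sd(x)
x <- rnorm(10, 5, 2)
all.equal(as.numeric(scale(x)), (x -mean(x))/sd(x))
```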
However, if you want to standardize the entire data-set (and not each column separately), you may use `standardW()`.
Thus, relative differences visible within a line will be conserved.
Furthermore, in case of 3-dim arrays, this function returns also the same dimensions as the input.
```{r std2, echo=TRUE}
datS2 <- standardW(dat)
apply(datS2, 2, sd)
summary(datS2)
mean(datS2); sd(datS2)
datS3 <- standardW(dat, byColumn=TRUE)
apply(datS3, 2, sd)
summary(datS3)
mean(datS3); sd(datS3)
```
Sometimes it is sufficient to only set the minimum and maximum to a given range.
```{r scale1, echo=TRUE}
datR2 <- apply(dat, 2, scaleXY, 1, 100)
summary(datR2); sd(datR2)
```
### Characterize Clustering Results
Here a very basic clustering example...
```{r clu01, echo=TRUE}
nGr <- 3
irKm <- stats::kmeans(iris[,1:4], nGr, nstart=nGr*4) # no need to standardize
table(irKm$cluster, iris$Species)
#wrGraph::plotPCAw(t(as.matrix(iris[,1:4])), sampleGrp=irKm,colBase=irKm$cluster,useSymb=as.numeric(as.factor(iris$Species)))
```
Using the function `reorgByCluNo()` we can now 'apply' the clustering result to the initial data to obtain other information.
```{r clu02, echo=TRUE}
## sort results by cluster number
head(reorgByCluNo(iris[,-5], irKm$cluster))
tail(reorgByCluNo(iris[,-5], irKm$cluster))
```
Let's calculate the median and sd values for each cluster:
```{r clu03, echo=TRUE}
## median and sd
ir2 <- reorgByCluNo(iris[,-5], irKm$cluster, addInfo=FALSE, retList=TRUE)
```
```{r clu04, echo=TRUE}
sapply(ir2, function(x) apply(x, 2, median))
```
```{r clu05, echo=TRUE}
sapply(ir2, colSds)
```
Besides, we have already seen the function `cutArrayInCluLike()` in the section [Working with Arrays](#WorkingWithArrays).
## Tree-Like Structures {#TreeLikeStructures}
### Filter Lists Of Connected Nodes, Extension Of Networks As 'Sandwich'
When interrogating network-databases (like String for proteins or coexpressionDB for gene co-expression), typically a (semi-)quantitative
value is supplied with the connection of node 'A' to node 'B'.
In many cases, it may be useful to filter the initial query-output to retain only strong interactions.
Furthermore, it may be of interest to expand such networks by nodes allowing to (further) inter-connect the initial query-nodes
(so-called 'Sandwich' nodes, as they are in the middle of initial nodes); for such nodes a separate (eg even more stringent) threshold can be applied.
Here let's suppose nodes have 3-digit names (ie numbers). 7 nodes of an initial query gave 1 to 7 connected nodes;
the results are presented as a list of data.frames where the 1st column is the connected node and the 2nd column the quality score of the connection (edge).
Furthermore, let's assume that here lower scores are better.
```{r filterNetw0, echo=TRUE}
lst2 <- list('121'=data.frame(ID=as.character(c(141,221,228,229,449)),11:15),
'131'=data.frame(ID=as.character(c(228,331,332,333,339)),11:15),
'141'=data.frame(ID=as.character(c(121,151,229,339,441,442,449)),c(11:17)),
'151'=data.frame(ID=as.character(c(449,141,551,552)),11:14),
'161'=data.frame(ID=as.character(171),11),
'171'=data.frame(ID=as.character(161),11),
'181'=data.frame(ID=as.character(881:882),11:12) )
```
Now, we'd like to keep the core network consisting of all (directly) interconnected nodes with scores below 20 :
```{r filterNetw1, echo=TRUE}
(nw1 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=FALSE))
```
In the resulting output the 1st column now represents the query-nodes, the 2nd column all connected nodes based on filtering scores for edges,
and the 3rd column the score for the edges.
Let's also remove all nodes not connected to a backbone at least 3 nodes long, ie remove orphan pairs of nodes :
```{r filterNetw2, echo=TRUE}
(nw2 <- filterNetw(lst2, limInt=20, sandwLim=NULL, remOrphans=TRUE))
```
If you want to expand this network by nodes allowing to further interconnect the nodes from above,
we can add all 'sandwich' nodes (let's use a threshold of <=14, which will retain only the better 'sandwich'-edges) :
```{r filterNetw3, echo=TRUE}
(nw3 <- filterNetw(lst2, limInt=20, sandwLim=14, remOrphans=TRUE))
```
### Convert Collection Of Pairs Of Nodes To Propensity Matrix
Many times networks get created from pairs of nodes. One way to represent the full network is via propensity matrices.
Several advanced tools and packages rather accept such propensity matrices as input.
Here, it is assumed that each line of the input represents a separate pair of nodes connected by an edge.
```{r propMatr1, echo=TRUE}
pairs3L <- matrix(LETTERS[c(1,3,3, 2,2,1)], ncol=2) # loop of 3
(netw13pr <- pairsAsPropensMatr(pairs3L)) # as prop matr
```
### Characterize Individual Contribution Of Single Edges In Tree-Structures
```{r contribToContigPerFrag, echo=TRUE}
path1 <- matrix(c(17,19,18,17, 4,4,2,3), ncol=2,
dimnames=list(c("A/B/C/D","A/B/G/D","A/H","A/H/I"), c("sumLen","n")))
contribToContigPerFrag(path1)
```
### Count Same Start- And End- Sites Of Edges (Or Fragments)
If you have a set of fragments from a common ancestor and the fragment's start- and end-sites
are marked by index-positions (integers), you can make a simple graphical display :
```{r simpleFragFig, echo=TRUE}
frag1 <- cbind(beg=c(2,3,7,13,13,15,7,9,7, 3,3,5), end=c(6,12,8,18,20,20,19,12,12, 4,5,7))
rownames(frag1) <- letters[1:nrow(frag1)]
simpleFragFig(frag1)
```
Now we can make a matrix telling if some fragments do start or end at exactly the same position.
```{r countSameStartEnd, echo=TRUE}
countSameStartEnd(frag1)
```
## Support for Graphical Output {#SupportForGraphicalOutput}
### Convenient Paste-Collapse
The function `pasteC()` allows adding quotes and separating the last element by specific text (eg 'and').
```{r pasteC, echo=TRUE}
pasteC(1:4)
pasteC(letters[1:4],quoteC="'")
```
### Transform Numeric Values to Color-Gradient
By default most color-gradients end with a color very close to the beginning.
```{r color-gradient1, echo=TRUE}
set.seed(2015); dat1 <- round(runif(15),2)
plot(1:15, dat1, pch=16, cex=2, las=1, col=colorAccording2(dat1),
main="Color gradient according to value in y")
# Here we modify the span of the color gradient
plot(1:15, dat1, pch=16, cex=2, las=1,
col=colorAccording2(dat1, nStartO=0, nEndO=4, revCol=TRUE), main="blue to red")
# It is also possible to work with scales of transparency
plot(1:9, pch=3, las=1)
points(1:9, 1:9, col=transpGraySca(st=0, en=0.8, nSt=9,trans=0.3), cex=42, pch=16)
```
### Assign New Transparency To Given Colors
For this purpose you may use `convColorToTransp`.
```{r convColorToTransp, fig.height=6, fig.width=3, echo=TRUE}
col0 <- c("#998FCC","#5AC3BA","#CBD34E","#FF7D73")
col1 <- convColorToTransp(col0,alph=0.7)
layout(1:2)
pie(rep(1,length(col0)), col=col0, main="no transparency")
pie(rep(1,length(col1)), col=col1, main="new transparency")
```
### Print Matrix-Content As Plot
There are many ways of creating reports. If you simply want to combine a few plots into a pdf, the function `tableToPlot()`
may be helpful to add a small table (eg an overview of points/samples/files used in other plots of the same pdf).
This function prints tables in the current graphical output/window (which may be a pdf-device).
## Other Convenience Functions {#OtherConvenienceFunctions}
### Writing Compact Dates (more options ...)
Many times it may be useful to add the date to filenames when saving data or plots as files.
The built-in functions _date()_, _Sys.Date()_ and _Sys.Time()_ are a good way to start.
Generally I like to use abbreviated month-names, since the order of writing the month is different in Europe compared to the USA;
using month-names instead of month-numbers may thus help avoiding mis-interpretation of dates.
For example, 2021-03-05 means in Europe March 5th, while in other places it means May 3rd.
The R-functions mentioned above use local language settings, so I wrote the function `sysDate()` to
produce compact versions of the current date, **independent of local language settings** (or locale-specific, if you prefer;
note that in some languages - like French - the first 3 letters of the month may give ambiguous results !),
and to avoid white space ' ' (which I prefer to avoid in file-names).
Please look at the function's help-page for all available options.
```{r sysDate1, echo=TRUE}
## To get started
Sys.Date()
## Compact English names (in European order), no matter what your local settings are :
sysDate()
```
The table below shows a number of options to write the date in English or using local month-names :
```{r DateTab, echo=TRUE}
tabD <- cbind(paste0("univ",1:6), c(sysDate(style="univ1"), sysDate(style="univ2"),
sysDate(style="univ3"), sysDate(style="univ4"), as.character(sysDate(style="univ5")),
sysDate(style="univ6")), paste0(" local",1:6),
c(sysDate(style="local1"), sysDate(style="local2"), sysDate(style="local3"),
sysDate(style="local4"), sysDate(style="local5"), sysDate(style="local6")))
knitr::kable(tabD, caption="Various ways of writing current date")
```
## Session-Info
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/wrMisc/vignettes/wrMiscVignette1.Rmd
#' Molecular mass for amino-acids
#'
#' Calculate molecular mass based on atomic composition
#'
#' @param massTy (character) 'mono' or 'average'
#' @param inPept (logical) remove H2O corresponding to water loss at peptide bond formation
#' @param inclSpecAA (logical) include ornithine O & selenocysteine U
#' @return This function returns a vector with masses for all amino-acids (argument 'massTy' to switch from mono-isotopic to average mass)
#' @seealso \code{\link{massDeFormula}}, \code{\link[wrMisc]{convToNum}}
#' @examples
#' massDeFormula(c("12H12O","HO"," 2H 1 Se, 6C 2N","HSeCN"," ","e"))
#' AAmass()
#' @export
AAmass <- function(massTy="mono", inPept=TRUE, inclSpecAA=FALSE) {
## return vector with masses for all amino-acids (argument 'massTy' to switch from mono-isotopic to average mass)
## 'inPept' will remove H2O corresponding to water loss at peptide bond formation
## 'inclSpecAA' .. include ornithine O & selenocysteine U
## so far all LETTERS except B,J,X,Z (ie 2,10,24,26); special (used) : O,U
msg <- " argument 'massTy' must be either 'mono' or 'average' !"
chTy <- length(massTy)
if(chTy <1) stop(msg) else massTy <- massTy[1]
chTy <- c("mono","average") %in% massTy
if(!any(chTy)) stop(msg)
aaComp <- cbind(C=c(3,6,4,4,3,5,5,2,6,6,6,6,5,9,5,3,4,11,9,5,5,3),
H=c(5,12,6,5,5,7,8,3,7,11,11,12,9,9,7,5,7,10,9,9,12,5),
O=c(1,1,2,3,1,3,2,1,1,1,1,1,1,1,1,2,2,1,2,1,2,1),
N=c(1,4,2,1,1,1,2,1,3,1,1,2,1,1,1,1,1,2,1,1,2,1),
S=c(0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0),
Se=c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1))
rownames(aaComp) <- c("A","R","N","D","C","E","Q","G","H","I","L","K","M","F","P","S","T","W","Y","V","O","U")
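## for free amino-acids (ie not engaged in a peptide bond) add back the H2O (2x H, 1x O) lost at peptide-bond formation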
if(!inPept) aaComp[,2:3] <- aaComp[,2:3] + matrix(rep(2:1, each=nrow(aaComp)), ncol=2)
atoMass <- .atomicMasses()[,massTy]
AAmass <- aaComp*matrix(rep(atoMass[match(colnames(aaComp),names(atoMass))], each=nrow(aaComp)), nrow=nrow(aaComp))
AAmass <- rowSums(AAmass)
if(!inclSpecAA) AAmass <- AAmass[1:20] # so far exclude ornithine O & selenocysteine U
AAmass }
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/AAmass.R
#' AUC from ROC-curves
#'
#' This function calculates the AUC (area under the curve) from ROC data in matrix of specificity and sensitivity values,
#' as provided in the output from \code{\link{summarizeForROC}}.
#'
#' @param dat (matrix or data.frame) main input containing sensitivity and specificity data (from \code{summarizeForROC})
#' @param useCol (character or integer) column names to be used: 1st for specificity and 2nd for sensitivity count columns
#' @param returnIfInvalid (\code{NA} or \code{NULL}) what to return if data for calculating ROC is invalid or incomplete
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allows easier tracking of messages produced
#' @return This function returns the AUC value (numeric of length 1); if the input is invalid or incomplete, the value of \code{returnIfInvalid} (\code{NA} or \code{NULL}) is returned
#' @seealso preparing ROC data \code{\link{summarizeForROC}}, (re)plot the ROC figure \code{\link{plotROC}};
#' note that numerous other packages also provide support for working with ROC-curves : Eg \href{https://CRAN.R-project.org/package=dlstats}{rocPkgShort},
#' \href{https://CRAN.R-project.org/package=ROCR}{ROCR}, \href{https://CRAN.R-project.org/package=pROC}{pROC} or \href{https://CRAN.R-project.org/package=ROCit}{ROCit}, etc.
#' @examples
#' set.seed(2019); test1 <- list(annot=cbind(Species=c(rep("b",35), letters[sample.int(n=3,
#' size=150,replace=TRUE)])), BH=matrix(c(runif(35,0,0.01), runif(150)), ncol=1))
#' roc1 <- summarizeForROC(test1, spec=c("a","b","c"), annotCol="Species")
#' AucROC(roc1)
#' @export
AucROC <- function(dat, useCol=c("spec","sens"), returnIfInvalid=NA, silent=FALSE, debug=FALSE, callFrom=NULL) {
## calculate AUC (area under the curve) from ROC data
fxNa <- wrMisc::.composeCallName(callFrom, newNa="AucROC")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!is.null(returnIfInvalid)) returnIfInvalid <- NA
## basic checks
datOK <- length(dat) >0 && length(useCol) >1
if(datOK) { chDim <- dim(dat)
if(any(length(dim(dat)) !=2, dim(dat) < 1:2)) datOK <- FALSE } # must be matrix or data.frame with min 1 line and 2 cols
if(datOK) { if(is.numeric(useCol)) { if(any(useCol[1:2] <1) || any(useCol[1:2] > ncol(dat))) datOK <- FALSE
} else datOK <- all(useCol %in% colnames(dat)) }
if(debug) {message(fxNa,"aucR1 datOK : ",datOK); aucR1 <- list(dat=dat,useCol=useCol)}
if(datOK) {
if(is.character(useCol) && length(useCol) >1) {
useCol <- which(colnames(dat) %in% useCol[1:2])
datOK <- !any(is.na(useCol)) }
}
out <- returnIfInvalid
if(datOK) {
## check for NA
chNa <- is.na(dat[,useCol])
if(debug) {message(fxNa,"aucR2"); aucR2 <- list(dat=dat,useCol=useCol,chNa=chNa, datOK=datOK)}
if(any(chNa)) { ch1 <- is.na(dat[1, useCol])
if(!silent) message(fxNa,"NOTE : the data contain ",sum(chNa)," NAs, replacing by preceeding value")
if(any(ch1)) dat[1,useCol] <- c(if(ch1[1]) 1 else dat[1,useCol[1]], if(ch1[2]) 0 else dat[1,useCol[2]])
for(i in 1:2) {ch1 <- is.na(dat[,useCol[i]])
if(any(ch1)) dat[which(ch1), useCol[i]] <- dat[which(ch1) -1, useCol[i]]} }
if(debug) {message(fxNa,"aucR4")}
## Normalize (if needed)
for(i in 1:2) if(max(dat[,useCol[i]]) >1) dat[,useCol[i]] <- dat[,useCol[i]]/max(dat[,useCol[i]])
##
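## AUC by step-wise (rectangle) integration : sum of |increments in specificity| times sensitivity at the left endpoint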
if(datOK) out <- sum(abs(diff(dat[,useCol[1]])) *dat[-nrow(dat), useCol[2]]) else {
if(!silent) message(fxNa,"Invalid input / nothing to do") } }
out }
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/AucRoc.R
#' Deprecated Volcano-plot
#'
#' Please use VolcanoPlotW() from package wrGraph.
#' This function does NOT produce a plot any more.
#'
#' @param Mvalue (numeric or matrix) data to plot; M-values are typically calculated as the difference of log2-abundance values, while 'pValue' carries the corresponding test p-values;
#' M-values and p-values may be given as 2 columns of a matrix; in this case the argument \code{pValue} should remain NULL
#' @param pValue (numeric, list or data.frame) if \code{NULL} it is assumed that 2nd column of 'Mvalue' contains the p-values to be used
#' @param useComp (integer, length=1) choice of which of multiple comparisons to present in \code{Mvalue} (if generated using \code{moderTestXgrp()})
#' @param filtFin (matrix or logical) The data may get filtered before plotting: If \code{FALSE} no filtering will get applied; if matrix of \code{TRUE}/\code{FALSE} it will be used as optional custom filter, otherwise (if \code{Mvalue} if an \code{MArrayLM}-object eg from limma) a default filtering based on the \code{filtFin} element will be applied
#' @param ProjNa (character) custom title
#' @param FCthrs (numeric) Fold-Change threshold (display as line) given as Fold-change and NOT log2(FC), default at 1.5, set to \code{NA} for omitting
#' @param FdrList (numeric) FDR data or name of list-element
#' @param FdrThrs (numeric) FDR threshold (display as line), default at 0.05, set to \code{NA} for omitting
#' @param FdrType (character) FDR-type to extract if \code{Mvalue} is 'MArrayLM'-object (eg produced by \code{moderTest2grp} etc);
#' if \code{NULL} it will search for suitable fields/values in this order : 'FDR','BH',"lfdr" and 'BY'
#' @param subTxt (character) custom sub-title
#' @param grayIncrem (logical) if \code{TRUE}, display overlay of points as increased shades of gray
#' @param col (character) custom color(s) for points of plot (see also \code{\link[graphics]{par}})
#' @param pch (integer) type of symbol(s) to plot (default=16) (see also \code{\link[graphics]{par}})
#' @param compNa (character) names of groups compared
#' @param batchFig (logical) if \code{TRUE} figure title and axes legends will be kept shorter for display on fewer splace
#' @param cexMa (numeric) font-size of title, as expansion factor (see also \code{cex} in \code{\link[graphics]{par}})
#' @param cexLa (numeric) size of axis-labels, as expansion factor (see also \code{cex} in \code{\link[graphics]{par}})
#' @param limM (numeric, length=2) range of axis M-values
#' @param limp (numeric, length=2) range of axis FDR / p-values
#' @param annotColumn (character) column names of annotation to be extracted (only if \code{Mvalue} is \code{MArrayLM}-object containing matrix $annot).
#'   The first entry (typically 'SpecType') is used for different symbols in figure, the second (typically 'GeneName') is used as preferred text for annotating the best points (if \code{namesNBest} allows to do so).
#' @param annColor (character or integer) colors for specific groups of annotation (only if \code{Mvalue} is \code{MArrayLM}-object containing matrix $annot)
#' @param cexPt (numeric) size of points, as expansion factor (see also \code{cex} in \code{\link[graphics]{par}})
#' @param cexSub (numeric) size of subtitle, as expansion factor (see also \code{cex} in \code{\link[graphics]{par}})
#' @param cexTxLab (numeric) size of text-labels for points, as expansion factor (see also \code{cex} in \code{\link[graphics]{par}})
#' @param namesNBest (integer or character) number of best points to add names in figure; if 'passThr' all points passing FDR and FC-filters will be selected;
#' if the initial object \code{Mvalue} contains a list-element called 'annot' the second of the column specified in argument \code{annotColumn} will be used as text
#' @param NbestCol (character or integer) colors for text-labels of best points
#' @param sortLeg (character) sorting of 'SpecType' annotation either ascending ('ascend') or descending ('descend'), no sorting if \code{NULL}
#' @param NaSpecTypeAsContam (logical) consider lines/proteins with \code{NA} in Mvalue$annot[,"SpecType"] as contaminants (if a 'SpecType' for contaminants already exists)
#' @param useMar (numeric,length=4) custom margins (see also \code{\link[graphics]{par}})
#' @param returnData (logical) optional returning data.frame with (ID, Mvalue, pValue, FDRvalue, passFilt)
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @param debug (logical) additional messages for debugging
#' @return deprecated - returns nothing
#' @seealso this function was replaced by \code{\link[wrGraph]{VolcanoPlotW}}
#' @examples
#' set.seed(2005); mat <- matrix(round(runif(900),2), ncol=9)
#' @export
VolcanoPlotW2 <- function(Mvalue, pValue=NULL, useComp=1, filtFin=NULL, ProjNa=NULL, FCthrs=NULL, FdrList=NULL, FdrThrs=NULL, FdrType=NULL,
subTxt=NULL, grayIncrem=TRUE, col=NULL, pch=16, compNa=NULL, batchFig=FALSE, cexMa=1.8, cexLa=1.1, limM=NULL, limp=NULL,
annotColumn=NULL, annColor=NULL, cexPt=NULL, cexSub=NULL,
cexTxLab=0.7, namesNBest=NULL, NbestCol=1, sortLeg="descend", NaSpecTypeAsContam=TRUE, useMar=c(6.2,4,4,2), returnData=FALSE, callFrom=NULL, silent=FALSE,debug=FALSE) {
## MA plot
## optional arguments for explicit title in batch-mode
fxNa <- wrMisc::.composeCallName(callFrom, newNa="VolcanoPlotW2")
.Deprecated("++ NOTE : OLD VERSION !! ++ Please use VolcanoPlotW() from package wrGraph")
}
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/VolcanoPlotW2.R
|
#' Selective batch cleaning of sample- (ie column-) names in list
#'
#' This function allows to manipulate sample-names (ie colnames of abundance data) in a batch-wise manner from data stored as multiple matrixes or data.frames of a list.
#' Import functions such as \code{readMaxQuantFile()} organize initial flat files into lists (of matrixes) of the different types of data.
#' Many times all column names in such lists carry long names including redundant information, like the overall experiment name or date, etc.
#' The aim of this function is to facilitate 'cleaning' the sample- (ie column-) names to obtain short and concise names.
#' Character terms to be removed (via argument \code{rem}) and/or replaced/substituted (via argument \code{subst}) should be given as they are, characters with special behaviour in \code{grep} (like '.') will be protected internally.
#' Note that the character removal part will be done first, and the substitution part (character replacement) afterwards.
#'
#' @param dat (list) main input
#' @param rem (character) character string to be removed, may be named 'left' and 'right' for more specific exact pattern matching
#'   (this part will be performed before character substitutions by \code{subst})
#' @param subst (character of length=2, or matrix with 2 columns) pair(s) of character-strings for replacement (1st as search-item and 2nd as replacement); this part is performed after character-removal via \code{rem}
#' @param lstE (character, length=1) names of list-elements where colnames should be cleaned
#' @param mathOper (character, length=1) optional mathematical operation on numerical part of sample-names (eg \code{mathOper='/2'} for dividing numeric part of colnames by 2)
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @seealso \code{\link[base]{grep}}
#' @return This function returns a list (equivalent to input \code{dat})
#' @examples
#' dat1 <- matrix(1:12, ncol=4, dimnames=list(1:3, paste0("sample_R.",1:4)))
#' dat1 <- list(raw=dat1, quant=dat1, notes="other..")
#' cleanListCoNames(dat1, rem=c(left="sample_"), c(".","-"))
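#' ## a minimal sketch of 'mathOper' (assuming colnames start with a numeric part) :
#' dat2 <- matrix(1:12, ncol=4, dimnames=list(1:3, paste0(seq(2,8,by=2),"min")))
#' dat2 <- list(raw=dat2, quant=dat2)
#' cleanListCoNames(dat2, mathOper="/2")     # numeric part of colnames divided by 2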
#' @export
cleanListCoNames <- function(dat, rem=NULL, subst=c("-","_"), lstE=c("raw","quant","counts"), mathOper=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
## clean/stratify columnames
fxNa <- wrMisc::.composeCallName(callFrom, newNa="cleanListCoNames")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
ch1 <- lstE %in% names(dat)
  if(!is.list(dat) || all(!ch1)) {if(!silent) message(fxNa," Nothing to do (verify your input)"); ok <- FALSE} else ok <- TRUE
if(ok && any(!ch1)) { if(!silent) message(fxNa," Term ",wrMisc::pasteC(lstE[which(!ch1)],quoteC="'")," not found in 'dat', ignoring ..")
lstE <- lstE[which(ch1)] }
needToProt <- paste0("\\",c(".","+","*", "^","$","?", "(",")","\\")) #" #for editor
if(ok && length(rem) >0) {
## protect special characters (if needed)
for(i in needToProt) rem <- gsub(i, paste0("\\",i), rem) #" #for editor
## look for left side removal
ch1 <- names(rem) %in% c("l","le","left")
if(any(ch1)) rem[which(ch1)] <- paste0("^",rem[which(ch1)])
## look for right side removal
ch1 <- names(rem) %in% c("r","ri","right")
if(any(ch1)) rem[which(ch1)] <- paste0(rem[which(ch1)],"$")
for(i in lstE) for(j in rem) colnames(dat[[i]]) <- sub(j,"",colnames(dat[[i]]))
}
if(debug) { message(fxNa,"cLC1"); cLC1 <- list(dat=dat,lstE=lstE,needToProt=needToProt,rem=rem,subst=subst,lstE=lstE,mathOper=mathOper,ok=ok)}
if(ok && length(subst) >0) {
## character substitution
if(length(dim(subst)) >1) if(any(dim(subst) < 1:2)) { subst <- NULL
message(fxNa," Invalid argument 'subst' (should be matrix with left column for term to search for and right column with replacement-term), ignoring") }
if(length(dim(subst)) >1) {
      for(j in needToProt) subst[,1] <- gsub(j, paste0("\\",j), subst[,1]) #" #for editor
for(i in lstE) for(j in 1:nrow(subst)) colnames(dat[[i]]) <- sub(subst[j,1], subst[j,2], colnames(dat[[i]]))
} else if(length(subst) >1) {
      for(j in needToProt) subst[1] <- gsub(j, paste0("\\",j), subst[1]) #" #for editor
for(i in lstE)
colnames(dat[[i]]) <- sub(subst[1], subst[2], colnames(dat[[i]]))}
}
if(debug) { message(fxNa,"cLC2"); cLC2 <- list(dat=dat,lstE=lstE,needToProt=needToProt,rem=rem,subst=subst,lstE=lstE,mathOper=mathOper,ok=ok)}
if(ok && length(mathOper)==1) if(nchar(mathOper) <2) { mathOper <- NULL
message(fxNa,"Invalid entry for 'mathOper' ... ignoring" )}
if(ok && length(mathOper)==1) {
iniNa <- colnames(dat[[lstE[1]]])
txNa <- sub("^[[:space:]]*[[:digit:]]+","", iniNa) # remove heading numeric part
if(debug) { message(fxNa,"cLC3"); cLC3 <- list(dat=dat,lstE=lstE,iniNa=iniNa,txNa=txNa)}
if(all(nchar(txNa) < nchar(iniNa))) {
nuNa <- substr(iniNa, 1, nchar(iniNa) -nchar(txNa)) # the (supposed) numeric part
if(debug) message(fxNa," mathematical transformation on colnames by ",mathOper," on ",wrMisc::pasteC(utils::head(nuNa),lastCol=", "))
nuNa2 <- try(eval(parse(text=paste0("c(",paste(nuNa, collapse=","),")",mathOper))), silent=TRUE)
      if(inherits(nuNa2, "try-error") || !is.numeric(nuNa2)) { if(!silent) message(fxNa,"Can't run mathematical operation, ",
        "heading part in column-heads might not be truly numeric or 'mathOper' somehow incorrect !")
} else {
newNa <- paste0(nuNa2, txNa)
for(i in lstE) if(length(dim(dat[[i]])) >1) colnames(dat[[i]]) <- newNa else if(!silent) message(fxNa,"Note '$",i,"' seems not to fit") }
} else if(!silent) message(fxNa,"Some column-names seem not to contain any numeric part at beginning, nothing to do ...")
}
dat }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/cleanListCoNames.R
|
#' Combine Multiple Filters On NA-imputed Data
#'
#' In most omics data-analysis one needs to employ a certain number of filtering strategies to avoid carrying artifacts into the step of statistical testing.
#' \code{combineMultFilterNAimput} takes on one side the original data and on the other side NA-imputed data to create several different filters and to finally combine them.
#' A filter aiming to take away the least abundant values (using the imputed data) can be fine-tuned by the argument \code{abundThr}.
#' This step compares the means for each group and line, at least one group-mean has to be > the threshold (based on the hypothesis
#' that if all conditions represent extremely low measures their differential may not be determined with certainty).
#' In contrast, the filter addressing the number of missing values (\code{NA}) uses the original data, the arguments \code{colTotNa},\code{minSpeNo} and \code{minTotNo}
#' are used at this step. Basically, this step allows defining a minimum content of 'real' (ie non-NA) values for further considering the measurements as reliable.
#' This part uses internally \code{\link[wrMisc]{presenceFilt}} for filtering elevated content of \code{NA} per line.
#' Finally, this function combines both filters (as matrix of \code{FALSE} and \code{TRUE}) on NA-imputed and original data
#' and returns a vector of logical values indicating if the corresponding lines pass all filter criteria.
#'
#' @param dat (matrix or data.frame) main data (may contain \code{NA})
#' @param imputed (list) same data as 'dat' but with all \code{NA} imputed (eg as generated by \code{matrixNAneighbourImpute}, ie a list containing \code{$data} and \code{$nNA})
#' @param grp (character or factor) define groups of replicates (in columns of 'dat')
#' @param annDat (matrix or data.frame) annotation data (should match lines of 'dat')
#' @param abundThr (numeric) optional threshold filter for minimum abundance
#' @param colRazNa (character) if razor peptides are used: column name for razor peptide count
#' @param colTotNa (character) column name for total peptide count
#' @param minSpeNo (integer) minimum number of specific peptides for maintaining proteins
#' @param minTotNo (integer) minimum total (ie including razor) number of peptides
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allows easier tracking of messages produced
#' @return This function returns a vector of logical values if corresponding line passes filter criteria
#' @seealso \code{\link[wrMisc]{presenceFilt}}
#' @examples
#' set.seed(2013)
#' datT6 <- matrix(round(rnorm(300)+3,1), ncol=6,
#' dimnames=list(paste0("li",1:50), letters[19:24]))
#' datT6 <- datT6 +matrix(rep(1:nrow(datT6),ncol(datT6)), ncol=ncol(datT6))
#' datT6[6:7,c(1,3,6)] <- NA
#' datT6[which(datT6 < 11 & datT6 > 10.5)] <- NA
#' datT6[which(datT6 < 6 & datT6 > 5)] <- NA
#' datT6[which(datT6 < 4.6 & datT6 > 4)] <- NA
#' datT6b <- matrixNAneighbourImpute(datT6, gr=gl(2,3))
#' datT6c <- combineMultFilterNAimput(datT6, datT6b, grp=gl(2,3), abundThr=2)
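#' ## a quick look (sketch) at how many lines pass the combined filters
#' ## in each pairwise comparison :
#' colSums(datT6c$filt)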
#'
#' @export
combineMultFilterNAimput <- function(dat, imputed, grp, annDat=NULL, abundThr=NULL, colRazNa=NULL, colTotNa=NULL, minSpeNo=1, minTotNo=2, silent=FALSE,debug=FALSE,callFrom=NULL){
fxNa <- wrMisc::.composeCallName(callFrom, newNa="combineMultFilterNAimput")
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!isTRUE(silent)) silent <- FALSE
datFi <- wrMisc::presenceFilt(dat, grp=grp, maxGrpM=1, ratMa=0.8, silent=silent, callFrom=fxNa)
if(debug) message(fxNa," at presenceFilt: ",paste(colSums(datFi),collapse=" ")," out of ",nrow(dat))
if(length(colRazNa) >0 & length(annDat) >0) {
razFilt <- razorNoFilter(annot=annDat, totNa=colTotNa, minRazNa=colRazNa, minSpeNo=minSpeNo, minTotNo=minTotNo, silent=silent,callFrom=fxNa)
datFi[which(!razFilt),] <- rep(FALSE,ncol(datFi))
if(debug) message(fxNa," at razorNoFilter: ",paste(colSums(datFi),collapse=" "))
}
## filter mostly low abundance (using imputed), see also .filterMinAv
grpMeans <- wrMisc::rowGrpMeans(imputed$data,grp)
if(any(!(colnames(grpMeans) == colnames(imputed$nNA)))) message(fxNa," Problem with order of columns of imputed$nNA !?")
pwComb <- wrMisc::triCoord(ncol(grpMeans))
if(is.numeric(abundThr) & length(abundThr)==1) {
for(i in 1:nrow(pwComb)) { # loop along all pair-wise questions => (update filter) datFi
chLi <- grpMeans[,pwComb[i,1]] < abundThr & grpMeans[,pwComb[i,2]] < abundThr
if(any(chLi)) datFi[which(chLi),i] <- FALSE}
if(debug) message(fxNa," at abundanceFilt: ",paste(colSums(datFi),collapse=" ")) }
## check if set of mostly imputed data higher than measured -> filter
## number of NAs per line & group
nNAbyGroup <- wrMisc::rowGrpNA(dat,grp)
for(i in 1:nrow(pwComb)) {
critNAGrp <- table(grp)[colnames(grpMeans)[pwComb[i,]]]
critNAGrp <- critNAGrp/2 -0.1
#re-check ?# potentially filter when min 50% of data NA
chLi <- nNAbyGroup[,pwComb[i,]] > matrix(rep(critNAGrp, each=nrow(grpMeans)), ncol=2) # use imputed$nNA; return T when need to filter
if(any(chLi)) {
      chLi2 <- cbind(chLi[,1] & grpMeans[,pwComb[i,1]] > grpMeans[,pwComb[i,2]], chLi[,2] & grpMeans[,pwComb[i,2]] > grpMeans[,pwComb[i,1]]) # is T if bad (mostly-imputed group showing the higher mean)
      datFi[,i] <- datFi[,i] & !chLi2[,1] & !chLi2[,2] }
}
if(debug) message(fxNa," at NA> mean: ",wrMisc::pasteC(colSums(datFi)))
imputed$filt <- datFi
if(!is.null(annDat)) imputed$annot <- annDat
imputed }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/combineMultFilterNAimput.R
|
#' Molecular mass for amino-acids
#'
#' This function calculates the molecular mass of one-letter code amino-acid sequences.
#'
#' @param x (character) aminoacid sequence (single upper case letters for describing a peptide/protein)
#' @param massTy (character) default 'mono' for mono-isotopic masses (alternative 'average')
#' @param seqName (logical) if \code{TRUE} use the sequence of 'x' as names of the output (always the case if 'x' has no names)
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a vector with masses for all amino-acids (argument 'massTy' to switch from mono-isotopic to average mass)
#' @seealso \code{\link{massDeFormula}}, \code{\link{AAmass}}, \code{\link[wrMisc]{convToNum}}
#' @examples
#' convAASeq2mass(c("PEPTIDE","fPROTEINES"))
#' pep1 <- c(aa="AAAA", de="DEFDEF")
#' convAASeq2mass(pep1, seqN=FALSE)
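#' ## average masses instead of the (default) mono-isotopic ones :
#' convAASeq2mass("PEPTIDE", massTy="average")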
#' @export
convAASeq2mass <- function(x, massTy="mono", seqName=TRUE, silent=FALSE, callFrom=NULL) {
## convert (character) aminoacid sequence vector (ie AA seq in single upper case letters) to mass with corresp modif
## 'seqName' .. to use 'x' (aa seq) as name (always if 'x' has no names)
fxNa <- wrMisc::.composeCallName(callFrom,newNa="convAASeq2mass")
AAmass1 <- AAmass(massTy=massTy ,inPept=TRUE)
mH20 <- massDeFormula("2HO", massTy=massTy)
if(length(names(x)) <1) seqName <- TRUE
chNoLet <- which(!LETTERS %in% names(AAmass1))
chNoLe2 <- nchar(x) == nchar(sapply(LETTERS[chNoLet],gsub,"",x))
chNoLe2 <- if(length(dim(chNoLe2)) >1) colSums(!chNoLe2) >0 else !chNoLe2
if(any(chNoLe2)) warning(fxNa,"Encountered/ignoring non-attributed sequence character: ",wrMisc::pasteC(LETTERS[chNoLet][which(chNoLe2)],quoteC="'")," !!")
pep1 <- lapply(strsplit(x,""), match, names(AAmass1)) # transform into indexes of AA-letters
out <- sapply(pep1, function(x) sum(AAmass1[x], mH20, na.rm=TRUE)) # basic mass (as sum of its AA)
names(out) <- if(seqName) x else names(x)
chNa <- names(out) %in% "0z"
if(any(chNa)) names(out)[which(chNa)] <- ""
out }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/convAASeq2mass.R
|
#' Order Columns in list of matrixes
#'
#' @description
#' This function orders columns in list of matrixes (or matrix) according to argument \code{sampNames}.
#' This function can be used to adjust/correct the order of samples after reading data using \code{readMaxQuantFile()}, \code{readPDExport()} etc.
#' The input may also be MArrayLM-type object from package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma} or from \code{\link{moderTestXgrp}} or \code{\link{moderTest2grp}}.
#'
#' @param dat (matrix, list or MArrayLM-object from limma) main input of which columns should get re-ordered, may be output from \code{\link{moderTestXgrp}} or \code{\link{moderTest2grp}}.
#' @param replNames (character) new column-names (in order as input from \code{dat}), allows renaming colnames before defining new order
#' @param sampNames (character) column-names in desired order for output (must match colnames of \code{dat} or \code{replNames}, if used)
#' @param newNames deprecated, please use \code{replNames} instead
#' @param useListElem (character) in case \code{dat} is list, all list-elements whose columns should get (re-)ordered
#' @param annotElem (character) name of list-element of \code{dat} with annotation data to get in new order
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns an object of same class as input \code{dat} (ie matrix, list or MArrayLM-object from limma)
#' @seealso \code{\link{moderTestXgrp}} for single comparisons; \code{\link[base]{order}}
#' @examples
#' grp <- factor(rep(LETTERS[c(3,1,4)], c(2,3,3)))
#' dat1 <- matrix(1:15, ncol=5, dimnames=list(NULL,c("D","A","C","E","B")))
#' corColumnOrder(dat1, sampNames=LETTERS[1:5])
#'
#' dat1 <- list(quant=dat1,raw=dat1)
#' dat1
#' corColumnOrder(dat1, sampNames=LETTERS[1:5])
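#' ## sketch : rename columns first (via 'replNames', given in the current order), then re-order
#' corColumnOrder(dat1, replNames=c("d","a","c","e","b"), sampNames=letters[1:5])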
#' @export
corColumnOrder <- function(dat, replNames=NULL, sampNames, useListElem=c("quant","raw","counts"), annotElem="sampleSetup", newNames=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
## order columns in list of matrixes (or matrix) according to 'sampNames'
## This function can be used to adjust/correct the order of samples after reading data using \code{readMaxQuantFile()}, \code{readPDExport()} etc.
## dat (list or matrix) main input of which columns should get re-ordered
## sampNames (character) column-names in desired order for output
## useListElem (character) all names of list-elements where the reordering should be performed
fxNa <- wrMisc::.composeCallName(callFrom, newNa="corColumnOrder")
.corPathW <- function(x) gsub("\\\\", "/", x)
.corEnum <- function(colNa, repl=c("Samp","samp","Rep","rep","Re","re","R","r","Number","number","No","no","N","n"), sep=c("_","-")) {
    ## function to remove enumeration characters in colNa; checks for 'abc_Rep123' and corrects to 'abc_123'
## colNa (character)
##
out1 <- lapply(repl, function(x) {ch2 <- grep(paste0(".\\",sep[1],x,"[[:digit:]]+$"), colNa)
      if(identical(ch2, 1:length(colNa))) {le <- sub(paste0("\\",sep[1],x,"[[:digit:]]+$"), paste0("\\",sep[1]), colNa); paste0(le, substr(colNa, nchar(le)+2, nchar(colNa)))}})
chLe1 <- sapply(out1, length)
out2 <- lapply(repl, function(x) {ch2 <- grep(paste0(".\\",sep[2],x,"[[:digit:]]+$"), colNa)
if(identical(ch2, 1:length(colNa))) {le <- sub(paste0("\\",sep[2],x,"[[:digit:]]+$"), paste0("\\",sep[2]), colNa); paste0(le, substr(colNa, nchar(le)+2, nchar(colNa)))}})
chLe2 <- sapply(out2, length)
ch3 <- chLe1[which.max(chLe1)] > chLe2[which.max(chLe2)]
    out <- if(ch3) {if(chLe1[which.max(chLe1)] >0) out1[[which.max(chLe1)]] else colNa} else {if(chLe2[which.max(chLe2)] >0) out2[[which.max(chLe2)]] else colNa}   # use 'out2' (sep[2]-based) in the else-branch
out }
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
datOK <- TRUE
alreadyOK <- FALSE
newO <- NA # initialize
chSetupNa <- c("groups","level","lev", "sdrfDat", "annotBySoft")
## checks
  if(length(dat) <1) { datOK <- FALSE
msg <- "'dat' is empty, nothing to do" }
  if(datOK && length(names(dat)) <1) { datOK <- FALSE
msg <- "'dat' has no names, nothing to do" }
if(datOK) { ch1 <- useListElem %in% names(dat) # check for useListElem
if(any(ch1, na.rm=TRUE)) { if(any(!ch1, na.rm=TRUE)) useListElem <- useListElem[which(ch1)] # update
ch2 <- colnames(dat[[useListElem[1]]])
      if(length(ch2) != ncol(dat[[useListElem[1]]])) { datOK <- FALSE; if(!silent) message(fxNa,"Checked list-element ",useListElem[1]," : has no (or non-differentiating) colnames")}
} else { datOK <- FALSE; if(!silent) message(fxNa,"list-elements ",wrMisc::pasteC(useListElem)," not found in 'dat'")}}
if(debug) {message(fxNa,"cCO1"); cCO1 <- list(dat=dat,sampNames=sampNames,replNames=replNames,useListElem=useListElem,annotElem=annotElem,newNames=newNames,datOK=datOK) }
if(length(useListElem) >0) {
chEl <- useListElem %in% names(dat)
if(any(!chEl, na.rm=TRUE)) useListElem <- useListElem[which(chEl)]
if(length(useListElem) <1) warning(fxNa,"After cleaning 'useListElem' nothing remains !?!")
}
## main
if(datOK && length(replNames) >0 && length(replNames)==length(sampNames)) {
## replace colnames (if valid 'replNames' given) - replNames must be in same order as colnames(dat$quant) !!
if(debug) {message(fxNa,"Replace colnames cCO1b")}
if(is.list(dat)) { for(i in useListElem) colnames(dat[[i]]) <- replNames
} else if(is.matrix(dat)) {
      if(ncol(dat) != length(sampNames)) warning(fxNa,"'dat' has a different number of columns than the length of 'sampNames' !! The function might be using the wrong ones !")
      colnames(dat)[1:length(sampNames)] <- replNames }    # 'dat' is a matrix here, set its colnames directly
if(debug) { message(fxNa,"cCO1b"); cCO1b <- list() }
}
if(datOK) {
## try adjusting order of $quant, $raw and $counts
if(debug) {message(fxNa,"cCO2"); cCO2 <- list(dat=dat,sampNames=sampNames,replNames=replNames, datOK=datOK,useListElem=useListElem) }
## Adjust order of columns in $quant etc: compare sampNames & colnames of $quant
if(length(sampNames) ==ncol(dat[[useListElem[1]]])) {
## Note : comparing by $sampleSetup$groups won't work well due to repeated levels
newO <- match(sampNames, colnames(dat[[useListElem[1]]]))
if(any(is.na(newO))) { if(!silent) message(fxNa,"Colnames of 'dat$quant' differ from 'sampNames', trying to adjust .. ")
## modify colnames of $abund to remove enumerator-names
ch3 <- wrMisc::rmEnumeratorName(colnames(dat[[useListElem[1]]]), sepEnum=c(""," ","-","_"), newSep="_", incl=c("anyCase","trim1"))
if(length(ch3) ==length(sampNames)) colnames(dat[[useListElem[1]]]) <- ch3
## same treatment to sampleNames for higher chances of matching
sampNa2 <- wrMisc::rmEnumeratorName(sampNames, sepEnum=c(""," ","-","_"), newSep="_", incl=c("anyCase","trim1"))
newO <- match(sampNa2, ch3) # update
## this could be made after trimming if still not successful, see also wrMisc::matchMatrixLinesToRef()
}
if(any(is.na(newO))) { datOK <- FALSE
if(!silent) message(fxNa,"Failed : Unable to match ",sum(is.na(newO))," suggested sampNames ( ",wrMisc::pasteC(sampNames[which(is.na(newO))], quoteC="'")," )")
} else {
## finally adjust order of $quant etc based on $quant
if(debug) {message(fxNa,"cCO2b"); cCO2b <- list() }
if(identical(newO, 1:length(sampNames))) {
if(!silent) message(fxNa,"Quant/counting data already in good order ..")
alreadyOK <- TRUE
} else {
if(is.list(dat)) { for(i in wrMisc::naOmit(match(useListElem, names(dat)))) { dat[[i]] <- if(length(dim(dat[[i]])) ==2) {
if(any(dim(dat[[i]]) ==1)) matrix(dat[[i]][,newO], nrow=nrow(dat[[i]]), dimnames=dimnames(dat[[i]][,newO])) else dat[[i]][,newO]
} else { if(length(dim(dat[[i]])) ==3) array(as.numeric(dat[[i]][,newO,]), dim=c(nrow(dat[[i]]), length(newO), dim(dat[[i]])[3]),
dimnames=list(rownames(dat[[i]]), colnames(dat[[i]])[newO], dimnames(dat[[i]])[[3]])) }
          if(debug) message(fxNa,"Successfully adjusted quantitation data to new order") }}
}
}
} else { newO <- NA
if(!silent) message(fxNa,"Failed to adjust quantitative/count data")}
}
if(debug) {message(fxNa,"cCO3"); cCO3 <- list(dat=dat,sampNames=sampNames,useListElem=useListElem,annotElem=annotElem,newO=newO,alreadyOK=alreadyOK)}
if(datOK && !alreadyOK && length(dat[[annotElem]]) >0) {
## Continue adjusting order, now in $sampleSetup
## presume $sampleSetup is in same order as $quant & $raw
ch1 <- sapply(dat[[annotElem]], function(x) if(length(dim(x)) >0) c(NA, dim(x)==length(sampNames)) else c(length(x)==length(sampNames),NA,NA))
if(length(dim(ch1)) <2) ch1 <- matrix(ch1, ncol=length(dat[[annotElem]]), dimnames=list(NULL,names(dat[[annotElem]])))
if(any(ch1[1,], na.rm=TRUE)) for(i in which(ch1[1,])) dat[[annotElem]][[i]] <- dat[[annotElem]][[i]][newO]
if(any(ch1[2,], na.rm=TRUE)) for(i in which(ch1[2,])) dat[[annotElem]][[i]] <- dat[[annotElem]][[i]][newO,]
if(any(ch1[3,], na.rm=TRUE)) for(i in which(ch1[3,])) dat[[annotElem]][[i]] <- dat[[annotElem]][[i]][,newO]
} ## end corColumnOrder
dat }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/corColumnOrder.R
|
#' Compare in-silico digested proteomes for unique and shared peptides, counts per protein or as peptides
#'
#' Compare in-silico digested proteomes for unique and shared peptides, counts per protein or as peptides.
#' The in-silico digestion may be performed separately using the package \href{https://bioconductor.org/packages/release/bioc/html/cleaver.html}{cleaver}.
#' Note: input must be a list (or multiple named lists) of proteins with their respective peptides (eg by in-silico digestion).
#'
#' @param ... (list) multiple lists of (in-silico) digested proteins (typically protein ID as names) with their respective peptides (AA sequence), one entry for each species
#' @param prefix (character) optional (species-) prefix for entries in '...', will be only considered if '...' has no names
#' @param sep (character) concatenation symbol
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a list with $byPep as list of logical matrixes for each peptide (as line) and unique/shared/etc for each species; $byProt as list of matrixes with count data per protein (as line) for each species; $tab with simple summary-type count data
#' @seealso \code{\link{readFasta2}} and/or \code{cleave-methods} in package \href{https://bioconductor.org/packages/release/bioc/html/cleaver.html}{cleaver}
#' @examples
#' ## The example mimics a proteomics experiment where extracts from E coli and
#' ## Saccharomyces cerevisiae were mixed, thus not all peptides may occur as unique.
#' (mi2 = countNoOfCommonPeptides(Ec=list(E1=letters[1:4],E2=letters[c(3:7)],
#' E3=letters[c(4,8,13)],E4=letters[9]),Sc=list(S1=letters[c(2:3,6)],
#' S2=letters[10:13],S3=letters[c(5,6,11)],S4=letters[c(11)],S5="n")))
#' ## a .. uni E, b .. inteR, c .. inteR(+intra E), d .. intra E (no4), e .. inteR,
#' ## f .. inteR +intra E (no6), g .. uni E, h .. uni E no 8), i .. uni E,
#' ## j .. uni S (no10), k .. intra S (no11), l .. uni S (no12), m .. inteR (no13)
#' lapply(mi2$byProt,head)
#' mi2$tab
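#' ## peptide-level (logical) classification for the E coli set :
#' head(mi2$byPep$Ec)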
#' @export
countNoOfCommonPeptides <- function(..., prefix=c("Hs","Sc","Ec"), sep="_", silent=FALSE, debug=FALSE, callFrom=NULL) {
## compare in-silico digested proteomes for unique and shared peptides, counts per protein
  ## .. input must be lists of proteins with their respective peptides
fxNa <- wrMisc::.composeCallName(callFrom,newNa="countNoOfCommonPeptides")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
inp <- list(...)
chInp <- c("prefix","sep","silent","callFrom") %in% names(inp)
if(any(chInp)) inp <- inp[which(!chInp)]
if(length(inp) <2) stop("Not sufficient input elements - nothing to do !")
chSep <- sapply(inp, function(x) length(grep(sep,names(x))) >0)
  if(any(chSep)) message(fxNa,"Trouble ahead : Separator 'sep' also appears in sequence names !!")
## main
chN <- names(inp)
  if(length(names(inp)) >0) if(all(nchar(names(inp)) >0)) prefix <- names(inp)   # preferentially use names of proteins as in main input
nBySet <- lapply(inp,function(x) sapply(x,length))
nBySet2 <- sapply(nBySet,sum)
seqs <- unlist(inp,recursive=TRUE,use.names=FALSE)
.countFra <- function(x) paste(1:x,sep,rep(x,x),sep="")
.firstOfRep <- function(x) duplicated(x,fromLast=TRUE) & !duplicated(x,fromLast=FALSE) # find first of replicated (mark as T)
names(seqs) <- paste(rep(prefix,nBySet2),sep,unlist(lapply(unlist(nBySet,use.names=FALSE),.countFra)),sep, rep(unlist(lapply(inp,names)),unlist(nBySet)),sep="")
seqAnn <- cbind(species=rep(prefix,nBySet2), pepNo=unlist(lapply(unlist(nBySet), function(x) cbind(1:x))),
pepTot=unlist(lapply(unlist(nBySet), function(x) cbind(rep(x,x)))), protID=rep(unlist(lapply(inp,names)),unlist(nBySet)))
## make matrix with prot names & number if pep ? (no need for strsplit later - alternative to concatenate as names in seqs)
staSto <- cumsum(nBySet2)
staSto <- cbind(sta=c(1,staSto[-length(staSto)] +1), stop=staSto)
out <- list(byPep=list(), byProt=list(), tab=list())
  matColNa <- c("uni0","shared0","red0", "unique","sharedInteR","sharedIntrA","redInteR","redIntraA")
for(i in 1:length(inp)) {
mat <- matrix(FALSE, nrow=nBySet2[i], ncol=length(matColNa), dimnames=list(unlist(inp[[i]],use.names=FALSE), matColNa))
frL <- duplicated(seqs[staSto[i,1]:staSto[i,2]],fromLast=TRUE)
frB <- duplicated(seqs[staSto[i,1]:staSto[i,2]],fromLast=FALSE)
mat[,1:3] <- matrix(c(!frL & !frB, frL & !frB, frB),ncol=3)
    ## now compose unique, shared (as 1st occurrence of non-unique) and redundant (further repeated sequences) etc
com <- unique(seqs[staSto[i,1]:staSto[i,2]])
chSh <- com %in% unique(seqs[-1*(staSto[i,1]:staSto[i,2])]) # check for common with any other spec
if(any(chSh)) {
com <- seqs[staSto[i,1]:staSto[i,2]] %in% com[which(chSh)] # index of common (among cur spec)
uniqCom <- unique(seqs[staSto[i,1]:staSto[i,2]][which(com)])
fir <- .firstOfRep(c(seqs[staSto[i,1]:staSto[i,2]][which(com)],uniqCom))[1:sum(com)] # find 1st occur in subgroup of common (needed to re-inject unique common to make sure 1st always shows up)
mat[which(mat[,1] & !com),4] <- TRUE # unique : unique intra and NOT in common
mat[which(com)[which(fir)],5] <- TRUE # shared inter : first inst of common
mat[which(com)[which(!fir)],7] <- TRUE # redun inter : not first inst of common
fir <- .firstOfRep(seqs[staSto[i,1]:staSto[i,2]][which(!com)]) # find 1st occur in subgroup of non-common
mat[which(!com)[which(fir)],6] <- TRUE # shared intra : first inst of non-common (+ later remove unique)
mat[which(!com)[which(!fir)],8] <- TRUE # redun intra : not first inst of non-common (+later remove unique)
mat[which(mat[,4]),7:8] <- FALSE # unique can't be redundant (or shared)
}
out$byPep[[i]] <- mat
names(out$byPep)[i] <- prefix[i]
## exploit by prot
    tmp <- matrix(unlist(by(mat, rep(names(inp[[i]]), nBySet[[i]]), function(x) colSums(as.matrix(x),na.rm=TRUE))),
      ncol=ncol(mat), byrow=TRUE, dimnames=list(names(inp[[i]]), colnames(mat)))   # peptide-class counts per protein
    out[["byProt"]][[i]] <- cbind(nPep=nBySet[[i]], tmp)    # cut in list of matrixes (re-use 'tmp' instead of re-computing)
names(out[["byProt"]])[[i]] <- prefix[i]
## summarize
supl <- c(
n1pep=sum(nBySet[[i]]==1), n2pep=sum(nBySet[[i]] ==2), n3pep=sum(nBySet[[i]]==3), n4fpep=sum(nBySet[[i]] >3), # n at 1 pep, 2 pep, 3 pep, n at >3 pep
n1pepSpeIntra=sum(nBySet[[i]]==1 & out[["byProt"]][[i]][,"uni0"]==1), # n at 1 pep which is unique.intra
n1pepSpeInter=sum(nBySet[[i]]==1 & out[["byProt"]][[i]][,"unique"]==1), # n at 1 pep which is unique, #1 spec pep,
n2pep2SpeIntra=sum(nBySet[[i]] ==2 & out[["byProt"]][[i]][,"uni0"] ==2), # n = 2pep with 2 spec intra
n2pep2SpeInter=sum(nBySet[[i]] ==2 & out[["byProt"]][[i]][,"unique"] ==2), # n = 2pep with 2 spec iner
inf2SpePepIntra=sum(out[["byProt"]][[i]][,"uni0"] <2), inf2SpePep=sum(out[["byProt"]][[i]][,"unique"] <2), # <2 spec pep at intra-species, <2 spec pep
min2pep1Intra=sum(nBySet[[i]] >1 & out[["byProt"]][[i]][,"uni0"] >0), # min 2 pep & min 1 specif
min2pep1Inter=sum(nBySet[[i]] >1 & out[["byProt"]][[i]][,"unique"] >0), # min 2 pep & min 1 specif
min2speIntra=sum(out[["byProt"]][[i]][,"uni0"] >1), # (min 2 pep &) min 2 specif intra
min2speIner=sum(out[["byProt"]][[i]][,"unique"] >1), # (min 2 pep &) min 2 specif inter
nLost2pepSpec=sum(out[["byProt"]][[i]][,"uni0"] >1 & out[["byProt"]][[i]][,"unique"] <2) ) # min 2 pep as single species but when mult species combined less than 2 pep
out$tab <- if(length(out$tab) <1) as.matrix(c(nProt=length(inp[[i]]),nTotPep=nBySet2[[i]], nPepSpec=sum(mat[,"uni0"]) +sum(mat[,"shared0"]),
colSums(mat),supl)) else cbind(out$tab,c(nProt=length(inp[[i]]), nTotPep=nBySet2[[i]], nPepSpec=sum(mat[,"uni0"]) +sum(mat[,"shared0"]), colSums(mat),supl))
colnames(out$tab)[ncol(out$tab)] <- prefix[i]
}
out }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/countNoOfCommonPeptides.R
|
#' Export Sample Meta-data from Quantification-Software as Sdrf-draft
#'
#' Sample/experimental annotation meta-data from \href{https://www.maxquant.org/}{MaxQuant} that was previously imported can be formatted in sdrf-style and exported
#' using this function to write a draft-sdrf-file. Please note that this information will not be _complete_ in respect to all information used in data-bases like Pride.
#' Sdrf-files provide additional meta-information about samples and MS-runs in a standardized format, they may also be part of submissions to \href{https://www.ebi.ac.uk/pride/}{Pride}.
#'
#'
#' @details
#' Gathering as much information as possible about samples and MS-runs requires that the additional files created by software like MaxQuant (using \code{\link{readMaxQuantFile}})
#' are present and were imported when calling the import-function (eg using the argument _suplAnnotFile=TRUE_).
#' Please note that this functionality was designed for the case where no (external) sdrf-file is available.
#' Thus, when data was imported including an external sdrf (using the _sdrf=_ argument), exporting incomplete annotation-data from MaxQuant-produced files does not make any sense and therefore won't be possible.
#'
#' After exporting the draft sdrf the user is advised to check and complete the information in the resulting file.
#' Unfortunately, not all information present in a standard sdrf-file (like on \href{https://www.ebi.ac.uk/pride/}{Pride}) can be gathered automatically,
#' but key columns are already present and thus may facilitate completion.
#' Please note, that the file-format has been defined as \code{.tsv}, thus columns/fields should be separated by tabs.
#' At manual editing and completion, some editing- or tabulator-software may change the file-extesion to \code{.tsv.txt},
#' in this case the final files should be renamed as \code{.tsv} to remain compatible with Pride.
#'
#' At this point only the import of data from MaxQuant via \code{\link{readMaxQuantFile}} has been developed to extract information for creating a draft-sdrf.
#' Other data/file-import functions may be further developed to gather as much as possible equivalent information in the future.
#'
#' @param lst (list) object created by import-function (MaxQuant)
#' @param fileName (character) file-name (and path) to be used when exporting
#' @param correctFileExtension (logical) if \code{TRUE} the fileName will get a \code{.tsv}-extension if not already present
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function writes an Sdrf draft to file
#'
#' @seealso This function may be used after reading/importing data by \code{\link{readMaxQuantFile}} in absence of sdrf
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' fiNaMQ <- "proteinGroups.txt.gz"
#' dataMQ <- readMaxQuantFile(path1, file=fiNaMQ, refLi="mainSpe", sdrf=FALSE, suplAnnotFile=TRUE)
#' ## Here we'll write simply in the current temporary directory of this R-session
#' exportSdrfDraft(dataMQ, file.path(tempdir(),"testSdrf.tsv"))
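#' ## the draft is tab-separated and may be re-read for manual inspection/completion :
#' sdrfDraft <- utils::read.delim(file.path(tempdir(),"testSdrf.tsv"))
#' head(sdrfDraft)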
#'
#' @export
exportSdrfDraft <- function(lst, fileName="sdrfDraft.tsv", correctFileExtension=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## export sdrf-like
## Note : lst$sampleSetup$sdrfExport is created only if import-function was run with sdrf=FALSE !!
fxNa <- wrMisc::.composeCallName(callFrom, newNa="exportSdrfDraft")
if(isTRUE(debug)) silent <- FALSE
if(!isTRUE(silent)) silent <- FALSE
datOK <- TRUE
msg <- "Invalid entry - nothing to do. Argument 'lst' should be object like output of import-function from wrProteo"
if(!is.list(lst) || length(lst) <1) { datOK <- FALSE
if(!silent) message(fxNa, msg) }
if(datOK) {
expSdrf <- lst$sampleSetup$sdrfExport
datDim <- dim(lst$quant)
datOK <- length(expSdrf) >0 && isTRUE(datDim[2] >0)}
msg <- "Nothing to do; invalid entry or no data for exporting available !!"
if(!datOK) { if(!silent) message(fxNa, msg)
} else {
## prepare for writing file
fileName <- if(length(fileName) <1) "sdrfDraft.tsv" else fileName[1]
if(is.na(fileName)) fileName <- "sdrfDraft.tsv"
plCompl <- "please complete"
na <- "not available"
organisms <- table(lst$annot[which(lst$annot[,"Contam"] != "TRUE"), "Species"])
organisms <- names(sort(organisms, decreasing=TRUE))
organisms <- matrix(rep(organisms, each=datDim[2]), nrow=datDim[2], dimnames=list(NULL, rep("char_organism", length(organisms))))
modMatr <- c(if(length(lst$sampleSetup$summaryD$Variable.modifications) > 0) lst$sampleSetup$summaryD$Variable.modifications[1],
if(length(lst$sampleSetup$summaryD$Fixed.modifications) >0) lst$sampleSetup$summaryD$Fixed.modifications[1] )
if(length(wrMisc::naOmit(modMatr)) >0) {
modMatr <- unlist(lapply(modMatr, function(x) strsplit(as.character(x), ";") ))
modMatr <- wrMisc::naOmit(unique(modMatr))
modMatr <- matrix(rep(modMatr, each=datDim[2]), nrow=datDim[2], dimnames=list(NULL, rep("com_modification_parameters", length(modMatr))))
} else modMatr <- NULL
expo <- cbind(source.name=lst$sampleSetup$summaryD$ref, organisms, char_organism_part=na, char_disease=na, char_cell_type=na,
char_mass=plCompl, assay.name=paste0("run",1:(datDim[2])), char_spiked_compound=plCompl,
com_label=expSdrf["label"], com_instrument=plCompl, modMatr)
expo <- cbind( expo, com_precursor_mass_tolerance=expSdrf["precMassTol"], com_fragment_mass_tolerance=expSdrf["fragMassTol"],
com_technical_replicate=plCompl, com_fraction_identifyer=plCompl,
com_file_uri="not available", com_data_file=lst$sampleSetup$summaryD[,1] , material_type=plCompl )
    ## char_ for characteristics[..]; com_ for comment[..]
colnames(expo) <- gsub("_"," ", sub("com_","comment\\[", sub("char_","characteristics\\[", colnames(expo))))
colnames(expo)[grep("\\[", colnames(expo))] <- paste0(colnames(expo)[grep("\\[", colnames(expo))],"]") # complete brackets
## main write/export
chWr <- try(utils::write.table(expo, fileName, quote=FALSE, sep='\t', col.names=TRUE, row.names=FALSE))
if(inherits(chWr, "try-error")) { if(!silent) message(fxNa,"FAILED to write file (check if rights to write)")
} else if(!silent) message(fxNa,"Successfully exported sdrf-draft to file '",fileName,"'")
}
}
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/exportSdrfDraft.R
|
#' Extract species annotation
#'
#' \code{extrSpeciesAnnot} identifies species-related annotation (as suffix to identifiers) for data combining multiple species and returns alternative (short) names.
#' This function also suppresses extra leading or trailing space or punctuation characters.
#' In case multiple tags are found, the last tag is reported and a message of alert may be displayed.
#'
#' @param annot (character) vector with initial annotation
#' @param spec (character) the tags to be identified
#' @param shortNa (character) the final abbreviation used, order and length must fit to argument \code{spec}
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a character vector with single (last of multiple) term if found in argument \code{annot}
#' @seealso \code{\link[base]{grep}}
#' @examples
#' spec <- c("keratin_CONT","AB_HUMAN","CD_YEAST","EF_G_HUMAN","HI_HUMAN_ECOLI","_YEAST_012")
#' extrSpeciesAnnot(spec)
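#' ## sketch with custom abbreviations (order and length must match 'spec') :
#' extrSpeciesAnnot(spec, shortNa=c("cont","Hs","Sc","Ec"))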
#' @export
extrSpeciesAnnot <- function(annot,spec=c("_CONT","_HUMAN","_YEAST","_ECOLI"),shortNa=c("cont","H","S","E"), silent=FALSE, debug=FALSE, callFrom=NULL){
## extract species information for element of 'annot'
## return character vector with single (last of) term if found in 'annot'
## 'annot' .. character vector
## 'spec' .. (character) term to search
  ## 'shortNa' .. (character) term to code output
fxNa <- wrMisc::.composeCallName(callFrom, newNa="extrSpeciesAnnot")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
msg <- "Argument 'shortNa' doesn't fit to length of 'spec'"
if(length(shortNa) < length(spec) && length(shortNa) >0) {
if(!silent) message(fxNa,msg," ignoring")
shortNa <- NULL }
if(is.null(shortNa)) {
shortNa <- sub("[[:punct:]]+[[:blank:]]+[[:punct:]]+|^[[:blank:]]+[[:punct:]]+[[:blank:]]+","",spec)
trim <- substr(shortNa,1,1)
if(length(unique(trim)) < length(spec)) {
shortNa <- substr(shortNa,1,2)
} else shortNa <- trim
if(!silent) message(fxNa,"Constructing 'shortNa'.. replace by 1st alphanum-character : ",shortNa) }
## main
tmp <- list()
out <- rep(NA, length(annot))
for(i in 1:length(spec)) {tmp[[i]] <- grep(spec[i], annot)
out[tmp[[i]]] <- shortNa[i] }
che <- table(table(unlist(tmp)))
if(any(as.numeric(names(che)) >1) && !silent) message(fxNa,"Multiple/conflicting annotation in ",sum(che[which(as.numeric(names(che)) != 1)])," cases")
out }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/extrSpeciesAnnot.R
|
#' Extract Results From Moderated t-tests
#'
#' This function allows convenient access to results produced using the functions \code{\link{moderTest2grp}} or \code{moderTestXgrp}.
#' The user can define the threshold which type of multiple testing correction should be used
#' (as long as the multiple testing correction method was actually performed as part of testing).
#'
#' @param stat ('MArrayLM'-object or list) Designed for the output from \code{moderTest2grp} or \code{moderTestXgrp}
#' @param compNo (integer) the comparison number/index to be used
#' @param statTy (character) the multiple-testing correction type to be considered when looking for significant changes with threshold \code{thrsh} (depends on which have been run initially with \code{moderTest2grp} or \code{moderTestXgrp})
#' @param thrsh (numeric) the threshold to be applied on \code{statTy} for the result of the statistical testing (after multiple testing correction)
#' @param FCthrs (numeric) Fold-Change threshold given as Fold-change and NOT log2(FC), default at 1.5 (for filtering at M-value =0.585)
#' @param annotCol (character) column-names from the annotation to be included
#' @param nSign (integer) number of significant digits when returning results
#' @param addTy (character) additional groups to add (so far only "allMeans" available) in addition to the means used in the pairwise comparison
#' @param filename (character) optional (path and) file-name for exporting results to csv-file
#' @param fileTy (character) file-type to be used with argument \code{filename}, may be 'csvEur' or 'csvUS'
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a limma-type MA-object (which can be handled like a list)
#' @seealso \code{\link{moderTest2grp}} for single comparisons, \code{\link{moderTestXgrp}} for multiple comparisons, \code{\link[limma]{lmFit}} and the \code{eBayes}-family of functions in package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}
#' @examples
#' grp <- factor(rep(LETTERS[c(3,1,4)],c(2,3,3)))
#' set.seed(2017); t8 <- matrix(round(rnorm(208*8,10,0.4),2), ncol=8,
#' dimnames=list(paste(letters[],rep(1:8,each=26),sep=""), paste(grp,c(1:2,1:3,1:3),sep="")))
#' t8[3:6,1:2] <- t8[3:6,1:2] +3 # augment lines 3:6 (c-f)
#' t8[5:8,c(1:2,6:8)] <- t8[5:8,c(1:2,6:8)] -1.5 # lower lines
#' t8[6:7,3:5] <- t8[6:7,3:5] +2.2 # augment lines
#' ## expect to find C/A in c,d,g, (h)
#' ## expect to find C/D in c,d,e,f
#' ## expect to find A/D in f,g,(h)
#' library(wrMisc) # for testing we'll use this package
#' test8 <- moderTestXgrp(t8, grp)
#' extractTestingResults(test8)
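#' ## optionally the results may be exported to a csv-file
#' ## (here, as a sketch, into the R-session's temporary directory) :
#' extractTestingResults(test8, filename=file.path(tempdir(),"testRes.csv"))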
#' @export
extractTestingResults <- function(stat, compNo=1, statTy="BH",thrsh=0.05, FCthrs=1.5, annotCol=c("Accession","EntryName","GeneName"),
nSign=6, addTy=c("allMeans"), filename=NULL, fileTy="csvUS", silent=FALSE, debug=FALSE, callFrom=NULL) {
##
fxNa <- wrMisc::.composeCallName(callFrom, newNa="extractTestingResults")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
argNa <- deparse(substitute(stat))
if(!"list" %in% mode(stat) || length(stat) <1) stop("'stat' must be a list or 'MArrayLM'-object from limma")
if(!("MArrayLM" %in% class(stat)) & !silent) message(fxNa," Caution, '",argNa,"' is not 'MArrayLM'-object as expected")
if(length(statTy) <1) { statTy <- "BH"
if(!silent) message(fxNa," argument 'statTy' empty, setting to default 'BH'")}
useFdrTy <- if(identical(statTy,"BH") & "FDR" %in% names(stat)) "FDR" else statTy # maybe not needed to force to FDR
chLstEl <- c(useFdrTy,"annot") %in% names(stat)
if(!chLstEl[1]) stop("Cannot find list-element '",useFdrTy,"' in 'stat'")
if(!chLstEl[2]) {
if(!silent && length(annotCol) >0) message(fxNa,"Cannot find list-element 'annot' in ",argNa)
annotCol <- NULL }
if(length(compNo) != 1) stop("'compNo' must be numeric and of length=1")
if(compNo > ncol(stat[[useFdrTy]])) { compNo <- 1
    message(fxNa," Invalid entry of 'compNo', setting to default compNo=1")}
if(is.na(FCthrs) || !is.numeric(FCthrs)) FCthrs <- NULL
if(length(FCthrs) >0 && is.numeric(FCthrs)) FCthrs <- log2(FCthrs) else FCthrs <- NULL
groupSep <- "-" # used to separate comparison groups
## main extracting
## redo sample-pair assoc
avCol <- wrMisc::sampNoDeMArrayLM(stat, compNo, lstP=useFdrTy) # ultimately switch to function in wrMisc
## filtering ? normally already taken care of during testing
## need FC values
logFC <- stat$means[,avCol[2]] - stat$means[,avCol[1]]
fcOk <- if(length(FCthrs) ==1) abs(logFC) > FCthrs else rep(TRUE, length(logFC))
chNa <- is.na(fcOk)
if(any(chNa)) fcOk[which(chNa)] <- FALSE
## check for Fdr results
fdrOk <- stat[[useFdrTy]][,compNo] < thrsh
chNa <- is.na(fdrOk)
if(any(chNa)) fdrOk[which(chNa)] <- FALSE
if(any(fcOk & fdrOk)) {
    extrLi <- which(fcOk & fdrOk)
useCompNo <- c(compNo,(1:ncol(stat[[useFdrTy]]))[-compNo])
## some results .. continue
if(any(c("all","allFDR") %in% addTy) & length(avCol) >2) {
out <- stat[[useFdrTy]][extrLi,]
colnames(out) <- paste0(useFdrTy,".",colnames(out))
out <- if(nrow(out) >1) out[,useCompNo] else matrix(out[,useCompNo], nrow=1, dimnames=list(names(extrLi),colnames(out)[useCompNo])) # place comparison of interest first
} else out <- data.frame(FDR=stat[[useFdrTy]][extrLi,compNo])
## prepare FC
if(any(c("all","allFC") %in% addTy) && length(avCol) >2) {
outX <- sapply(useCompNo, function(x) wrMisc::sampNoDeMArrayLM(stat, x, lstP=useFdrTy))
      out2 <- (stat$means[,outX[2,]] - stat$means[,outX[1,]])[extrLi,useCompNo]   # use the 'outX' column-pairs for each comparison
colnames(out2) <- if(length(useCompNo) >1) paste0("logFC.",apply(outX,2, function(x) paste(colnames(stat$means)[x],collapse="-"))) else "logFC"
} else {out2 <- as.matrix(logFC[extrLi]); colnames(out2) <- paste0("logFC.",colnames(stat[[useFdrTy]])[compNo]) }
## prepare means
ch1 <- any(c("all","allMeans") %in% addTy) || length(avCol) <3
out3 <- if(ch1) stat$means[extrLi,] else stat$means[extrLi,avCol]
if(length(extrLi)==1) out3 <- matrix(out3, nrow=1, dimnames=list(names(extrLi), if(ch1) colnames(stat$means) else colnames(stat$means)[avCol] ))
if(length(dim(out3)) >1) colnames(out3) <- paste0("av.",colnames(out3))
out <- signif(cbind(out, out2, out3), nSign)
if(length(annotCol) >0) out <- cbind(stat$annot[extrLi,wrMisc::naOmit(match(annotCol,colnames(stat$annot)))], out) # add annotation
## optional writing to file
if(length(filename)==1) {
digits <- min(nSign, 12)
tmp <- if(identical(fileTy,"csvEur")) {
try(utils::write.csv2(as.matrix(format(out, digits=digits)), filename, row.names=FALSE,quote=FALSE),silent=silent)
} else try(utils::write.csv(as.matrix(format(out, digits=digits)), filename, row.names=FALSE,quote=FALSE),silent=silent)
if(inherits(tmp, "try-error")) message(fxNa," Note: Did not manage to write results to file '",filename,"', check for rights to write ...") else {
if(!silent) message(fxNa," Wrote results successfully to file '",filename,"'")}
}
out
} else {if(!silent) message(fxNa,"No results pass thresholds"); return(NULL)} }
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/extractTestingResults.R
|
#' Add arrow for expected Fold-Change to VolcanoPlot or MA-plot
#'
#' NOTE : This function is deprecated, please use \code{\link[wrGraph]{foldChangeArrow}} instead !!
#' This function was made for adding an arrow indicating a fold-change to MA- or Volcano-plots.
#' When comparing multiple concentrations of standards in benchmark-tests it may be useful to indicate the expected ratio in a pair-wise comparison.
#' In case of main input as list or MArrayLM-object (as generated from limma), the column-names of multiple pairwise comparisons can be used
#' for extracting a numeric content (supposed as concentrations in sample-names) which will be used to determine the expected ratio used for plotting.
#' Optionally the ratio used for plotting can be returned as numeric value.
#'
#' @param FC (numeric, list or MArrayLM-object) main information for drawing arrow : either numeric value for fold-change/log2-ratio of object to search for colnames of statistical testing for extracting numeric part
#' @param useComp (integer) only used in case FC is list or MArrayLM-object an has multiple pairwise-comparisons
#' @param isLin (logical) indicate if \code{FC} is log2 or not
#' @param asX (logical) indicate if arrow should be on x-axis
#' @param col (integer or character) custom color
#' @param arr (numeric, length=2) start- and end-points of arrow (as relative to entire plot)
#' @param lwd (numeric) line-width of arrow
#' @param addText (logical or named vector) indicate if text explaining arrow should be displayed, use \code{TRUE} for default (on top right of plot),
#' or any combination of 'loc','line','cex','side','adj','col','text' (or 'txt') for customizing specific elements
#' @param returnRatio (logical) return ratio
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return plots arrow only (and explicative text), if \code{returnRatio=TRUE} also returns numeric value for extracted ratio
#'
#' @details The argument \code{addText} also allows specifying a fixed position when using \code{addText=c(loc="bottomleft")}, also bottomright, topleft, topright, toleft and toright may be used.
#' In this case the elements \code{side} and \code{adjust} will be redefined to accommodate the text in the corner specified.
#'
#' Ultimately this function will be integrated into the package wrGraph.
#'
#' @seealso new version : \code{\link[wrGraph]{foldChangeArrow}}; used with \code{\link[wrGraph]{MAplotW}}, \code{\link[wrGraph]{VolcanoPlotW}}
#' @examples
#' plot(rnorm(20,1.5,0.1),1:20)
#' #deprecated# foldChangeArrow2(FC=1.5)
#'
#' @export
foldChangeArrow2 <- function(FC, useComp=1, isLin=TRUE, asX=TRUE, col=1, arr=c(0.005,0.15), lwd=NULL,
addText=c(line=-0.9,cex=0.7,txt="expected",loc="toright"), returnRatio=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL){
##
  .Deprecated("Please use the foldChangeArrow() function from the package wrGraph instead !")
fxNa <- wrMisc::.composeCallName(callFrom, newNa="foldChangeArrow2")
if(!isTRUE(silent)) silent <- FALSE
figCo <- graphics::par("usr") # c(x1, x2, y1, y2)
if(length(FC) >1 && any(c("MArrayLM","list") %in% class(FC))) {
## try working based on MArrayLM-object or list
## look for names of pairwise comparisons to extract numeric parts for calculating expected ratio
chNa <- names(FC) %in% c("t","BH","FDR","p.value")
if(any(chNa)) {
if(all(length(useComp)==1, length(dim(FC[[which(chNa)[1]]])) ==2, dim(FC[[which(chNa)[1]]]) > 0:1)) {
colNa <- colnames(FC[[which(chNa)[1]]])
ch2 <- colNa[1]=="(Intercept)" && length(colNa)==2
} else ch2 <- TRUE
if(!ch2) {
regStr <-"[[:space:]]*[[:alpha:]]+[[:punct:]]*[[:alpha:]]*"
colNa <- sub(paste0("^",regStr),"", sub(paste0(regStr,"$"), "", unlist(strsplit(colNa[useComp], "-"))) )
chN2 <- try(as.numeric(colNa), silent=TRUE)
if(!"try-error" %in% class(chN2) & length(colNa)==2) {
FC <- chN2[2] / chN2[1]
} else ch2 <- TRUE
## note: wrMisc::numPairDeColNames() sorts numeric values, can't use
chN2 <- all(length(colNa)==2, nchar(sub("[[:digit:]]*\\.?[[:digit:]]*","",colNa)) <1) # contains only usable digits
isLin <- FALSE # assume log2 when from testing result
} else ch2 <- TRUE
} else ch2 <- TRUE
if(ch2) FC <- NULL
}
FC <- try(as.numeric(FC), silent=TRUE)
if(!"try-error" %in% class(FC)) {
if(!isLin) FC <- log2(FC)
if(any(identical(addText, TRUE), c("line","cex","side","adj","col","text","txt","loc") %in% names(addText))) {
      if(debug) cat(" .. FC",FC," addText:",addText,"\n")    # debugging output only
## bottomleft, bottomright, topleft, topright, toleft and toright
mLi <- if("line" %in% names(addText)) try(as.numeric(addText["line"][1]),silent=TRUE) else -0.9
mCex <- if("cex" %in% names(addText)) try(as.numeric(addText["cex"][1]),silent=TRUE) else 0.7
mSide <- if("side" %in% names(addText)) try(as.integer(addText["side"][1]),silent=TRUE) else 1
mAdj <- if("adj" %in% names(addText)) try(as.integer(addText["adj"][1]),silent=TRUE) else 1
mCol <- if("col" %in% names(addText)) addText["col"][1] else col
mTxt <- if("text" %in% names(addText)) addText["text"][1] else {if("txt" %in% names(addText)) addText["txt"][1] else "arrow: expected="}
if("loc" %in% names(addText)) {
mTxt <- paste(mTxt, signif(if(isLin) 2^FC else FC,3))
## check for left/right/center
chRi <- grep("right$",as.character(addText["loc"]))
chLe <- grep("left$",as.character(addText["loc"]))
chCe <- grep("center$",as.character(addText["loc"]))
if(length(chLe) >0) { mAdj <- 0; mTxt <- paste0(" ",mTxt) # this is left.xxx
if(arr[1] < 0.15 && FC < figCo[1] +diff(figCo[1:2])/3) arr[1] <- 0.015 # raise min starting hight to avoid crossing text
} else { if(length(chRi) >0) {mAdj <- 1; mTxt <- paste0(mTxt," ") # this is right.xxx
if(arr[1] < 0.15 && FC > figCo[2] -diff(figCo[1:2])/3) arr[1] <- 0.015 # raise min starting hight to avoid crossing text
} else {
if(length(chCe) >0) mAdj <- 0.5; if(arr[1] < 0.15) arr[1] <- 0.015 }}
## check for top/bottom
chTop <- grep("^top",as.character(addText["loc"]))
chBot <- grep("^bottom",as.character(addText["loc"]))
if(length(chTop) >0) mSide <- 3 else if(length(chBot) >0) mSide <- 1
chTo <- c(grep("tori",as.character(addText["loc"])), grep("tole",as.character(addText["loc"])))
if(length(chTo) >0) graphics::mtext(mTxt, at=FC, side=mSide, adj=0, col=mCol, cex=mCex, line=mLi) else {
graphics::mtext(mTxt, side=mSide, adj=mAdj, col=mCol, cex=mCex, line=mLi)
} }
}
chArr <- (arr[2] -arr[1]) > 0.05
if(!chArr) arr[2] <- arr[1] +0.04
## draw arrow
if(!isFALSE(asX)) graphics::arrows(FC, figCo[3] + arr[1]*(figCo[4]-figCo[3]), FC, figCo[3] + arr[2]*(figCo[4]-figCo[3]),
col=col,lwd=lwd,length=0.1) else { graphics::arrows(figCo[3] + arr[1]*(figCo[4]-figCo[3]), FC,
figCo[3] + arr[2]*(figCo[4]-figCo[3]), FC, col=col, lwd=lwd,length=0.1) }
if(isTRUE(returnRatio)) return(FC)
  } else if(!silent) message(fxNa,"Unable to extract usable values for drawing arrow")
}
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/foldChangeArrow2.R
|
#' Combine Multiple Proteomics Data-Sets
#'
#' This function allows combining up to 3 separate data-sets previously imported using wrProteo.
#'
#' @details
#' Some quantification software may give some identifiers multiple times, ie as multiple lines (eg for different modifications or charge states, etc).
#' In this case this function tries first to summarize all lines with identical identifiers (using the function \code{\link[wrMisc]{combineRedundLinesInList}}
#' which uses by default the median value).
#' Thus, it is very important to know your data and to understand when lines that appear with the same identifiers should/may be fused/summarized without
#' doing damage to the later biological interpretation ! The user may specify for each dataset the column out of the protein/peptide-annotation to use
#' via the argument \code{columnNa}.
#' Then, this content will be matched as identical match, so when combining data from different software special care should be taken !
#'
#' Please note, that (at this point) the data from different series/objects will be joined as they are, ie without any additional normalization.
#' It is up to the user to inspect the resulting data and to decide if and which type of normalization may be suitable !
#'
#' Please do NOT try combining protein and peptide quantification data.
#'
#' @param x (list) First Proteomics data-set
#' @param y (list) Second Proteomics data-set
#' @param z (list) optional third Proteomics data-set
#' @param columnNa (character) column names from annotation
#' @param NA.rm (logical) remove \code{NA}s
#' @param listNa (character) names of key list-elements from \code{x} to be treated; the first one is used as pattern for the format of quantitation data,
#'   the last one for the annotation data
#' @param all (logical) decide whether the union (\code{TRUE}) or the intersection (\code{FALSE}) of identifiers from x, y and z should be used
#' @param textModif (character) Additional modifications to the identifiers from argument \code{columnNa};
#'  so far integrated: \code{rmPrecAA} for removing preceding and following amino-acids (eg [KR].AGVIFPVGR.[ML] => AGVIFPVGR)
#'  or \code{rmTerminalDigit} for removing terminal digits (eg charge-states)
#' @param shortNa (character) for appending to output-colnames
#' @param retProtLst (logical) return list-object similar to input, otherwise a matrix of fused/aligned quantitation data
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with the same number of list-elements as \code{$x}, ie typically this contains :
#' \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations,
#' \code{$annot}, optionally \code{$counts} an array with number of peptides, \code{$quantNotes} or \code{$notes}
#' @seealso \code{\link[stats]{sd}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
#' MCproFi1 <- "tinyMC.RData"
#' dataMC <- readMassChroQFile(path1, file=MCproFi1, plotGraph=FALSE)
#' dataFused <- fuseProteomicsProjects(dataMQ, dataMC)
#' dim(dataMQ$quant)
#' dim(dataMC$quant)
#' dim(dataFused$quant)
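#' ## sketch (assuming the fused object above): inspect its list-structure;
#' ## note that all=TRUE would keep the union of IDs instead of the intersection
#' str(dataFused, max.level=1)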
#' @export
fuseProteomicsProjects <- function(x, y, z=NULL, columnNa="Accession", NA.rm=TRUE, listNa=c(quant="quant",annot="annot"), all=FALSE, textModif=NULL, shortNa=NULL, retProtLst=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
##
##
## 'listNa' names of list-elements containing quantitation data (1st position) and protein/line annotation (2nd position)
## 'columnNa' column names from annotation (equiv to argument 'by' in merge() )
## 'shortNa' for appending to output-colnames
## 'retProtLst' return list-object similar to input, otherwise a matrix of fused/aligned quantitation data
## set 'textModif' (character) to 'rmTerminalDigit' for treating peptides from DiaNN
## note : no normalization by default
fxNa <- wrMisc::.composeCallName(callFrom, newNa="fuseProteomicsProjects")
#namesXY <- c(deparse(substitute(x)), deparse(substitute(y)), if(length(z) >0) deparse(substitute(z)))
namesXY <- c(deparse(substitute(x)), deparse(substitute(y)), deparse(substitute(z)))
out <- NULL
if(length(x) <1) stop("Argument 'x' seems empty !")
if(length(y) <1) stop("Argument 'y' seems empty !")
if(debug) message(fxNa," fPL0")
## check argument 'shortNa'
if(length(shortNa) <1) shortNa <- wrMisc::trimRedundText(namesXY)
chNA <- is.na(shortNa)
if(any(chNA)) shortNa[which(chNA)] <- ""
## check argument 'columnNa'
if(length(columnNa) <1) stop("Missing 'columnNa' (should designate colnames of x$listNa[2], etc)")
if(length(columnNa)==1) columnNa <- rep(columnNa, 2 +(length(z) >0))
datOK <- columnNa[1] %in% colnames(x[[listNa[length(listNa)]]])
if(!datOK) stop("missing column '",columnNa[1],"' in ",namesXY[1])
ch2 <- columnNa[2] %in% colnames(y[[listNa[length(listNa)]]])
if(!ch2) stop("missing column '",columnNa[2],"' in ",namesXY[2])
if(length(z) >0) {
ch1 <- columnNa[3] %in% colnames(z[[listNa[length(listNa)]]])
if(!ch1) stop("missing column '",columnNa[3],"' in ",namesXY[3])
}
annN <- list(x=x[[listNa[length(listNa)]]][,which(colnames(x[[listNa[length(listNa)]]])==columnNa[1])[1]],
y=y[[listNa[length(listNa)]]][,which(colnames(y[[listNa[length(listNa)]]])==columnNa[2])[1]],
z=if(length(z) >0) z[[listNa[length(listNa)]]][,which(colnames(z[[listNa[length(listNa)]]])==columnNa[3])[1]] else NULL)
## check IDs (all identical or all NA)
badID <- lapply(annN, function(w) if(length(w) >0) {all(is.na(w)) || sum(duplicated(w)) == length(w) -1} else NULL)
if(debug) {message(fxNa," fPL0b ++"); fPL0b <- list()}
if(any(unlist(badID[1:2]))) stop(fxNa," The annotation given for ",wrMisc::pasteC(namesXY[which(unlist(badID))], quoteC="'")," with column '",columnNa,"' is all NA or all redundant")
if("rmPrecAA" %in% textModif) { ## remove preceeding and following AAs eg '[KR].AGVIFPVGR.[ML]' => 'AGVIFPVGR'
annN <- lapply(annN, function(x) sub("^\\[[[:upper:]]+\\]\\.","", sub("\\.\\[[[:upper:]]+\\]$","", x)) ) }
if("rmTerminalDigit" %in% textModif) { ## remove terminal digit (eg from charge-state)
annN <- lapply(annN, function(x) sub("[[:digit:]]+$","", x) ) }
if(debug) {message(fxNa," fPL1 ++"); fPL1 <- list(x=x,y=y,z=z,columnNa=columnNa,listNa=listNa,shortNa=shortNa,annN=annN,textModif=textModif,all=all)} #xAnn=xAnn,xQua=xQua,yAnn=yAnn,yQua=yQua,
## combine redundant IDs ?
chUni <- lapply(annN, duplicated)
##
if(datOK) {
## IDs : combine from data-sets
rmRed <- list(x=x, y=y, z=z)
chLe <- sapply(rmRed, length) >0
### CHECK THIS WHEN FINISHED modifying combineRedundLinesInList() !!
allID3 <- lapply(rmRed, function(w) wrMisc::naOmit(w[[listNa[length(listNa)]]][,columnNa]))
if(isTRUE(all)) allID <- unique(unlist(allID3)) else {
k <- which(sapply(allID3, length) >0)
allID <- if(length(k) >1) intersect(allID3[[k[1]]], allID3[[k[2]]]) else NULL
if(length(k) >2) for(i in k[-1:-2]) allID <- intersect(allID, allID3[[i]])   # 'i' is already an index into allID3
}
if(debug) {message(fxNa," fPL1b ++"); fPL1b <- list()} #
if(length(allID) <1) { datOK <- FALSE
if(debug) message(fxNa,"Found NO COMMON IDs ! Nothing to do ..") }
}
if(datOK) {
if(debug) {message(fxNa," fPL2"); fPL2 <- list(all=all,rmRed=rmRed,allID=allID,x=x,y=y,z=z,columnNa=columnNa,listNa=listNa,shortNa=shortNa,annN=annN,textModif=textModif,namesXY=namesXY)} #xAnn=xAnn,xQua=xQua,yAnn=yAnn
## extract column names of quantitation data (to fuse)
colNaAnn <- lapply(rmRed, function(w) colnames(w[[listNa[length(listNa)]]]))
colNa <- lapply(rmRed, function(w) colnames(w[[listNa[1]]]))
colInd <- cumsum(sapply(colNa, length))
colInd <- cbind(beg=c(1, 1 +colInd[-length(colNa)]), end=colInd)
## fuse quantitative data (add columns)
useLst <- which(sapply(rmRed, length) >0) # which datasets contain data for fusing
if(debug) {message(fxNa," fPL2c"); fPL2c <- list()}
j <- 1
for(i in useLst) {
if(j==1) { out <- rmRed[[i]]
dim2 <- lapply(out, dim)
arr3dim <- sapply(dim2, function(w) if(length(w) ==3) all(w[1:2]==dim(rmRed[[i]][[listNa[1]]])) else FALSE ) # all elements with 3 dims & same nrow & ncol as listNa[1]
mat2dim <- sapply(dim2, function(w) if(length(w) ==2) all(w[1:2]==dim(rmRed[[i]][[listNa[1]]])) else FALSE ) # all elements with 2 dims & same nrow & ncol as listNa[1]
chAnn <- names(mat2dim) %in% listNa[length(listNa)]
if(any(chAnn, na.rm=TRUE)) mat2dim[which(chAnn)] <- FALSE
if(debug) {message(fxNa,"Matrix-type elements found for fusing : ",wrMisc::pasteC(names(out)[which(mat2dim)], quoteC="'"))
message(fxNa,"3-dim arrays elements found for fusing : ",wrMisc::pasteC(names(out)[which(arr3dim)], quoteC="'")) }
if(debug) {message(fxNa," fPL2d i=",i," j=",j); fPL2d <- list(all=all,rmRed=rmRed,allID=allID,x=x,y=y,z=z,out=out,columnNa=columnNa,listNa=listNa,shortNa=shortNa,annN=annN,textModif=textModif,namesXY=namesXY,useLst=useLst,colInd=colInd,colNa=colNa,arr3dim=arr3dim)
}
## fuse quantitative data (add columns)
if(sum(arr3dim) >0) for(k in which(arr3dim)) out[[k]] <- array(dim=c(length(allID), sum(sapply(colNa, length)), dim(rmRed[[i]][[k]])[3]),
dimnames=list(allID, paste0(rep(namesXY,sapply(colNa, length)),".",unlist(colNa)), dimnames(rmRed[[i]][[k]])[[3]]))
if("notes" %in% names(out)) out$notes <- matrix(out$notes, ncol=1, dimnames=list(names(out$notes),namesXY[i]))
if("sampleSetup" %in% names(out)) out$sampleSetup <- list(out$sampleSetup$groups) # very minimal fusion ... ($groups ONLY !!)
if("quantNotes" %in% names(out)) out$quantNotes <- list(out$quantNotes) }
## prepare for fusing quantitative data (columns)
if(j==1) { tmp <- matrix(NA, nrow=length(allID), ncol=sum(sapply(colNa, length)), dimnames=list(allID, paste0(rep(namesXY,sapply(colNa, length)),".",unlist(colNa)))) ## initialize
if(sum(mat2dim) >1) for(k in which(mat2dim)) out[[k]] <- tmp
if(sum(arr3dim) >0) {
for(k in which(arr3dim)) tmp <- array(NA, dim=c(length(allID), sum(sapply(colNa, length)), dim2[[k]][3]),
dimnames=list(allID, paste0(rep(namesXY,sapply(colNa, length)),".",unlist(colNa)), dimnames(rmRed[[i]][[k]])[[3]])) ## initialize (3rd dim and its names from the input array)
}
}
## assign
inLi <- wrMisc::naOmit(match(allID, rmRed[[i]][[listNa[length(listNa)]]][,columnNa[i]])) # where current fits to allID
if(debug) message(fxNa,"i=",i," extract ",length(inLi)," common out of current ",nrow(rmRed[[i]][[listNa[length(listNa)]]]),"")
for(k in which(mat2dim)) out[[k]][which(allID %in% rmRed[[i]][[listNa[length(listNa)]]][,columnNa[i]]), colInd[i,1]:colInd[i,2]] <- rmRed[[i]][[k]][ inLi, ]
if(j==1) out[[listNa[length(listNa)]]] <- out[[listNa[length(listNa)]]][inLi, ] # annotation
if(debug) {message(fxNa," fPL2e i=",i," j=",j); fPL2e <- list()}
if(sum(arr3dim) >0) for(k in which(arr3dim)) if(j >1 && names(out)[k] %in% names(rmRed[[i]])) { # possibly also check if name of 3rd dim is consistent ..
if(length(dim(rmRed[[i]][[k]])) ==3) out[[k]][which(allID %in% rmRed[[i]][[listNa[length(listNa)]]][,columnNa[i]]), colInd[i,1]:colInd[i,2],] <- rmRed[[i]][[k]][ inLi,, ] }
## also try fusing annotation data ?
## fuse other (notes, sampleSetup, quantNotes)
if(debug) message(fxNa," fPL3a ")
if(j >1 && "notes" %in% names(rmRed[[i]])) out$notes <- cbind(out$notes, unlist(rmRed[[i]]$notes)[match(rownames(out$notes), names(rmRed[[i]]$notes))])
if(j >1 && "sampleSetup" %in% names(rmRed[[i]])) out$sampleSetup[[j]] <- rmRed[[i]]$sampleSetup$groups
if(j >1 && "quantNotes" %in% names(rmRed[[i]])) out$quantNotes[[j]] <- rmRed[[i]]$quantNotes
j <- j +1
}
}
out
}
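
## Illustrative sketch (not part of the package): the two 'textModif' options correspond
## to the sub() calls used above; the peptide strings below are made up for demonstration
pep <- c("[KR].AGVIFPVGR.[ML]", "PEPTIDER2")
sub("^\\[[[:upper:]]+\\]\\.", "", sub("\\.\\[[[:upper:]]+\\]$", "", pep))   # rmPrecAA => 'AGVIFPVGR'
sub("[[:digit:]]+$", "", pep)                                               # rmTerminalDigit => 'PEPTIDER'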
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/fuseProteomicsProjects.R
|
#' Accession-Numbers And Names Of UPS-1 Proteins
#'
#' \href{https://www.sigmaaldrich.com/FR/en/product/sigma/ups1}{UPS-1} and UPS-2 are mixes of 48 human proteins frequently used
#' as standard in spike-in experiments. They are commercially available from Sigma-Aldrich.
#' This function allows accessing their accession-numbers and associated names on \href{https://www.uniprot.org/}{UniProt}.
#'
#' @return This function returns data.frame with accession-numbers as stated by the supplier (\code{$acFull}),
#' trimmed accession-numbers, ie without version numbers (\code{$ac})
#' and associated (\code{UniProt}) names on \href{https://www.uniprot.org/}{UniProt} as well as the species designation for the collection of 48 human UPS-1 proteins.
#' @examples
#' head(getUPS1acc())
#' @export
getUPS1acc <- function() {
## The accession numbers for the UPS1 proteins
UPS1 <- data.frame( ac=rep(NA,48),
acFull=c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165",
"P00709", "P06732", "P12081", "P61626", "Q15843", "P02753", "P16083", "P63279",
"P01008", "P61769", "P55957", "O76070", "P08263", "P01344", "P01127", "P10599",
"P99999", "P06396", "P09211", "P01112", "P01579", "P02787", "O00762", "P51965",
"P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375"),
uniProt=c("CAH1", "CAH2", "CO5", "HBA", "HBB", "LEP", "ALBU", "UBIQ", "CATA",
"CYB5", "EGF", "MYG", "NQO1", "PPIA", "PRDX1", "SUMO1", "LALBA", "KCRM",
"SYHC", "LYSC", "NEDD8", "RETBP", "NQO2", "UBC9", "ANT3", "B2MG", "BID",
"SYUG", "GSTA1", "IGF2", "PDGFB", "THIO", "CYC", "GELS", "GSTP1", "RASH",
"IFNG", "TRFE", "UBE2C", "UB2E1", "ANXA5", "CRP", "FABPH", "IL8", "TRFL",
"TAU", "SODC", "TNFA"),
species=rep("Homo sapiens", 48),
name=NA)
UPS1$ac <- sub("\\-[[:digit:]]+","", UPS1$acFull)
UPS1 }
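
## Illustrative sketch (not part of the package): flagging UPS1 spike-in proteins
## among a set of accession-numbers; 'myAcc' is a hypothetical input vector
myAcc <- c("P00915", "P12345", "P02768")
myAcc %in% getUPS1acc()$ac          # TRUE for the two UPS1 accessions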
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/getUPS1acc.R
|
#' Isolate NA-neighbours
#'
#' This function extracts all replicate-values where at least one of the replicates is \code{NA} and sorts by number of \code{NA}s per group.
#' A list with all \code{NA}-neighbours organized by the number of \code{NA}s gets returned.
#'
#' @param mat (matrix or data.frame) main data (may contain \code{NA})
#' @param gr (character or factor) grouping of columns of 'mat', replicate association
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with NA-neighbours sorted by number of NAs in replicate group
#' @seealso This function gets used by \code{\link{matrixNAneighbourImpute}} and \code{\link{testRobustToNAimputation}}; estimation of mode \code{\link[wrMisc]{stableMode}}; detection of NAs \code{\link[stats]{na.fail}}
#' @examples
#' mat1 <- c(22.2, 22.5, 22.2, 22.2, 21.5, 22.0, 22.1, 21.7, 21.5, 22, 22.2, 22.7,
#' NA, NA, NA, NA, NA, NA, NA, 21.2, NA, NA, NA, NA,
#' NA, 22.6, 23.2, 23.2, 22.4, 22.8, 22.8, NA, 23.3, 23.2, NA, 23.7,
#' NA, 23.0, 23.1, 23.0, 23.2, 23.2, NA, 23.3, NA, NA, 23.3, 23.8)
#' mat1 <- matrix(mat1, ncol=12, byrow=TRUE)
#' gr4 <- gl(3, 4)
#' isolNAneighb(mat1, gr4)
#' @export
isolNAneighb <- function(mat, gr, silent=FALSE, debug=FALSE, callFrom=NULL) {
## isolate NA-neighbours
fxNa <- wrMisc::.composeCallName(callFrom, newNa="isolNAneighb")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
msg <- NULL
datOK <- TRUE
NAneig <- NULL
if(any(length(mat) <1, length(dim(mat)) !=2, dim(mat) < c(2,1))) { datOK <- FALSE
msg <- "'mat' should be matrix or data.frame with min 2 rows & 1 column, nothing to do, return NULL"}
if(datOK && length(gr) !=ncol(mat)) { datOK <- FALSE
msg <- "Length of 'gr' must match number of columns in 'mat', nothing to do, return NULL" }
if(datOK && length(unique(gr)) ==length(gr)) { datOK <- FALSE
msg <- "No replicates, can't isolate NA-neighbours" }
if(!datOK && !silent) message(fxNa,msg)
if(debug) {message(fxNa," datOK: ",datOK," iNN1"); iNN1 <- list(mat=mat, gr=gr, datOK=datOK)}
if(datOK) {
## basic (optimized) extraction of NA-neighbours
maxHi <- max(tapply(gr, gr, length)) -1 # max number of NA-neighbours (exclude group/line with all NA)
NAneig <- lapply(1:maxHi, function(x) NULL) # initialize output
names(NAneig) <- paste0("n", 1:maxHi)
## need first to separate by groups of replicates
matR <- lapply(unique(gr), function(x) {mat[, which(gr ==x)]}) # split by groups of replicates
## now separate NA-neighbours for each group/line
nNA <- as.integer(sapply(matR, function(x) rowSums(is.na(x))))
naNei <- wrMisc::partUnlist(lapply(matR, apply, 1, function(x) {chN <- is.na(x); if(sum(chN) ==0 || sum(chN)==length(x)) NULL else x[which(!chN)]}), silent=silent, debug=debug, callFrom=fxNa)
chLe <- sapply(naNei, length)
## combine according to number of NA-values in group/line
if(any(chLe >0)) {for(i in 1:maxHi) {ch1 <- which(nNA==i); if(length(ch1) >0) NAneig[[i]] <- unlist(naNei[ch1])}}
} else NAneig <- NULL
NAneig }
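
## Illustrative sketch (not part of the package): the output is organized by the
## number of NAs per replicate-group; the tiny matrix below is made up
m1 <- matrix(c(11, NA, 13, 14,  NA, NA, 23, 24), ncol=4, byrow=TRUE)
isolNAneighb(m1, gl(2,2))           # $n1 holds 11, the only NA-neighbour here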
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/isolNAneighb.R
|
#' Molecular mass from chemical formula
#'
#' Calculate molecular mass based on atomic composition
#'
#' @param comp (character) atomic composition
#' @param massTy (character) 'mono' or 'average'
#' @param rmEmpty (logical) suppress empty entries
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a numeric vector with mass
#' @seealso \code{\link[wrMisc]{convToNum}}
#' @examples
#' massDeFormula(c("12H12O","HO"," 2H 1 Se, 6C 2N","HSeCN"," ","e"))
#' @export
massDeFormula <- function(comp, massTy="mono", rmEmpty=FALSE, silent=FALSE, callFrom=NULL){
## calculate molecular mass based on composition formula (sum formula: number & element)
## 'comp' .. character vector with molecular composition(s)
##
fxNa <- wrMisc::.composeCallName(callFrom,newNa="massDeFormula")
if(length(wrMisc::naOmit(comp)) < length(comp)){
if(!silent) message(fxNa,sum(is.na(comp))," entries of 'comp' are NA (remove) !")
comp <- comp[wrMisc::naOmit(match(wrMisc::naOmit(comp), comp))] }
## remove NAs, clean heading/trailing space
comp <- gsub("^ +","", gsub(" +$","",comp))
if(rmEmpty) {
if(any(comp=="")) {
if(!silent) message(fxNa,sum(comp=="")," entries of 'comp' are empty (removing) !")
comp <- comp[which(comp !="")] }
}
msg <- "Can't find any element names (must start with caps,'e' or 'z')'"
chEm <- nchar(comp) <1
if(any(chEm)) comp[which(chEm)] <- "z"
El <- up <- gregexpr("[[:upper:]]|e|z",comp)
chMaj <- sapply(up, function(x) any(x <1))
if(any(chMaj)) stop(msg," in ",comp[which(chMaj)])
ay <- gregexpr("[[:lower:]]", comp)
for(i in which(sapply(ay, function(x) any(x >0)))) { # correct if upper followed by lower caps
tmp <- which(El[[i]] %in% (ay[[i]]-1))
El[[i]][tmp] <- El[[i]][tmp] +1 }
form <- list()
for(i in 1:length(El)) {
begStr <- c(1,El[[i]][-length(El[[i]])] +1) # beginning of string with number & element
y <- substring(comp[[i]], begStr,El[[i]]) # isolated series until capital/lower letter
sig <- grep("-",y)
num <- gsub("[[:alpha:]]|[[:blank:]]|[[:punct:]]","",y)
num[which(num=="")] <- "1"
num <- as.numeric(num)
if(length(sig) >0) num[sig] <- -1*num[sig]
posIdx <- if(length(sig) >0) seq_along(num)[-sig] else seq_along(num)       # positions without '-' sign
if(length(posIdx) >0) num[posIdx] <- paste("+",num[posIdx],sep="")          # add '+' (also when no negative counts present)
form[[i]] <- matrix(c(num,substring(comp[[i]],up[[i]],El[[i]])), ncol=2, dimnames=list(NULL,c("n","elem"))) }
names(form) <- comp
## convert extracted/cleaned sum formula in mass :
if(identical(massTy,"average")) massTy <- "aver"       # the column of .atomicMasses() is named 'aver'
atMa <- .atomicMasses()[, massTy]
tmp <- lapply(form,function(x) x[,2] %in% names(atMa))
chEl <- sapply(tmp,sum)
usePep <- 1:length(comp)
if(any(chEl < sapply(form,nrow))) {
if(all(chEl <1)) stop(" Can't identify any of the names given via .atomicMasses()")
nonIdEl <- unlist(sapply(form,function(x) x[which(is.na(match(x[,2],names(atMa)))),2]))
if(any(chEl <1) && !silent) message(fxNa,"Can't find ",wrMisc::pasteC(sub("z"," ",nonIdEl), quoteC="'")," .. setting to 0 mass")
corEl <- which(chEl < sapply(form,nrow))
form[corEl] <- lapply(form[corEl], function(x) {x[which(x[,2] %in% nonIdEl),1] <- "0"; x})
}
mass <- sapply(form,function(x) sum(as.numeric(x[,1])*atMa[match(x[,2],names(atMa))]))
names(mass) <- sapply(form,function(x) paste(paste0(x[,1],x[,2]),collapse=""))
chNa <- names(mass) =="0z"
if(any(chNa)) {y <- which(chNa); mass[y] <- 0; names(mass)[y] <- ""}
mass }
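
## Illustrative sketch (not part of the package): the mono-isotopic mass of water,
## given as composition '2H1O', should come out close to 18.0106 Da
massDeFormula("2H1O")                      # ~18.0106 (mono-isotopic)
massDeFormula("2H1O", massTy="average")    # ~18.0153 (average mass)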
#' Molecular mass for Elements
#'
#' This function returns the molecular mass of the main elements found in biology/proteomics as average and mono-isotopic mass.
#' The result includes H, C, N, O, P, S, Se and the electron.
#' The values are based on http://www.ionsource.com/Card/Mass/mass.htm in ref to http://physics.nist.gov/Comp (as of 2019).
#'
#' @return This function returns a numeric matrix with mass values
#' @seealso \code{\link{massDeFormula}}
#' @examples
#' .atomicMasses()
#' @export
.atomicMasses <- function() {
## return matrix of atomic masses : 1st col for average mass and 2nd col for mono-isotopic
## based on http://www.ionsource.com/Card/Mass/mass.htm in ref to http://physics.nist.gov/Comp (~agree in http://www.weddslist.com/ms/tables.html)
mass <- cbind(aver=c(1.007940, 12.010700, 14.006700, 15.999400, 30.973761, 32.065000, 78.960000, 5.48579909e-4,0),
mono=c(1.0078250321, 12, 14.0030740052, 15.9949146221, 30.97376151, 31.97207069, 79.9165196, 5.48579909e-4,0))
rownames(mass) <- c("H","C","N","O","P","S","Se","e","ze")
mass }
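
## Illustrative sketch (not part of the package): comparing average and mono-isotopic
## masses per element shows where the isotope-envelope matters most
am <- .atomicMasses()
round(am[,"aver"] - am[,"mono"], 5)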
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/massDeFormula.R
|
#' Histogram of content of NAs in matrix
#'
#' \code{matrixNAinspect} makes histograms of the full data and shows sub-population of \code{NA}-neighbour values.
#' The aim of this function is to investigate the nature of \code{NA} values in matrix (of experimental measures) where replicate measurements are available.
#' If a given element was measured twice, and one of these measurements revealed a \code{NA} while the other one gave a (finite) numeric value, the non-NA-value is considered a \code{NA}-neighbour.
#' The subpopulation of these \code{NA}-neighbour values will then be highlighted in the resulting histogram.
#' In a number of experimental settings some actual measurements may not meet an arbitrarily defined baseline (as 'zero') or may be too low to be distinguishable from noise, so that
#' associated measures were initially recorded as \code{NA}. This may happen in several types of measurements in proteomics and transcriptomics.
#' So this function allows collecting all \code{NA}-neighbour values and comparing them to the global distribution of the data to investigate if \code{NA}-neighbours are typically very low values.
#' In case of data with multiple replicates, \code{NA}-neighbour values may be distinguished for the case of 2 \code{NA}s per group/replicate-set.
#' The resulting plots are typically used to decide if and how \code{NA} values may get replaced by imputed random values or whether measures containing \code{NA}-values should rather be omitted.
#' Of course, such decisions do have a strong impact on further steps of data-analysis and should be performed with care.
#'
#' @param dat (matrix or data.frame) main numeric data
#' @param gr (character or factor) grouping of columns of dat indicating who is a replicate of whom (ie the length of 'gr' must be equivalent to the number of columns in 'dat')
#' @param retnNA (logical) report number of NAs in graphic
#' @param xLab (character) custom x-label
#' @param tit (character) custom title
#' @param xLim (numerical,length=2) custom x-axis limits
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function produces a graphic (to the current graphical device)
#' @seealso \code{\link[graphics]{hist}}, \code{\link[stats]{na.fail}}, \code{\link[wrMisc]{naOmit}}
#' @examples
#' set.seed(2013)
#' datT6 <- matrix(round(rnorm(300)+3,1), ncol=6,
#' dimnames=list(paste("li",1:50,sep=""), letters[19:24]))
#' datT6 <- datT6 +matrix(rep(1:nrow(datT6),ncol(datT6)), ncol=ncol(datT6))
#' datT6[6:7,c(1,3,6)] <- NA
#' datT6[which(datT6 < 11 & datT6 > 10.5)] <- NA
#' datT6[which(datT6 < 6 & datT6 > 5)] <- NA
#' datT6[which(datT6 < 4.6 & datT6 > 4)] <- NA
#' matrixNAinspect(datT6, gr=gl(2,3))
#' @export
matrixNAinspect <- function(dat, gr=NULL, retnNA=TRUE, xLab=NULL, tit=NULL, xLim=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
fxNa <- wrMisc::.composeCallName(callFrom, newNa="matrixNAinspect")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
## extract if object
if(is.list(dat)) {
if("sampleSetup" %in% names(dat) & length(gr) <1) {
gr <- dat$sampleSetup$lev
if(length(gr) >1 && length(dat$sampleSetup$col) <2) names(gr) <- dat$sampleSetup$meta[,dat$sampleSetup$col] # in case names are not provided
}
if(!silent) message(fxNa,"Trying to extract quantitation data to use as 'dat' out of list ..")
dat <- dat$quant }
if(any(length(dim(dat)) !=2, dim(dat) < 2, na.rm=TRUE)) stop("Invalid argument 'dat'; must be matrix or data.frame with min 2 lines and 2 cols")
if(is.data.frame(dat)) dat <- as.matrix(dat)
chGr <- FALSE
if(length(gr) != ncol(dat)) stop("Number of columns in 'dat' and number of (group-)elements in 'gr' do not match !")
if(length(gr)==length(unique(gr))) { hasNaNeigh <- FALSE
if(!silent) message(fxNa,"NOTE : The argument 'gr' does not designate any replicates, can't determine NA-neighbours !")
} else hasNaNeigh <- TRUE
if(!is.factor(gr)) gr <- as.factor(gr)
if(is.null(xLab)) xLab <- "(log2) Abundance"
chRColB <- requireNamespace("RColorBrewer", quietly=TRUE)
if(!chRColB) message(fxNa,"More/better colors may be displayed with package 'RColorBrewer' installed; consider installing it !")
quaCol <- if(chRColB) RColorBrewer::brewer.pal(4,"Set1")[c(3,2,4)] else c(3:4,2)
if(is.null(tit)) tit <- "Distribution of values and NA-neighbours"
cexMain <- if(nchar(tit) < 25) 1.4 else 1.1
## main
NAneig <- NAneig2 <- numeric()
isNA <- is.na(dat)
chNA <- any(isNA)
nNAmat <- matrix(0, nrow=nrow(dat), ncol=length(levels(gr)), dimnames=list(NULL,levels(gr)))
colPanel <- c(grDevices::grey(0.6), grDevices::rgb(0,0.7,0,0.6), grDevices::rgb(0.15,0.15,0.7,0.7))
if(debug) {message(fxNa,"Ffound ",sum(isNA,na.rm=TRUE)," NAs (out of ",prod(dim(dat))," values); chNA=",chNA," mMNi0");
mMNi0 <- list(dat=dat,gr=gr,isNA=isNA,chNA=chNA,nNAmat=nNAmat,hasNaNeigh=hasNaNeigh)}
if(chNA && hasNaNeigh) {
## extract NA-neighbours
for(i in c(1:length(levels(gr)))) {
curCol <- which(gr==levels(gr)[i])
nNAmat[,i] <- if(length(curCol) >1) rowSums(isNA[,curCol]) else (isNA[,curCol])
maxCol <- length(curCol)
useLi <- which(nNAmat[,i] >0 & nNAmat[,i] < maxCol) # 1 or 2 NAs (but not all)
useL2 <- which(nNAmat[,i] >1 & nNAmat[,i] < maxCol) # just 2 NAs (but not all)
if(length(useLi) >0) NAneig <- c(NAneig, wrMisc::naOmit(as.numeric(dat[useLi,curCol])))
if(length(useL2) >0) NAneig2 <- c(NAneig2, wrMisc::naOmit(as.numeric(dat[useL2,curCol])))
}
n <- c(sum(!is.na(dat)), length(NAneig), length(NAneig2))
perc <- c("",paste(" (",round(100*n[2:3]/n[1],1),"%)"))
if(debug) {message(fxNa,"mMNi1 n=",wrMisc::pasteC(n))}
hi1 <- graphics::hist(dat, breaks="FD", plot=FALSE)
if(is.null(xLim)) graphics::plot(hi1, border=grDevices::grey(0.85), col=grDevices::grey(0.92), xlab=xLab, las=1, main=tit, cex.main=cexMain) else {
graphics::plot(hi1, border=grDevices::grey(0.85), col=grDevices::grey(0.92), xlab=xLab, las=1, main=tit, xlim=xLim,cex.main=cexMain)}
graphics::abline(v=stats::quantile(dat,c(0.05,0.1,0.15),na.rm=TRUE), col=c(quaCol[-1],"tomato3"), lty=2)
graphics::mtext(paste(c(" (bar) all data",paste(" (box) ",c("any","min 2")," NA-neighbour values"))," n=",n,perc), col=colPanel[1:3],cex=0.65,adj=0,line=c(0.6,-0.1,-0.7),side=3)
graphics::mtext(paste(" - -",c(5,10,15),"%-quantile (all data)"), col=c(quaCol[-1],"tomato3"), cex=0.6, adj=0, line=c(-1.5,-2.1,-2.7), side=3)
if(length(NAneig) >10) { # display mode
yLim <- signif(graphics::par("usr")[3:4], 3) # current y-limits
mod <- signif(wrMisc::stableMode(if(length(NAneig2) >300) NAneig2 else NAneig, method="density"), 3)
graphics::mtext(paste(" (arrow) mode of",if(length(NAneig2) >300) "2-"," NA-neighbours :",signif(mod,3)), col="sienna2", cex=0.7, adj=0, line=-3.4, side=3)
graphics::arrows(mod, yLim[1]+(yLim[2]-yLim[1])*0.4, mod, yLim[1]+(yLim[2]-yLim[1])/4, length=0.1, col="sienna2", lwd=2)
}
graphics::hist(NAneig, breaks=hi1$breaks, border=grDevices::grey(0.75), col=grDevices::rgb(0.1,1,0.1,0.15), add=TRUE); # in green
graphics::hist(NAneig2, breaks=hi1$breaks, border=grDevices::grey(0.75), col=grDevices::rgb(0,0,0.7,0.2), add=TRUE); # in purple
} else {
graphics::hist(dat, breaks="FD", border=grDevices::grey(0.85), col=grDevices::grey(0.92), xlab=xLab, las=1,main=tit,cex.main=cexMain)
graphics::mtext(paste(" (bar) all data n=",length(dat)), col=colPanel[1], cex=0.7, adj=0, line=0.6, side=3)
graphics::abline(v=stats::quantile(dat,c(0.05,0.1,0.15),na.rm=TRUE), col=c(quaCol[-1],"tomato3"), lty=2)
graphics::mtext(paste(" - -",c(5,10,15),"%-quantile (all data)"), col=c(quaCol[-1],"tomato3"), cex=0.6, adj=0, line=c(-1.5,-2.1,-2.7), side=3)}
}
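
## Illustrative sketch (not part of the package): the NA-neighbours shown in the
## histogram may be inspected numerically via isolNAneighb() (see above); the toy
## data and the 10% quantile used for comparison are arbitrary choices
set.seed(2013); datN <- matrix(round(rnorm(120) +3, 1), ncol=6)
datN[which(datN < 2.2)] <- NA
naNe <- unlist(isolNAneighb(datN, gr=gl(2,3)))
c(medianNAneigh=stats::median(naNe), stats::quantile(datN, 0.1, na.rm=TRUE))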
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/matrixNAinspect.R
|
#' Imputation of NA-values based on non-NA replicates
#'
#' It is assumed that \code{NA}-values appear in data when quantitation values are very low (as this appears eg in quantitative shotgun proteomics).
#' Here, the concept of (technical) replicates is used to investigate what kind of values appear in the other replicates next to NA-values for the same line/protein.
#' Groups of replicate samples are defined via argument \code{gr} which describes the columns of \code{dat}.
#' Then, they are inspected for each line to gather NA-neighbour values (ie those values where NAs and regular measures are observed at the same time).
#' Eg, let's consider a line containing a set of 4 replicates for a given group. Now, if 2 of them are \code{NA}-values, the remaining 2 non-\code{NA}-values will be considered as NA-neighbours.
#' Ultimately, the aim is to replace all \code{NA}-values based on values from a normal distribution resembling their respective NA-neighbours.
#'
#' By default a histogram gets plotted showing the initial, imputed and final distribution to check the global hypothesis that \code{NA}-values arose
#' from very low measurements and to appreciate the impact of the imputed values to the overall final distribution.
#'
#' @details
#' There are a number of experimental settings where low measurements may be reported as \code{NA}.
#' Sometimes an arbitrarily defined baseline (as 'zero') may provoke values falling below it to be reported as \code{NA} or as 0 (in the case of MaxQuant).
#' In quantitative proteomics (DDA-mode) the presence of numerous high-abundance peptides will lead to the fact that a number of less
#' intense MS-peaks don't get identified properly and will then be reported as \code{NA} in the respective samples,
#' while the same peptides may by correctly identified and quantified in other (replicate) samples.
#' So, if a given protein/peptide gets properly quantified in some replicate samples but reported as \code{NA} in other replicate samples,
#' one may speculate that values similar to those of the successful quantifications may have occurred.
#' Thus, imputation of \code{NA}-values may be done on the basis of \code{NA}-neighbours.
#'
#'
#'
#' When extracting \code{NA}-neighbours, a slightly more focussed approach gets checked, too, the 2-\code{NA}-neighbours : In case a set of replicates for a given protein
#' contains at least 2 non-\code{NA}-values (instead of just one) it will be considered as a (min) 2-\code{NA}-neighbour as well as regular \code{NA}-neighbour.
#' If >300 of these (min) 2-\code{NA}-neighbours get found, they will be used instead of the regular \code{NA}-neighbours.
#' For creating a collection of normal random values one may use directly the mode of the \code{NA}-neighbours (or 2-\code{NA}-neighbours, if >300 such values available).
#' To do so, the first value of argument \code{avSd} must be set to \code{NA}. Otherwise, the first value \code{avSd} will be used as quantile of all data to define the mean
#' for the imputed data (ie as \code{quantile(dat, avSd[1], na.rm=TRUE)}). The sd for generating normal random values will be taken from the sd of all \code{NA}-neighbours
#' (or of the 2-\code{NA}-neighbours, if >300 of them are available) multiplied by the second value of argument \code{avSd}, since the sd of the \code{NA}-neighbours is usually quite high.
#' In extremely rare cases it may happen that no \code{NA}-neighbours are found at all (ie whenever \code{NA}s occur, all replicates of the group are \code{NA}).
#' Then, this function replaces \code{NA}-values based on the normal random values obtained as described above.
#'
#' @param dat (matrix or data.frame) main data (may contain \code{NA})
#' @param gr (character or factor) grouping of columns of 'dat', replicate association
#' @param imputMethod (character) choose the imputation method (may be 'mode2'(default), 'mode1', 'datQuant', 'modeAdopt' or 'informed')
#' @param retnNA (logical) if \code{TRUE} a list with $data, $nNA, $NAneighbour and $randParam gets returned, otherwise only the matrix of NA-substituted data
#' @param avSd (numerical,length=2) population characteristics 'high' (mean and sd) for >1 \code{NA}-neighbours (per line)
#' @param avSdH deprecated, please use \code{avSd} instead; (numerical,length=2) population characteristics 'high' (mean and sd) for >1 \code{NA}-neighbours (per line)
#' @param NAneigLst (list) option for repeated rounds of imputations: list of \code{NA}-neighbour values can be furnished for slightly faster processing
#' @param plotHist (character or logical) decide if a supplemental figure with histogram should be drawn; the details 'hist', 'quant' (display quantile of original data) and 'mode' (display mode of original data) can be chosen explicitly
#' @param xLab (character) label on x-axis on plot
#' @param xLim (numeric, length=2) custom x-axis limits
#' @param yLab (character) label on y-axis on plot
#' @param yLim (numeric, length=2) custom y-axis limits
#' @param tit (character) title on plot
#' @param figImputDetail (logical) display details about data (number of NAs) and imputation in graph (min number of NA-neighbours per protein and group, quantile to model, mean and sd of imputed)
#' @param seedNo (integer) seed-value for normal random values
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @param debug (logical) supplemental messages for debugging
#' @return This function returns a list with \code{$data} .. matrix of data where \code{NA} are replaced by imputed values, \code{$nNA} .. number of \code{NA} by group, \code{$randParam} .. parameters used for making random data
#' @seealso this function gets used by \code{\link{testRobustToNAimputation}}; estimation of mode \code{\link[wrMisc]{stableMode}}; detection of NAs \code{\link[stats]{na.fail}}
#' @examples
#' set.seed(2013)
#' datT6 <- matrix(round(rnorm(300)+3,1), ncol=6, dimnames=list(paste("li",1:50,sep=""),
#' letters[19:24]))
#' datT6 <- datT6 +matrix(rep(1:nrow(datT6), ncol(datT6)), ncol=ncol(datT6))
#' datT6[6:7, c(1,3,6)] <- NA
#' datT6[which(datT6 < 11 & datT6 > 10.5)] <- NA
#' datT6[which(datT6 < 6 & datT6 > 5)] <- NA
#' datT6[which(datT6 < 4.6 & datT6 > 4)] <- NA
#' datT6b <- matrixNAneighbourImpute(datT6, gr=gl(2,3))
#' head(datT6b$data)
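#' ## sketch: a fixed seed (seedNo) makes the imputation reproducible
#' datT6c <- matrixNAneighbourImpute(datT6, gr=gl(2,3), seedNo=2018, plotHist=FALSE)
#' identical(datT6c$data, matrixNAneighbourImpute(datT6, gr=gl(2,3), seedNo=2018,
#'   plotHist=FALSE)$data)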
#' @export
matrixNAneighbourImpute <- function(dat, gr, imputMethod="mode2", retnNA=TRUE, avSd=c(0.15,0.5), avSdH=NULL, NAneigLst=NULL,
plotHist=c("hist","mode"), xLab=NULL, xLim=NULL, yLab=NULL, yLim=NULL, tit=NULL, figImputDetail=TRUE,
seedNo=NULL, silent=FALSE, callFrom=NULL, debug=FALSE){
## replace NA values based on group neighbours (based on grouping of columns in gr), overall assumption of close to Gaussian distribution
## return matrix including imputed values or list of final & matrix with number of imputed by group
## 'batch-mode' (iterated runs) furnish NAneigLst (with $nNaNei, $charAll, $all.lm or $linMod, $NAneighbour), avSd (postition 3+ for medMode)
fxNa <- wrMisc::.composeCallName(callFrom, newNa="matrixNAneighbourImpute")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE
datOK <- TRUE
tx1 <- "; invalid entry - nothing to do"
if(datOK) { if(length(unique(gr))==length(gr)) { datOK <- FALSE
msg <- "Argument 'gr' does not designate any replicates ! .. ie NA-neighbours can't be determined - nothing to do"} }
if(length(dat) <1) { datOK <- FALSE
msg <- c("'dat' must be list, S3-object, matrix or data.frame with min 2 rows and min 2 cols",tx1)}
if(datOK) if(is.list(dat)) {
if("quant" %in% names(dat)) {dat <- dat$quant
if(debug) message(fxNa,"'dat' is list, using element 'quant' as 'dat'")
} else { datOK <- FALSE
msg <- c("'dat' is list but does NOT contain element named 'quant'",tx1)}
} else { if(any(dim(dat) < c(2,1), na.rm=TRUE)) { datOK <- FALSE
msg <- c("'dat' matrix or data.frame with min 2 rows and min 2 cols",tx1)}}
if(datOK) { if(length(gr) != ncol(dat)) { datOK <- FALSE
msg <- c("Number of columns of 'dat' must match length of 'gr'",tx1)}}
if(datOK) {
if(is.data.frame(dat)) dat <- as.matrix(dat)
if(!is.factor(gr)) gr <- as.factor(gr)
if(is.null(xLab)) xLab <- "Values"
if(length(wrMisc::naOmit(imputMethod)) <1) { imputMethod <- "mode2"
if(!silent) message(fxNa,"Invalid entry for 'imputMethod' setting to default")}
chMeth <- imputMethod %in% c("datQuant", "medmode", "mode1", "mode2", "mode3", "modeadopt","informed","none")
if(!chMeth) {
if(!silent) message(fxNa,"Unknown method '",imputMethod,"' as entry for 'imputMethod', setting to default")
imputMethod <- "mode2"}
## extract elements for 'batch-mode'
if(debug) message(fxNa,"Extract elements for 'batch-mode' mn0")
if(is.list(NAneigLst) && length(NAneigLst) >1) {
## gather outside/previous information
nNaNei <- NAneigLst$nNaNei; charAll <- NAneigLst$charAll;
medMod <- NAneigLst$medMod
all.lm <- if("all.lm" %in% names(NAneigLst)) NAneigLst$all.lm else NAneigLst$linMod
NAneighbour <- NAneigLst$NAneighbour
if(debug) { message(fxNa," mn0a"); mn0a <- list(dat=dat,gr=gr,imputMethod=imputMethod,NAneigLst=NAneigLst,NAneighbour=NAneighbour,medMod=medMod)}
} else { NAneighbour <- NAneigLst <- nNaNei <- medMod <- all.lm <- charAll <- NULL } # initialize
if(length(seedNo) >1) { seedNo <- seedNo[1]
if(!silent) message(fxNa,"Invalid entry for argument 'seedNo', it may be single integer or NULL, setting to NULL")}
if(length(seedNo) >0) if(any(is.na(seedNo), na.rm=TRUE)) seedNo <- NULL
if(is.logical(plotHist)) { plotHist <- if(identical(TRUE, plotHist)) c("hist","quant","mode") else NULL}
## main
isNA <- is.na(dat)
chNA <- any(isNA)
if(debug) { message(fxNa,"Starting main, mn1"); mn1 <- list(dat=dat,gr=gr,isNA=isNA,chNA=chNA,imputMethod=imputMethod,NAneigLst=NAneigLst,NAneighbour=NAneighbour)}
if(length(avSdH) >1 && identical(c(0.15,0.5), avSd)) { avSd <- avSdH
if(!silent) message(fxNa,"Using depreciated 'avSdH' as substitute of (default) 'avSd', please change your code to rather use 'avSd' instead of 'avSdH' !!")
} else if(length(avSdH) >0 && !silent) message(fxNa,"Argument 'avSdH' has been depreciated, 'avSd' is used instead")
if(!chNA) {
## no NAs, nothing to impute ...
if(debug) message(fxNa,"mn1a No NAs, nothing to impute ...")
if("hist" %in% plotHist) {graphics::hist(dat, br="FD", border=grDevices::grey(0.85), col=grDevices::grey(0.92), xlab=xLab, las=1, main=tit)
graphics::mtext("No NA-replacement needed ", adj=1, cex=0.6, line=-0.3)
graphics::mtext(paste(" n=",length(dat)), side=3, line=-0.3, cex=0.55, adj=0,col=grDevices::grey(0.3)) }
return( if(isTRUE(retnNA)) list(data=dat, nNA=0, NAneighbour=NULL, randParam=NULL) else dat)
} else {
modNa <- NULL
leNAneigh <- if(!is.null(NAneighbour)) sum(sapply(NAneighbour, length)) else 0
if(leNAneigh <1 && (length(grep("mode", imputMethod)) >0 || any(c("informed","datQuant") %in% imputMethod, na.rm=TRUE))) { ## all methods using mode need NA-neighbours ...
if(debug) message(fxNa," mn1b")
if(length(NAneighbour) <1) NAneighbour <- isolNAneighb(dat, gr, silent=silent, debug=debug, callFrom=fxNa) #
leNAneigh <- sum(sapply(NAneighbour, length))
if(debug) {message(fxNa," mn2"); mn2 <- list(NAneighbour=NAneighbour)}
if(leNAneigh >0) {
chLast <- length(NAneighbour[[length(NAneighbour)]])
if(chLast==0) NAneighbour <- NAneighbour[-1*length(NAneighbour)]} # remove last field if empty (as usual)
} else { nNA <- sum(isNA)}
if(debug) {message(fxNa," mn3"); mn3 <- list()}
nNaNei <- sapply(NAneighbour, length)
## check if sufficient NAs for mode-based methods
if(debug) message(fxNa,"Checking if sufficient NAs for mode-based methods")
if(sum(nNaNei) <10 && length(grep("mode", imputMethod)) >0) {
##number of NA neighb not yet known#
if(!silent) message(fxNa,"Only ",sum(nNaNei)," NA-neighbour values available, ie insufficient to calculate representative mode, using instead 10%ile of global distribution")
imputMethod <- "datQuant"
avSd[1] <- 0.1
}
if(debug) {message(fxNa," mn4") }
## IMPUTATIONS
randVa <- NULL # initialize for 'none'
datIni <- dat
if(debug) {message(fxNa," mn4"); mn4 <- list(dat=dat,gr=gr,randVa=randVa,medMod=medMod, isNA=isNA,chNA=chNA,imputMethod=imputMethod,NAneigLst=NAneigLst,NAneighbour=NAneighbour)}
## use 'medMod' as summary of random data generated
## choose method
#save(dat,gr,imputMethod, randVa,medMod,NAneighbour,seedNo,isNA,chNA,NAneigLst, file="mn4a.RData")
if("datQuant" %in% imputMethod) { # quantile of data
if(debug) message(fxNa,"Starting method 'datQuant'")
if(length(avSd) >2 && !is.na(avSd[3])) {useQu <- avSd[3]} else { # 3rd value (if specified) may be used as custom mean for normal distrib (instead of determining as xth quantile)
if(is.na(avSd[1])) { avSd[1] <- 0.1              # check quantile value (ie where to check full data)
if(!silent) message(fxNa,"'avSd[1]' not valid, using 10% quantile instead")}
useQu <- stats::quantile(dat, avSd[1], na.rm=TRUE) }
if(length(seedNo) ==1) set.seed(seedNo)
randVa <- signif(stats::rnorm(sum(isNA), useQu, avSd[2]),5)
plotHist <- unique(c(plotHist,"quantile")) # for ploting quantile-guides
msg <- paste(signif(avSd[1],3),"quantile, ie mean=",signif(useQu,4),"and sd=", signif(avSd[2],4))
}
if("medmode" %in% tolower(imputMethod)) { # whatever is lowest: global median or global mode of all NA neighbours
if(debug) message(fxNa,"Starting method 'medmode'")
if(length(medMod) <1) medMod <- c(med=stats::median(unlist(NAneighbour)), mod=wrMisc::stableMode(unlist(NAneighbour), method="density", callFrom=fxNa, silent=silent))
if(debug) {message(fxNa," mn5a") }
if(length(seedNo) ==1) set.seed(seedNo)
randVa <- signif(stats::rnorm(sum(isNA), min(medMod), avSd[2]), 5)
msg <- paste("mean=",signif(min(medMod),4),"and sd=", signif(avSd[2],4))
}
if(any(c("mode1","mode3","informed") %in% imputMethod, na.rm=TRUE)) { # global mode of all NA neighbours
if(debug) message(fxNa,"Prepare for methods 'mode1','mode2' and 'informed'")
if(length(medMod) <1 && sum(sapply(NAneighbour, length)) >0) {
medMod <- wrMisc::stableMode(unlist(NAneighbour), method="density", silent=silent, callFrom=fxNa) }
if(length(seedNo) ==1) set.seed(seedNo)
randVa <- signif(stats::rnorm(sum(isNA), medMod, avSd[2]),5)
msg <- paste("mode=",signif(medMod,4),"and sd=", signif(avSd[2],4)) # correct ??
}
if(debug) {message(fxNa," mn5b") }
if("mode2" %in% imputMethod) { # selective mode of NA of '2-NA-neighbours' if n.2NA > 300
if(debug) message(fxNa,"Starting methods 'mode2'")
if(length(medMod) <1) medMod <- wrMisc::stableMode(if(sum(sapply(NAneighbour[-1], length)) >300) unlist(NAneighbour[-1]) else unlist(NAneighbour), method="density", callFrom=fxNa,silent=silent)
if(length(seedNo) ==1) set.seed(seedNo)
randVa <- signif(stats::rnorm(sum(isNA), medMod, avSd[2]),5)
msg <- paste("mean=",signif(medMod,4),"and sd=", signif(avSd[2],4))
}
if("informed" %in% imputMethod) { # informed about min abundance in line with any NA, use mean of line-min and non-biased NA-neigh random value
## determine "biased" view : min per line with any NA
## still has a problem when all replicates of one group are NA while the line is highly abundant in the other group
if(debug) message(fxNa,"Starting method 'informed'")
NAliCo <- rowSums(isNA)
NAli <- which(NAliCo >0 & NAliCo < ncol(dat)) # lines with NA but not all NA
NAmin <- rep(NA, nrow(dat))
if(debug) {message(fxNa," mn5c"); mn5c <- list()}
if(length(NAli) >0) {
## prepare
indC <- matrix(0, nrow=nrow(isNA), ncol=ncol(isNA))
indC[NAli,] <- 1 # matrix (full dim) indicating where min-informed correction can be done
indC <- 0 + isNA + indC # is 2 if NA and eligible to min-informed
isNA2 <- 0 + isNA
isNA2[which(indC ==2)] <- 2 # matrix (full dim), 2 .. NA eligible to informed cor; 1.. NA not elig
if(debug) {message(fxNa," mn5c2") ; mn5c2 <- list()}
NAmin <- apply(dat[NAli,], 1, min, na.rm=TRUE)   # min of line with any (but not all) NA (for biased estimate of NA-replacement)
ReMa <- matrix(rep(NAmin, ncol(isNA)), ncol=ncol(isNA)) # matrix of row-min values
ReMa[which(indC[NAli,] !=2)] <- NA # leave only value at position eligible to min-informed correction
useInd <- which(isNA2[which(isNA2 >0)] >1) # which randVa is eligible to min-informed cor
## compare to level of regular NA-neighb
corF <- wrMisc::naOmit(as.numeric(ReMa)) - medMod
chUp <- corF >0
if(any(chUp, na.rm=TRUE)) corF[which(chUp)] <- 0 # don't use if higher than mode of NA-neighb
## apply min-informed cor
randVa[useInd] <- randVa[useInd] +corF # mean of imputed and non-biased rand Va
}
}
if("modeadopt" %in% tolower(imputMethod)) { # flexible/adopt
## median and mode of NA-neighbours all/ by group :
if(debug) {message(fxNa,"Starting method 'modeadopt'"," mn5d") }
if(length(charAll) <1) { charAll <- c(mean=mean(unlist(NAneighbour)), med=stats::median(unlist(NAneighbour)),
mode=as.numeric(wrMisc::stableMode(unlist(NAneighbour), method="density", silent=TRUE)))
charAll <- rbind(charAll, cbind(sapply(NAneighbour, mean), sapply(NAneighbour, stats::median), sapply(NAneighbour, wrMisc::stableMode, method="density", silent=TRUE, callFrom=fxNa)))}
modNa <- charAll[1,3]
## model for dynamic NA-neighbour mean (hypoth for further interpolating)
nRa <- as.integer(sub("n","", names(NAneighbour) ))
if(length(all.lm) <1) all.lm <- stats::lm(a~n, data=data.frame(n=rep(nRa,nNaNei), a=unlist(NAneighbour)))
pVaSlo <- summary(stats::aov(all.lm))[[1]][1,"Pr(>F)"] # not < 0.05
if(pVaSlo < 0.1 && stats::coef(all.lm)[2] <0) {
## dynamic NA-substit depending on number of NAs per prot&repl
prLev <- stats::predict(all.lm, new=data.frame(n=1:(max(nRa) +1)))
ranOff <- prLev -charAll[1,1] # chose to subtract mode of all NA-neighbours (as reference)
nNaGrp <- wrMisc::rowGrpNA(dat, gr) # check : sum(nNaGrp) == sum(isNA) # here 2845
ranOff <- charAll[1,1] - rep(ranOff[nNaGrp], nNaGrp[which(nNaGrp >0)]) # final offset in order for is.na(dat)
msg <- paste("mean=",wrMisc::pasteC(signif(prLev,4)),"(for",wrMisc::pasteC(1:(1+max(nRa))),"NAs) and sd=", signif(avSd[2],4))
if(!silent) message(fxNa,"Substituting dynamically based on mean per number of NAs")
} else {
## constant NA-substit, use min of mean, median and mode
ranOff <- charAll[1,1] - rep(min(charAll[1,]), sum(isNA))
msg <- paste("mean=",signif(min(charAll[1,]),4),"and sd=", signif(avSd[2],4))
if(!silent) message(fxNa,"Substituting based on ",c("mean","median","mode")[which.min(charAll[1,])]," of all ",sum(nNaNei)," NA-neighbours")
}
if(length(seedNo) ==1) set.seed(seedNo)
randVa <- signif(stats::rnorm(sum(isNA), charAll[1,1], avSd[2]) -ranOff, 5) # initial global value (median for all NA-neighb) + offset/correction
}
## replace NAs
if(debug) {message(fxNa," mn6") }
if(!"none" %in% tolower(imputMethod)) dat[which(isNA)] <- randVa
chImp <- stats::quantile(datIni, c(0.05,0.15), na.rm=TRUE)
chImp <- c(mean(randVa, na.rm=TRUE) < chImp[1], mean(randVa, na.rm=TRUE) > chImp[2])
msg <- list(li1=c(" n.woNA=",sum(!isNA),", n.NA =",sum(isNA)),
li2=c("Imputing based on",paste0("'",imputMethod,"'"),"using",msg),
li3=if(any(chImp, na.rm=TRUE)) c("Note, mean for imputation is ",if(chImp[1]) "below 0.05 " else "above 0.15", "quantile !!") )
if(!silent) message(fxNa, paste(sapply(msg, paste, collapse=" "), collapse="\n "))
## FIGURE
if("hist" %in% tolower(plotHist)) {
if(!silent) message(fxNa,"Plotting figure")
hi1 <- graphics::hist(as.numeric(dat), breaks="FD", col=grDevices::grey(0.9), border=grDevices::grey(0.8),
xlab=xLab, ylab=yLab, las=1, ylim=yLim, main=paste(tit,"at NA-Replacement")) #xlim=xLim, # grey cols (final distr)
colPanel <- c(grDevices::grey(0.6), grDevices::rgb(0,0.7,0,0.6), grDevices::rgb(0.15,0.15,0.7,0.7), grDevices::rgb(0.7,0.5,0.2,0.6), grDevices::rgb(0.8,0.2,0.7,0.7))
graphics::hist(datIni, breaks=hi1$breaks, border=grDevices::grey(0.75), col=grDevices::rgb(0.1,1,0.1,0.15), add=TRUE) # orig data in green
if(length(randVa) >5) graphics::hist(randVa, br=hi1$breaks, border=grDevices::grey(0.75), col=grDevices::rgb(0,0,0.7,0.2), add=TRUE) # add purple hist to
nextLi <- -1.7
if(any(c("quant","quantile") %in% plotHist, na.rm=TRUE)) {
graphics::abline(v=stats::quantile(datIni, c(0.05,0.1,0.15), na.rm=TRUE), lty=2, col=c(colPanel[4:5],"tomato4"))
nextLi <- nextLi -c(0, 0.5, 1.1)
graphics::mtext(paste(" - - ",c(0.05,0.1,0.15),"quantile (initial data)"), col=c(colPanel[4:5],"tomato4"), cex=0.7, adj=0, line=nextLi, side=3)
nextLi <- min(nextLi) -0.6 }
if(length(modNa) >0) { # display mode
yLim <- signif(graphics::par("usr")[3:4], 3) # current y-limits
if(any(c("mode") %in% plotHist, na.rm=TRUE)) {
graphics::mtext(paste(" (arrow) mode of", NULL, " NA-neighbours :", signif(modNa,4)), col="sienna2", cex=0.7, adj=0, line=nextLi, side=3)
graphics::arrows(modNa, yLim[1] - (yLim[2] -yLim[1])/18, modNa, 0, length=0.1, col="sienna2",lwd=2) }
}
if(isTRUE(figImputDetail)) graphics::mtext(paste(sapply(msg[1:2],paste,collapse=" "),collapse="\n "), side=3, line=-1.2, cex=0.75, adj=0,col=grDevices::grey(0.3))
graphics::legend("topright",c("final","initial","imputed"), col=colPanel, text.col=colPanel, cex=0.9, seg.len=0.3, lwd=4)
}
return(if(isTRUE(retnNA)) list(data=dat, nNA=sum(isNA) , randParam=imputMethod, NAneigLst=list(NAneighbour=NAneighbour, nNaNei=nNaNei, medMod=medMod, charAll=charAll, linMod=all.lm)) else dat)
}
} else { if(!silent) message(fxNa,msg)}
dat }
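
## Illustrative sketch (not part of the package): with imputMethod='datQuant' the mean
## of the imputed values is taken as quantile(dat, avSd[1]) and their sd as avSd[2];
## the toy matrix and the avSd settings below are arbitrary choices
set.seed(2015); d1 <- matrix(round(rnorm(80, 12, 1.5), 2), ncol=4)
d1[sample(length(d1), 8)] <- NA
i1 <- matrixNAneighbourImpute(d1, gr=gl(2,2), imputMethod="datQuant",
  avSd=c(0.1, 0.4), plotHist=FALSE)
sum(is.na(i1$data))                  # expect 0, all NAs were replaced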
#' Basic NA-imputation (main)
#'
#' This (lower-level) function allows performing the basic NA-imputation.
#' Note, at this point the information from argument \code{gr} is not used.
#'
#' @param dat (matrix or data.frame) main data (may contain \code{NA})
#' @param gr (character or factor) grouping of columns of \code{dat}, replicate association
#' @param impParam (numeric) 1st for mean; 2nd for sd; 3rd for seed
#' @param exclNeg (logical) exclude negative
#' @param inclLowValMod (logical) if \code{TRUE} return a list with \code{$data} and \code{$datImp} (the imputed values), otherwise only the imputed data-matrix
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @param debug (logical) supplemental messages for debugging
#' @return This function returns a list with \code{$data} and \code{$datImp} (or only the imputed data-matrix if \code{inclLowValMod=FALSE})
#' @seealso for more complex treatment \code{\link{matrixNAneighbourImpute}};
#' @examples
#' dat1 <- matrix(11:22, ncol=4)
#' dat1[3:4] <- NA
#' .imputeNA(dat1, impParam=c(mean(dat1, na.rm=TRUE), 0.1))
#'
#' @export
.imputeNA <- function(dat, gr=NULL, impParam, exclNeg=TRUE, inclLowValMod=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## basic NA imputation for 'dat' using 'impParam' (1st for mean, 2nd for sd, optional seed as 3rd)
## 'impParam' .. (numeric) 1st for mean; 2nd for sd; 3rd for seed
## used (so far) in subsequent loops of testRobustToNAimputation
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".imputeNA")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE
isNa <- is.na(dat)
tmp <- if(length(impParam) >2) try(set.seed(as.integer(impParam[3])), silent=TRUE) else NULL
if(inherits(tmp, "try-error")) message(fxNa,"FAILED to set seed !")
if(length(impParam) <2 || any(is.na(impParam))) { message(fxNa,"BAD input parameters, nothing to do !")
} else {
impDat <- try(stats::rnorm(round(1.5*sum(isNa)), mean=impParam[1], sd=impParam[2]), silent=TRUE)
if(debug) {message(fxNa," iNA1") }
if(inherits(impDat, "try-error")) {
message(fxNa,"FAILED to generate random data !! Return data as input (may contain NAs)")
} else {
if(exclNeg) impDat <- impDat[which(impDat >0)]
dat[which(isNa)] <- impDat[1:sum(isNa)]
if(inclLowValMod) return(list(data=dat, datImp=impDat))
} }
dat }
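
## Illustrative sketch (not part of the package): a seed given as 3rd element of
## 'impParam' makes the random draws reproducible; the values below are arbitrary
d2 <- matrix(11:22, ncol=4); d2[c(3,8)] <- NA
identical(.imputeNA(d2, impParam=c(10, 0.3, 7)), .imputeNA(d2, impParam=c(10, 0.3, 7)))  # TRUE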
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/matrixNAneighbourImpute.R
|
#' Plot ROC curves
#'
#' \code{plotROC} plots ROC curves based on results from \code{\link{summarizeForROC}}.
#' This function plots only, it does not return any data. It allows printing simultaneously multiple ROC curves from different studies,
#' it is also compatible with data from 3 species mix as in proteomics benchmark.
#' Input can be prepared using \code{\link[wrMisc]{moderTest2grp}} followed by \code{\link{summarizeForROC}}.
#'
#' @param dat (matrix) from testing (eg \code{\link{summarizeForROC}} )
#' @param ... optional additional data-sets to include as separate ROC-curves to the same plot (must be of the same type of format as 'dat')
#' @param useColumn (integer or character, length=2) columns from \code{dat} to be used for specificity and sensitivity
#' @param methNames (character) names of methods (data-sets) to be displayed
#' @param col (character) custom colors for lines and text (choose one color for each different data-set)
#' @param pch (integer) type of symbol to be used (see also \code{\link[graphics]{par}})
#' @param bg (character) background color in plot (see also \code{\link[graphics]{par}})
#' @param tit (character) custom title
#' @param xlim (numeric, length=2) custom x-axis limits
#' @param ylim (numeric, length=2) custom y-axis limits
#' @param point05 (numeric) specific point to highlight in plot (typically at alpha=0.05)
#' @param pointSi (numeric) size of points (as expansion factor \code{cex})
#' @param nByMeth (integer) value of n to display
#' @param speciesOrder (integer) custom order of species in legend
#' @param txtLoc (numeric, length=3) location for text (x, y location and proportional factor for line-offset, default is c(0.4,0.3,0.04))
#' @param legCex (numeric) cex expansion factor for legend (see also \code{\link[graphics]{par}})
#' @param las (numeric) factor for text-orientation (see also \code{\link[graphics]{par}})
#' @param addSuplT (logical) add text with information about precision,accuracy and FDR
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns only a plot with ROC curves
#' @seealso \code{\link[wrProteo]{summarizeForROC}}, \code{\link[wrMisc]{moderTest2grp}}
#' @examples
#' roc0 <- cbind(alph=c(2e-6,4e-5,4e-4,2.7e-3,1.6e-2,4.2e-2,8.3e-2,1.7e-1,2.7e-1,4.1e-1,5.3e-1,
#' 6.8e-1,8.3e-1,9.7e-1), spec=c(1,1,1,1,0.957,0.915,0.915,0.809,0.702,0.489,0.362,0.234,
#' 0.128,0.0426), sens=c(0,0,0.145,0.942,2.54,2.68,3.33,3.99,4.71,5.87,6.67,8.04,8.77,
#' 9.93)/10, n.pos.a=c(0,0,0,0,2,4,4,9,14,24,36,41) )
#' plotROC(roc0)
#' @export
plotROC <- function(dat,..., useColumn=2:3, methNames=NULL, col=NULL, pch=1, bg=NULL, tit=NULL, xlim=NULL, ylim=NULL, point05=0.05, pointSi=0.85, nByMeth=NULL,
speciesOrder=NULL, txtLoc=NULL, legCex=0.72, las=1, addSuplT=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
##
fxNa <- wrMisc::.composeCallName(callFrom, newNa="plotROC")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!isFALSE(addSuplT)) addSuplT <- TRUE
inpSu <- list(...)
chInp <- lapply(c("dat","useColumn","methNames","col","pch","bg","tit","point05","pointSi","nByMeth","txtLoc","legCex"), wrMisc::.cutStr, startFr=2,reverse=TRUE)
chAr <- names(inpSu) %in% unlist(chInp)
if(any(chAr)) {
## if argument names changed/ not complete need to change/adjust code here !!
inpSu <- inpSu[which(!chAr)]
}
## organize all data in list
inpAsMultDat <- is.list(dat)
if(debug) {message(fxNa,"'dat' was given as list ",inpAsMultDat," length .../inpSu ", length(inpSu)," plR0"); plR0 <- list(dat=dat,inpSu=inpSu,chAr=chAr, useColumn=useColumn,methNames=methNames,tit=tit,col=col)}
#inpAsMultDat <- if(inpAsMultDat) is.list(dat[[1]]) else FALSE
#inpAsMultDat <- if(inpAsMultDat) is.list(dat[[1]][[1]]) else FALSE
inpS <- if(inpAsMultDat) dat else list(dat)
if(debug) {message(fxNa,"inpS list ",is.list(inpS)," le ", length(inpS)," plR0b")}
## rm(dat)
if(length(inpSu) >0) {
inpS[1+ 1:length(inpSu)] <- inpSu
}
if(debug) {message(fxNa,"inpS list ",is.list(inpS)," le ", length(inpS)," plR0c")}
if(is.null(tit)) tit <- paste("ROC")
if(is.null(col)) col <- if(length(inpS)==1) 1 else c(grDevices::grey(0.4), 2:(1+length(inpS)))
xLab <- "1 - Specificity"
yLab <- "Sensitivity"
nDigLeg <- c(3,2,2,2) # number of digits for supl info /legend (AUC/prec/accur/FDR)
if(!is.numeric(xlim) || length(xlim) !=2) xlim <- c(0,1)
if(debug) {message(fxNa,"plR1"); plR1 <- list(dat=dat,inpS=inpS,inpSu=inpSu,chAr=chAr, useColumn=useColumn,methNames=methNames,tit=tit,col=col,xLab=xLab,yLab=yLab,nDigLeg=nDigLeg,point05=point05,pointSi=pointSi)}
graphics::plot(1 -inpS[[1]][,useColumn[1]], inpS[[1]][,useColumn[2]], type="n", col=col[1], pch=pch, bg=bg, main=tit, xlab=xLab, ylab=yLab, xlim=xlim, ylim=if(length(ylim)==2) ylim else c(0,1),las=las) # main frame wo points
col2 <- col
cutP <- inpS[[1]][which(inpS[[1]][,1]==point05),]
if(length(stats::na.omit(point05))==1) {
newPch <- cbind(c(1,16,2,17, 7,15,5,6), new=c(21,21,24,24,22,22,23,25)) # transform open or plain filled points to color-filled
if(pch %in% newPch[,1]) { pch2 <- newPch[which(newPch[,1]==pch),2]; bg <- wrMisc::convColorToTransp(col,0.1);
col2 <- grDevices::grey(0.4) } else {pch2 <- pch; col2 <- col}
graphics::points(1 -cutP[useColumn[1]], cutP[useColumn[2]], col=col2, pch=pch2, bg=col[1], cex=pointSi) }
graphics::points(1 -inpS[[1]][,useColumn[1]], inpS[[1]][,useColumn[2]], type="s", col=col[1], pch=pch, bg=bg) # main curve
coColN <- colnames(inpS[[1]])[wrMisc::naOmit(grep("n\\.pos\\.",colnames(inpS[[1]])))] # more flexible (also to number of species/tags)
if(debug) {message(fxNa,"plR1b"); plR1b <- list(dat=dat,inpS=inpS,inpSu=inpSu,chAr=chAr, col2=col2,cutP=cutP,coColN=coColN,useColumn=useColumn,methNames=methNames,tit=tit,col=col,xLab=xLab,yLab=yLab,nDigLeg=nDigLeg,point05=point05,pointSi=pointSi)}
if(length(coColN) <2) { coColN <- colnames(inpS[[1]])[(ncol(inpS[[1]]) -2):ncol(inpS[[1]])]
if(!silent) message(fxNa," Can't find 'n.pos.' tag among colnames of 'dat', assuming last 3 columns") }
if(length(speciesOrder) <length(coColN)) speciesOrder <- c(1:length(coColN))
coColN <- coColN[speciesOrder]
coColN1 <- sub("n\\.pos\\.","",coColN)
coColN2 <- paste0(" n.",paste0(coColN1,collapse="/")," ")
  if(length(txtLoc) !=3) { figDim <- signif(graphics::par("usr"),3)
    txtLoc <- c(x=figDim[1] +0.42*(figDim[2] -figDim[1]), y=figDim[3] + (0.3 +length(inpS))*(figDim[4] -figDim[3])/30, fac=0.037*(figDim[4] -figDim[3])) }
if(debug) {message(fxNa,"plR2")}
  AUC1 <- c( AucROC(inpS[[1]], silent=silent, callFrom=fxNa),
    if(length(inpS) >1) sapply(inpS[-1], AucROC, silent=silent, callFrom=fxNa))
AUC1 <- sprintf(paste0("%.",nDigLeg[1],"f"),AUC1) # format to fixed no of digits
if(debug) {message(fxNa,"plR3"); plR3 <- list(dat=dat,inpS=inpS,inpSu=inpSu,chAr=chAr,cutP=cutP,coColN=coColN, txtLoc=txtLoc,useColumn=useColumn,methNames=methNames,tit=tit,col=col,xLab=xLab,yLab=yLab,nDigLeg=nDigLeg)}
if(addSuplT) { # add legend-like method-name/descr
txt <- if(is.null(nByMeth)) methNames[1] else paste0(methNames[1]," (n.test=",nByMeth[1],") ")
graphics::text(txtLoc[1] -txtLoc[3], txtLoc[2] +txtLoc[3], paste("Values at threshold of",point05,":"), cex=0.75, col=grDevices::grey(0.4), adj=0)
graphics::text(txtLoc[1], txtLoc[2], txt, cex=legCex+0.02, col=col[1], adj=1)
if(addSuplT) graphics::text(txtLoc[1] +0.02, txtLoc[2], paste(paste(paste(c("AUC=","prec=","accur=","FDR="), #"n.E/S/H="
c(AUC1[1],round(cutP[c("prec","accur","FDR")],nDigLeg[-1]))),collapse=" "), coColN2 ,cutP[coColN[1]],cutP[coColN[2]],
if(length(coColN)>2) cutP[coColN[3]]), cex=legCex, col=col[1], adj=0) }
if(debug) {message(fxNa,"plR4"); plR4 <- list(dat=dat,inpS=inpS,inpSu=inpSu,chAr=chAr,cutP=cutP,coColN=coColN, txtLoc=txtLoc,useColumn=useColumn,methNames=methNames,tit=tit,col=col,xLab=xLab,yLab=yLab,nDigLeg=nDigLeg)}
if(length(inpS) >1) {
for(i in 2:length(inpS)) { ## additional ROC curves
if(length(inpS[[i]]) >0) if(nrow(inpS[[i]]) >0) {
cutP <- inpS[[i]][which(inpS[[i]][,1]==point05),]
      if(length(stats::na.omit(point05))==1) graphics::points(1 -cutP[useColumn[1]], cutP[useColumn[2]], col=col2, pch=pch2[1], bg=col[i], cex=pointSi)   # new point at alpha
      graphics::points(1-inpS[[i]][,useColumn[1]],inpS[[i]][,useColumn[2]], type="s",col=col[i], pch=pch, bg=bg[i])   # new ROC curve
if(addSuplT) {
txt <- if(is.null(nByMeth)) methNames[i] else paste0(methNames[i]," (n.test=",nByMeth[i],") ")
graphics::text(txtLoc[1], txtLoc[2]-txtLoc[3]*i,txt, cex=legCex+0.02, col=col[i], adj=1) # first block
graphics::text(txtLoc[1] +0.02, txtLoc[2]-txtLoc[3]*i, paste(paste(paste(c("AUC=","prec=","accur=","FDR="),
c(AUC1[i],round(cutP[c("prec","accur","FDR")],nDigLeg[-1]))),collapse=" "),coColN2,cutP[coColN[1]],cutP[coColN[2]], # first block (with counting data)
if(length(coColN)>2) cutP[coColN[3]]), cex=legCex, col=col[i], adj=0)
}
} } }
}
#' Filter based on either number of total peptides and specific peptides or number of razor peptides
#'
#' \code{razorNoFilter} filters based on either a) number of total peptides and specific peptides or b) number of razor peptides.
#' This function was designed for filtering using a minimum number of (PSM-) count values following the common practice to consider results with 2 or more peptide counts as reliable.
#' The function can be (re-)run independently on each of various questions (comparisons).
#' Note: Non-integer data will be truncated to integer (equivalent to \code{floor}).
#'
#' @param annot (matrix or data.frame) main data (may contain NAs) with (PSM-) count values for each protein
#' @param speNa (integer or character) indicate which column of 'annot' has number of specific peptides
#' @param totNa (integer or character) indicate which column of 'annot' has number of total peptides
#' @param minRazNa (integer or character) name of column with number of razor peptides, alternative to 'minSpeNo'& 'minTotNo'
#' @param minSpeNo (integer) minimum number of specific peptides
#' @param minTotNo (integer) minimum number of total peptides (this threshold is also applied to razor peptides when 'minRazNa' is used)
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a vector of logical values indicating whether the corresponding line passes the filter criteria
#' @seealso \code{\link[wrMisc]{presenceFilt}}
#' @examples
#' set.seed(2019); datT <- matrix(sample.int(20,60,replace=TRUE), ncol=6,
#' dimnames=list(letters[1:10], LETTERS[1:6])) -3
#' datT[,2] <- datT[,2] +2
#' datT[which(datT <0)] <- 0
#' razorNoFilter(datT, speNa="A", totNa="B")
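#' # Hedged sketch of the razor-mode, using a hypothetical column 'razor' :
#' datR <- cbind(datT, razor=datT[,"A"])
#' razorNoFilter(datR, minRazNa="razor", minTotNo=2)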
#' @export
razorNoFilter <- function(annot, speNa=NULL, totNa=NULL, minRazNa=NULL, minSpeNo=1, minTotNo=2, silent=FALSE, debug=FALSE, callFrom=NULL) {
fxNa <- wrMisc::.composeCallName(callFrom, newNa="razorNoFilter")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
msg <- NULL
doFilter <- length(annot) >0
  if(length(speNa) != length(totNa) && is.null(minRazNa)) { doFilter <- FALSE
    msg <- "Length of 'speNa' differs from 'totNa', can't run filtering" }
if(debug) message(fxNa,"rNF1")
if(doFilter) {
if(is.null(minRazNa)) {
specPe <- as.integer(annot[,speNa]) >= minSpeNo
totPe <- as.integer(annot[,totNa]) >= minTotNo
filt <- (specPe & totPe)
} else {
filt <- as.integer(annot[,minRazNa]) >= minTotNo
}
} else { filt <- NULL
    if(!silent) message(fxNa,if(length(msg) >0) msg else "Invalid argument 'annot'")}
filt }
#' Checking presence of knitr and rmarkdown
#'
#' This function allows checking presence of knitr and rmarkdown
#'
#' @param tryF (logical) optionally try a minimal call to each package to test basic functionality
#' @return This function returns a logical value
#' @seealso \code{\link[wrMisc]{presenceFilt}}
#' @examples
#' .checkKnitrProt()
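#' # with 'tryF=TRUE' a minimal call to each package is attempted, too
#' .checkKnitrProt(tryF=TRUE)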
#' @export
.checkKnitrProt <- function(tryF=FALSE) {
## function for checking presence of knitr and rmarkdown
  ## needed to explicitly call functions of packages
chPaR <- try(find.package("rmarkdown"), silent=TRUE)
chPaK <- try(find.package("knitr"), silent=TRUE)
if(inherits(chPaR, "try-error")) warning("package 'rmarkdown' not found ! Please install from CRAN") else {
if(tryF) rmarkdown::pandoc_available() }
if(inherits(chPaK, "try-error")) warning("package 'knitr' not found ! Please install from CRAN") else {
if(tryF) knitr::kable(matrix(1:4, ncol=2)) }
}
#' Read (Normalized) Quantitation Data Files Produced By AlphaPept
#'
#' Protein quantification results from \href{https://github.com/MannLabs/alphapept}{AlphaPept} can be read using this function.
#' Input files compressed as .gz can be read as well.
#' The protein abundance values (XIC) get extracted. Since protein annotation is not very extensive with this format of data, the function allows reading the
#' initial fasta files (from the directory above the quantitation-results) allowing to extract more protein-annotation (like species).
#' Sample-annotation (if available) can be extracted from sdrf files, too.
#' The protein abundance values may be normalized using multiple methods (median normalization as default), the determination of normalization factors can be restricted to specific proteins
#' (normalization to bait protein(s), or to invariable matrix of spike-in experiments).
#' The protein annotation data gets parsed to extract specific fields (ID, name, description, species ...).
#' Besides, a graphical display of the distribution of protein abundance values may be generated before and after normalization.
#'
#' @details
#'
#' Meta-data describing the samples and experimental setup may be available from a sdrf-file (from the directory above the analysis/quantification results).
#' If available, the meta-data will be examined for determining groups of replicates and
#' the results thereof can be found in $sampleSetup$levels.
#' Alternatively, a dataframe formatted like sdrf-files (ie for each sample a separate line, see also function \code{readSdrf}) may be given, too.
#'
#' This import-function has been developed using AlphaPept version x.x.
#' The final output is a list containing these elements: \code{$raw}, \code{$quant}, \code{$annot}, \code{$counts}, \code{$sampleSetup}, \code{$quantNotes}, \code{$notes}, or (if \code{separateAnnot=FALSE}) data.frame
#' with annotation- and main quantification-content. If \code{sdrf} information has been found, an additional list-element \code{setup}
#' will be added containing the entire meta-data as \code{setup$meta} and the suggested organization as \code{setup$lev}.
#'
#'
#' @param fileName (character) name of file to be read (default 'results_proteins.csv'). Gz-compressed files can be read, too.
#' @param path (character) path of file to be read
#' @param fasta (logical or character) if \code{TRUE} the (first) fasta from one directory higher than \code{fileName} will be read as fasta-file to extract further protein annotation;
#'  if \code{character} a fasta-file at this location will be read/used
#' @param isLog2 (logical) typically data read from AlphaPept are NOT log2-scaled, thus this argument defaults to \code{FALSE}
#' @param normalizeMeth (character) normalization method (defaults to \code{none}, for more details see \code{\link[wrMisc]{normalizeThis}})
#' @param extrColNames (character or \code{NULL}) custom definition of col-names to extract
#'
#'
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoid -Inf in log2 results)
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param specPref (character) prefix to identifiers allowing to separate i) recognize contamination database, ii) species of main identifications and iii) spike-in species
#' @param refLi (character or integer) custom specify which line of data should be used for normalization, ie which line is main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param remRev (logical) option to remove all protein-identifications based on reverse-peptides
#' @param remConta (logical) option to remove all proteins identified as contaminants
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (logical, character, list or data.frame) optional extraction and adding of experimental meta-data:
#'  if \code{sdrf=TRUE} the 1st sdrf in the directory above \code{fileName} will be used;
#'  if character, this may be the ID at ProteomeExchange,
#'  the second element may give further indications for automatic organization of groups of replicates.
#'  Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by AlphaPept; if \code{gr} is provided, it gets priority for grouping of replicates;
#'  if \code{TRUE} defaults to files 'summary.txt' (needed to match information of \code{sdrf}) and 'parameters.txt' which can be found in the same folder as the main quantitation results;
#'  if \code{character} the respective file-names (relative or absolute path), 1st is expected to correspond to 'summary.txt' (tabulated text, the samples as given to AlphaPept) and 2nd to 'parameters.txt' (tabulated text, all parameters given to AlphaPept)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical) optional plot vioplot of initial and normalized data (using \code{normalizeMeth}); alternatively the argument may contain numeric details that will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (numeric) relative expansion factor of the violin in plot
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with 'PSM' and 'NoOfRazorPeptides',
#'  \code{$quantNotes}, \code{$notes} and optional \code{setup} for meta-data from \code{sdrf}; or a data.frame with quantitation and annotation if \code{separateAnnot=FALSE}
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readProteomeDiscovererFile}}; \code{\link{readProlineFile}} (and other import-functions), \code{\link{matrixNAinspect}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' # Here we'll load a short/trimmed example file
#' fiNaAP <- "tinyAlpaPeptide.csv.gz"
#' dataAP <- readAlphaPeptFile(file=fiNaAP, path=path1, tit="tiny AlphaPaptide ")
#' summary(dataAP$quant)
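#' # Hedged sketch : tags for recognizing contaminants and main species may be given
#' # via 'specPref' (the tags below are illustrative assumptions, adjust to your fasta)
#' # dataAP2 <- readAlphaPeptFile(fiNaAP, path=path1, specPref=c(conta="CON_",
#' #   mainSpecies="_HUMAN"))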
#' @export
readAlphaPeptFile <- function(fileName="results_proteins.csv", path=NULL, fasta=NULL, isLog2=FALSE, normalizeMeth="none", quantCol="_LFQ$", contamCol=NULL,
read0asNA=TRUE, refLi=NULL, sampleNames=NULL, # pepCountCol=c("number_of_peptides"), extrColNames=c("protein_group"),
specPref=NULL, extrColNames=NULL,
remRev=TRUE, remConta=FALSE, separateAnnot=TRUE, gr=NULL, sdrf=NULL, suplAnnotFile=NULL, groupPref=list(lowNumberOfGroups=TRUE),
titGraph=NULL, wex=1.6, plotGraph=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## prepare
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readAlphaPeptFile")
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
remStrainNo <- TRUE # if TRUE extract Species in very stringent pattern
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
fixSpeciesNames <- TRUE
trimColNames <- TRUE ## further trim quantitation colnames
chCol <- NULL
## functions
.cleanMQann <- function(x, sep="\\|", silent=FALSE, debug=FALSE, callFrom=NULL) {
## split multiple protein entries as with 1st column of MaxQuant data
## return matrix with
## example ann1 <- read.delim(file.path(system.file("extdata", package="wrProteo"), "tinyWombCompo1.csv.gz"), sep=",", stringsAsFactors=FALSE)[,1]
## .cleanMQann(ann1)
# x=rAP4a$tmp[c(5,31:32,81:82,111:114),1]
    xIni <- x     # keep backup for recuperating bizarre non-parsed entries
isCont <- grepl("CON__", x)
mult <- nchar(x) - nchar(gsub(";", "", x))
chMult <- mult >0
if(any(chMult)) {
spl1 <- strsplit(x[which(chMult)], ";")
## use entry with most separators (when multiple entries, eg 'sp|P00761|CON__TRYP_PIG;CON__P00761')
spl1 <- sapply(spl1, function(y) { nSep <- nchar(y) - nchar(gsub("|","",y)); y[which.max(nSep)] })
x[which(chMult)] <- spl1 }
## split separators
chSpl <- function(y) {chID <- grepl("^[[:upper:]]{1,3}[[:digit:]]{2,}|^[[:upper:]]{1,3}[[:digit:]]+[[:upper:]]+[[:digit:]]*", y); chName <- grepl("[A-Z0-9]_[[:upper:]]",y); # extract db, ID & prot-name
c(dbIni= if((length(y) >1 && grepl("^[[:lower:]]{1,8}$", y[1])) || length(y) >2 && grepl("^[[:lower:]]{2}|[[:lower:]]{2}$",
y[1])) y[1] else NA, IDini=if(any(chID)) y[which(chID)[1]] else NA, nameIni=if(any(chName)) y[which(chName)[1]] else NA) }
x <- t(sapply(strsplit(x, sep), chSpl))
nColIni <- ncol(x)
cleanID <- function(y, useCol=c(db=1, ID=2, name=3)) {
ext <- grepl("[[:lower:]]+$", y[,useCol[2]]) # look for extension like 'P08758ups'
extNoDb <- which(ext & is.na(y[,useCol[1]]))
if(any(ext)) { cleanID <- sub("[[:lower:]]+$","", y[which(ext), useCol[2]])
if(length(extNoDb) >0) y[which(ext), useCol[1]] <- substring(y[which(ext), useCol[2]], nchar(cleanID) +1 )
y[which(ext), useCol[2]] <- cleanID }
prefi <- grepl("^[[:upper:]]+__[[:upper:]]", y[,useCol[3]]) # look for prefix like 'CON__FA5_BOVIN'
if(any(prefi)) { ch2 <- grepl("[A-Z0-9]_[[:upper:]]", y[which(prefi), useCol[3]]); if(any(ch2)) {
y[which(prefi)[which(ch2)], useCol[1]] <- tolower(sub("__[[:upper:]].+","", y[which(prefi)[which(ch2)], useCol[3]]))
y[which(prefi)[which(ch2)], useCol[3]] <- sub("^[[:upper:]]+__","", y[which(prefi)[which(ch2)], useCol[3]])}}
colnames(y) <- c("db","ID","name")
y }
x <- cbind(x, cleanID(x, useCol=c(db=1, ID=2, name=3)))
x <- cbind(x, conta=grepl("^con|^REV_", x[,"db"]) | grepl("__CON__",xIni))
## recuperate all (bizarre) non-parsed into ID
isNa <- rowSums(is.na(x)) > nColIni -2
if(any(isNa)) x[which(isNa),c(2+nColIni)] <- xIni[which(isNa)]
cbind(x[,c((nColIni+1):ncol(x), 1:nColIni)], iniSoftAnn=xIni) }
## end functions
## init check
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("Package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
  infoDat <- infoFi <- setupSd <- parametersD <- annot <- annotMQ <- NULL        # initialize
## check if path & file exist
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="csv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
## read (main) file
## future: look for fast reading of files
# read.csv("C:\\E\\projects\\MassSpec\\smallProj\\testAlphaPept\\demoData_aug23\\testHSE_A1-3\\results_proteins.csv", header=TRUE)
tmp <- try(utils::read.csv(paFi, header=TRUE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
    if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')!  (check format or if rights to read)") else {
      if(!silent) message(fxNa,"Content of file '",paFi,"' seems empty or non-conform !  Returning NULL; check if this is really an AlphaPept-file") }
tmp <- NULL
return(NULL)
} else {
## start checking format of initial read
if(debug) { message(fxNa,"rAP1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rAP1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,read0asNA=read0asNA,quantCol=quantCol,
refLi=refLi,separateAnnot=separateAnnot )} # annotCol=annotCol,FDRCol=
## check which columns can be extracted (for annotation)
      isSummaryCsv <- any(grepl("^n_sequence\\.", colnames(tmp)))     # check if 'results_protein_summary.csv' (has cols heading '^n_sequence\.' and cols '$LFQ.intensity.' and '$intensity.')
## note that 'results_proteins.csv' has 'XXX_LFQ' and 'XXX'
if(length(extrColNames) <1) { extrColNames <- # default as list for patterns
if(isSummaryCsv) list(lfq="^LFQ\\.intensity\\.",nSeq="^n_sequence\\.",annot=1) else list(LFQ="_LFQ$", annot=1)
} else {
## check custom entry (& format as list)
ch1 <- extrColNames %in% colnames(tmp)
if(!all(ch1) && "LFQ" %in% names(extrColNames)) { # seems to be pattern ?
}
if(!"annot" %in% names(extrColNames)) if(is.list(extrColNames)) extrColNames$annot <- 1 else c(extrColNames, annot=1)
}
if(debug) { message(fxNa,"rAP1b"); rAP1b <- list(tmp=tmp,paFi=paFi,fasta=fasta,quantCol=quantCol,extrColNames=extrColNames,isSummaryCsv=isSummaryCsv)}
if(length(tmp) >0) {
## check for lines with absent IDs => eliminate
chNa <- is.na(tmp[,extrColNames$annot]) | nchar(tmp[,extrColNames$annot]) <2
if(any(chNa)) {
if(!silent) message(fxNa,"Removing ",sum(chNa)," lines since absent ID or all NA (won't be able to do anything lateron withour ID ..)")
tmp <- tmp[-which(chNa),]
}
if(debug) {message(fxNa,"rAP1d"); rAP1d <- list(tmp=tmp,paFi=paFi,fasta=fasta,quantCol=quantCol)}
## further extracting : quantitation
useDCol <- grep(extrColNames$LFQ, colnames(tmp))
        if(length(useDCol) <1) stop("NO columns matching term ",wrMisc::pasteC(extrColNames$LFQ, quoteC="'")," found !")
abund <- as.matrix(tmp[,useDCol]) # abundances (not normalized, not log2)
if(debug) {message(fxNa,"rAP1d2"); rAP1d2 <- list(tmp=tmp,paFi=paFi,fasta=fasta,abund=abund,quantCol=quantCol,extrColNames=extrColNames)}
chNum <- try(is.numeric(abund), silent=TRUE)
if(!chNum) {abund <- try(apply(tmp[,useDCol], 2, wrMisc::convToNum, convert="allChar", silent=debug, callFrom=fxNa), silent=TRUE)
if(inherits(abund, "try-error")) {datOK <- FALSE; warning(fxNa,"CANNOT transform 'abund' to numeric data !'")} }
if(length(dim(abund)) <2 && !is.numeric(abund)) abund <- matrix(as.numeric(abund), ncol=ncol(abund), dimnames=dimnames(abund))
if(debug) {message(fxNa,"rAP1e"); rAP1e <- list(tmprAP1e=tmp,abund=abund,paFi=paFi,fasta=fasta,quantCol=quantCol,extrColNames=extrColNames)}
## adjust colnames of abund
colnames(abund) <- sub(extrColNames$LFQ,"", sub("^LFQ.intensity\\.","", colnames(abund)))
if(trimColNames) { ## further trim
colnames(abund) <- wrMisc::.trimFromStart(wrMisc::.trimFromEnd( sub(paste0("^",quantCol),"", colnames(abund))))
## no trim needed for AlphaPept ?
}
if(debug) {message(fxNa,"rAP3"); rAP3 <- list(abund=abund,paFi=paFi,path=path,chPa=chPa,tmp=tmp,remConta=remConta,fasta=fasta)}
## check for neg values
chNeg <- which(abund <0)
        if(length(chNeg) ==prod(dim(abund))) { read0asNA <- FALSE
          message(fxNa,"NOTE : Bizarre, ALL values are NEGATIVE !!  (omit transforming neg values to NA)")}
## convert 0 to NA
if(!isFALSE(read0asNA)) {
ch1 <- abund <= 0
if(any(ch1, na.rm=TRUE)) { abund[which(ch1)] <- NA
if(!silent) message(fxNa,"Transform ",sum(ch1),"(",100*round(sum(ch1)/length(ch1),3),"%) initial '0' values to 'NA'")}}
        ## further extracting : prepare for counting data
if("nSeq" %in% names(extrColNames)) {
          counts <- as.matrix(tmp[,extrColNames$nSeq])
          if(!is.numeric(counts)) counts <- try(matrix(as.numeric(counts), ncol=ncol(counts), dimnames=dimnames(counts)))
if(inherits(counts, "try-error")) { counts <- NULL
message(fxNa,"Unable to extract counts (not numeric ?)")}
} else { counts <- NULL}
if(debug) {message(fxNa,"rAP4"); rAP4 <- list(abund=abund,counts=counts,annot=annot,paFi=paFi,path=path,chPa=chPa,tmp=tmp,remConta=remConta,fasta=fasta,annotMQ=annotMQ)}
## Annotation
annot <- .cleanMQann(tmp[,extrColNames$annot]) # typically 1st column entitled 'X'
colnames(annot)[2] <- "protein_group"
if(debug) {message(fxNa,"rAP4aa"); rAP4aa <- list(abund=abund,counts=counts,annot=annot,paFi=paFi,path=path,chPa=chPa,tmp=tmp,remConta=remConta,fasta=fasta,annot=annot)}
      ## read fasta from directory above the quantitation-results (AlphaPept)
if(length(fasta) >0) {fasta <- fasta[1]; if(isFALSE(fasta) || is.na(fasta)) fasta <- NULL}
if(isTRUE(fasta)) {
        hiDir <- dir(file.path(dirname(paFi),".."))
        chFa <- grep("\\.fasta$", hiDir)
        faFi <- if(length(chFa) >0) file.path(dirname(paFi),"..", hiDir[chFa[1]])
} else faFi <- fasta
if(length(faFi) >0) { # has fasta for recuperating annotation
fasta <- try(readFasta2(filename=faFi, tableOut=TRUE, silent=silent,debug=debug,callFrom=fxNa), silent=TRUE)
## Potential problem with inconsistent format of fasta
if(inherits(fasta, "try-error")) { fasta <- NULL
if(!silent) message(fxNa,"Unable to read/open fasta file '",faFi,"' (check rights to read ?)")
} else {
        tmpAnn <- if(length(annot) >0) annot[,2] else tmp[,1]     # 'P02768' still missing
tm2 <- wrMisc::concatMatch(tmpAnn, fasta[,2], sepPattern=NULL, globalPat="digitExtension", silent=silent, debug=debug, callFrom=fxNa) # clean protein-names (eg digit extensions, concateneated IDs) & match to data
        iniAnn <- if(length(annot) >0) annot else cbind(iniSoftAnn=tmp[,1])
#colnames(iniAnn) <- c("iniSoftAnn", if(ncol(iniAnn) >1) paste0(colnames(iniAnn)[-1],".",quantSoft))
useFaCol <- match(c("uniqueIdentifier","entryName","proteinName","OS","OX","GN","database"), colnames(fasta)) # do not export full 'sequence'
annot <- cbind(trimIdentifier=names(tm2), fasta[tm2, useFaCol], iniAnn=tmpAnn)
if(debug) {message(fxNa,"rAP4ab"); rAP4ab <- list(abund=abund,counts=counts,annot=annot,fasta=fasta,tmp=tmp,chNa=chNa,annot=annot) }
foundFastaCol <- !is.na(useFaCol)
if(any(foundFastaCol)) colnames(annot)[1:sum(foundFastaCol)] <- c("Accession","AccessionFull","Description","EntryName","Species","OX","GeneName","Database")[which(foundFastaCol)]
## strip species details
if("Species" %in% annot) annot[,"Species"] <- sub(" \\(.+", "", annot[,"Species"])
}
} else {
#annot <- if(length(annot) >0) annot
if(debug) message(fxNa,"NOTE : No fasta-file found in main directory ...")
}
if(debug) {message(fxNa,"dim annot ",nrow(annot)," ",ncol(annot)," rAP4b"); rAP4b <- list(annot=annot,faFi=faFi,abund=abund,tmp=tmp,fasta=fasta,annot=annot)}
## check ID col of annot
#chID <- match(c("Accession","protein_group","uniqueIdentifier"), colnames(annot))
#if(all(is.na(chID))) { chID <- wrMisc::naOmit(match(c("protein_group","ID"), colnames(annot)))
# if(length(chID) < 1) warning("PROBLEM : UNEXPECTED colnames in annot") #else colnames(annot)[chID][1] <- "Accession"
#}
      if(length(annot) <1) annot <- as.matrix(tmp[,1])     # fallback if no annotation was built so far
## remove lines wo IDs
chNa <- is.na(annot[,1])
if(any(chNa)) {
if(!silent) message(fxNa,"Removing ",sum(chNa)," out of ",nrow(abund)," lines wo ID")
rmLi <- which(chNa)
tmp <- tmp[-rmLi,]
annot <- annot[-rmLi,]
if(length(dim(annot)) <2) annot <- matrix(annot, ncol=1, dimnames=list(NULL,colnames(tmp)[1]))
abund <- abund[-rmLi,]
if(length(counts) >0) counts <- if(length(dim(counts))==3) counts[-rmLi,,] else counts[-rmLi,]
}
if(debug) {message(fxNa,"dim annot",nrow(annot)," ",ncol(annot)," rAP4d"); rAP4d <- list(annot=annot,faFi=faFi,abund=abund,tmp=tmp)}
## unique ID
chD <- duplicated(annot[,1])
uniqueID <- if(any(chD, na.rm=TRUE)) wrMisc::correctToUnique(annot[,1], silent=silent, callFrom=fxNa) else annot[,1] # extrColNames[1]
rownames(annot) <- rownames(abund) <- uniqueID
if(length(counts) >0) rownames(counts) <- uniqueID
if(debug) {message(fxNa,"rAP4e"); rAP4e <- list(paFi=paFi,path=path,chPa=chPa,tmp=tmp,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,specPref=specPref)}
## remove Wombat contaminants
#useColumn <- wrMisc::naOmit(match(c("Accession","protein_group"), colnames(annot)))
conLi <- grep("CON__[[:alnum:]]", annot[, if(ncol(annot) >1) wrMisc::naOmit(match(c("Accession","protein_group"), colnames(annot)))[1] else 1])
if(remConta) {
if(length(conLi) >0) {
iniLi <- nrow(annot)
annot <- annot[-conLi,]
abund <- abund[-conLi,]
          counts <- if(length(dim(counts))==3) counts[-conLi,,] else counts[-conLi,]
if(debug) message(fxNa,"Removing ",length(conLi)," instances of contaminants to final ",nrow(annot)," lines/IDs")}
}
## split Annotation
if(debug) {message(fxNa,"rAP4f"); rAP4f <- list(path=path,chPa=chPa,tmp=tmp,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,specPref=specPref)}
## finalize annotation
chCols <- c("EntryName","GeneName","Species","Contam","Description")
chCol2 <- chCols %in% colnames(annot)
if(any(!chCol2)) annot <- cbind(annot, matrix(NA, nrow=nrow(annot), ncol=sum(!chCol2), dimnames=list(NULL, chCols[which(!chCol2)]))) # add columns so far not present
if(!remConta && length(conLi) >0) annot[conLi, "Contam"] <- "TRUE"
if(debug) {message(fxNa,"rAP5"); rAP5 <- list(path=path,chPa=chPa,tmp=tmp,chCol=chCol,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,remStrainNo=remStrainNo, specPref=specPref)}
## extract species according to custom search parameters 'specPref'
if(remStrainNo && any(!is.na(annot[,"Species"]))) {
annot[,"Species"] <- sub(" \\(strain [[:alnum:]].+","", annot[,"Species"])
}
## complete species annot by info extracted from fasta : ' OS='
.completeSpeciesAnnot <- function(spe=c("Homo sapiens", "_HUMAN"), anno=annot, exCoNa=c("Species", "EntryName","name","proteinName")) { # re-written 12jun23
## complete species if missing in anno[,exCoNa[2]] but found in anno[,exCoNa[1]]; return corrected anno
chNa <- is.na(anno[,exCoNa[1]]) | nchar(anno[,exCoNa[1]]) <1 # missing (species) annotation
if(any(chNa, na.rm=TRUE)) { # suppose that all 'exCoNa' are present as colnames in 'annot'
useColumn <- if(all(is.na(anno[,exCoNa[2]]))) wrMisc::naOmit(match(exCoNa[3:length(exCoNa)], colnames(anno))) else exCoNa[2]
if(length(useColumn) >1) useColumn <- useColumn[1]
chS <- grep(spe[1], anno[,useColumn])
if(length(chS) >0) anno[chS, exCoNa[1]] <- spe[2]
}
anno }
if(isTRUE(fixSpeciesNames)) { # try to recuperate/fix non-given/bad formatted species
chNa <- is.na(annot[,"Species"])
if(any(chNa)) {
commonSpec <- .commonSpecies()
for(i in 1:nrow(commonSpec)) annot[which(chNa),] <- .completeSpeciesAnnot(commonSpec[i,], annot[which(chNa),], exCoNa=c("Species","EntryName","name","proteinName")) }
if(debug) {message(fxNa,"rAP6"); rAP6 <- list(path=path,chPa=chPa,tmp=tmp,chCol=chCol,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,remStrainNo=remStrainNo, specPref=specPref)}
## check/complete for truncated species names (ie names found inside other ones)
chSpe <- which(!is.na(annot[,"Species"]) & nchar(annot[,"Species"]) >0)
if(length(chSpe) >0) {
OS <- gsub(";{1,5}$", "", annot[chSpe,"Species"]) # remove tailing separators
OSna <- unique(OS)
ch1 <- nchar(OSna) <1
if(debug) {message(fxNa,"rAP6b")}
if(any(ch1, na.rm=TRUE)) OSna <- OSna[which(nchar(OSna) >0)] # (just in case) remove empty tags
ch2 <- lapply(OSna, grep, OSna)
chTr <- sapply(ch2, length) >1
if(any(chTr, na.rm=TRUE)) { if(!silent) message(fxNa,"Found ",sum(chTr)," species name(s) appearing inside other ones, assume as truncated (eg ",OSna[which(chTr)[1]],")")
for(i in which(chTr)) OS[which(OS==OSna[i])] <- OSna[ch2[[i]][1]]
}
annot[chSpe,"Species"] <- OS }
}
## in case "Accession" is avail not "EntryName" is not
if(debug) {message(fxNa,"rAP7"); rAP7 <- list(path=path,chPa=chPa,tmp=tmp,chCol=chCol,quantCol=quantCol,remStrainNo=remStrainNo,
abund=abund,chNum=chNum,specPref=specPref, annot=annot,remConta=remConta,counts=counts)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, useColumn=c("Description","Species","EntryName","GeneName"), silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rAP7b") }
if(!silent) { chSp <- sum(is.na(annot[,"Species"]))
if(chSp >0) message(fxNa,"Note: ",chSp," proteins with unknown species")
tab <- table(annot[,"Species"])
if(length(tab) >0) {
tab <- rbind(names(tab), paste0(": ",tab,", "))
if(!silent) message(" data by species : ", apply(tab, 2, paste)) } } # all lines assigned
if(debug) {message(fxNa,"rAP8"); rAP8 <- list(path=path,chPa=chPa,tmp=tmp,chCol=chCol,quantCol=quantCol,remStrainNo=remStrainNo,
abund=abund,chNum=chNum, annot=annot,remConta=remConta,counts=counts) }
## look for unique col from $annot to use as rownames
if(nrow(annot) <1) warning("annot is empty (NO lines)")
## maybe annot is empty ?
      chAn <- colSums(apply(annot[,c(1:min(ncol(annot),7))], 2, duplicated), na.rm=TRUE)      # look at first 7 cols : how many elements per column duplicated
if(!silent) message(fxNa,"Use column '",colnames(annot)[which.min(chAn)],"' as identifyer (has fewest, ie ",chAn[which.min(chAn)]," duplicated entries) as rownames")
rownames(abund) <- rownames(annot) <- if(any(chAn==0)) annot[,which(chAn==0)[1]] else wrMisc::correctToUnique(annot[,which.min(chAn)], callFrom=fxNa)
if(length(counts) >0) rownames(counts) <- rownames(annot)
if(debug) {message(fxNa,"rAP9"); rAP9 <- list(path=path,chPa=chPa,tmp=tmp,chCol=chCol,quantCol=quantCol,abund=abund,chNum=chNum,
annot=annot,refLi=refLi,remConta=remConta)}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(if(isLog2) abund else log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=TRUE)
if(inherits(quant, "try-error")) { warning(fxNa,"PROBLEMS ahead : Unable to normalize as log2-data !!") }
if(debug) {message(fxNa,"rAP10"); rAP10 <- list(path=path,chPa=chPa,tmp=tmp,chCol=chCol,quantCol=quantCol,abund=abund,chNum=chNum,
quant=quant,annot=annot,remConta=remConta,groupPref=groupPref,suplAnnotFile=suplAnnotFile, sdrf=sdrf,paFi=paFi )}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
## prepare for sdrf (search in directory above)
if(isTRUE(sdrf)) {
hiDir <- dir(file.path(dirname(paFi),".."))
chFa <- grep("^sdrf.+\\.tsv$", hiDir)
if(length(chFa) >0) sdrf <- file.path(dirname(paFi),"..",hiDir[chFa[1]]) else {sdrf <- NULL
if(!silent) message(fxNa,"NO sdrf file found in directory above main data !")}
}
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
headAbund <- utils::head(quant)
chX <- grepl("^X[[:digit:]]",colnames(quant)) #check for heading X in all colnames
if(any(chX)) colnames(headAbund)[which(chX)] <- sub("^X", "", colnames(headAbund)[which(chX)])
## check for matching : (as done within readSampleMetaData) - can't , sdrf not read yet ...
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth=paste0("AP"), path=NULL, abund=headAbund, groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rAP13 .."); rAP13 <- list(sdrf=sdrf,gr=gr,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="AP", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rAP14"); rAP14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,plotGraph=plotGraph,normalizeMeth=normalizeMeth,isLog2=isLog2)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(debug) message(fxNa," rAP15 normalizeMeth= ",normalizeMeth," ; plotGraph ", plotGraph)
    ## need to plot 2 distributions ?
## a) data are same ?
chSame <- (identical(abund, quant) || identical(log2(abund), quant)) && "none" %in% normalizeMeth
if(plotGraph) .plotQuantDistr(abund=abund, quant=if(chSame) NULL else quant, custLay=custLay, normalizeMeth=normalizeMeth, notLogAbund=TRUE, softNa=paste("AlphaPept"),
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=debug, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod=paste("AlphaPept"), qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
}
#' Read Tabulated Files Exported by DIA-NN At Protein Level
#'
#' This function allows importing protein identification and quantification results from \href{https://github.com/vdemichev/DiaNN}{DIA-NN}.
#' Data should be exported as tabulated text (tsv) as protein-groups (pg) to allow import by this function.
#' Quantification data and other relevant information will be parsed and extracted (similar to the other import-functions from this package).
#' The final output is a list containing as (main) elements: \code{$annot}, \code{$raw} and \code{$quant}, or a data.frame with the quantification data and a part of the annotation if argument \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed using DIA-NN version 1.8.x.
#' Note, reading gene-group (gg) files is in principle possible, but resulting files typically lack protein-identifiers which may be less convenient in later steps of analysis.
#' Thus, it is suggested to rather read protein-group (pg) files.
#'
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for default file) to read for extracting file-names as sample-names and other experiment related information.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoid -Inf in log2 results)
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final log2 (normalized) quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Protein.Group","Protein.Ids","Protein.Names","Genes","First.Protein.Description"))
#' @param FDRCol - not used (the argument was kept to remain with the same syntax as the other import functions of this package)
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'  the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProtDiscovFile}}, \code{\link{readProlineFile}}
#' @examples
#' diaNNFi1 <- "tinyDiaNN1.tsv.gz"
#' ## This file contains much less identifications than one may usually obtain
#' path1 <- system.file("extdata", package="wrProteo")
#' ## let's define the main species and allow tagging some contaminants
#' specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN")
#' dataNN <- readDiaNNFile(path1, file=diaNNFi1, specPref=specPref1, tit="Tiny DIA-NN Data")
#' summary(dataNN$quant)
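#' # annotation is kept separate from the quantitation values :
#' head(dataNN$annot)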
#' @export
readDiaNNFile <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, read0asNA=TRUE, quantCol="\\.raw$",
annotCol=NULL, refLi=NULL, separateAnnot=TRUE, FDRCol=NULL,
groupPref=list(lowNumberOfGroups=TRUE), plotGraph=TRUE, titGraph="DiaNN", wex=1.6, specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"),
gr=NULL, sdrf=NULL, suplAnnotFile=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read DiaNN exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readDiaNNFile")
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
## check if path & (tsv) file exist (set paFi, if bad -> error)
if(!grepl("\\.tsv$|\\.tsv\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="tsv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rdn0a ..")
## note : reading sample-setup from 'suplAnnotFile' at this place won't allow comparing if number of samples/columns corresponds to data; do after reading main data
if(debug) message(fxNa,"rdn0 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a Fragpipe-file") }
NULL
} else {
if(debug) { message(fxNa,"rdn1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rdn1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,sampleNames=sampleNames,read0asNA=read0asNA,quantCol=quantCol,
annotCol=annotCol,refLi=refLi,separateAnnot=separateAnnot,FDRCol=FDRCol )}
## locate & extract annotation
## note : space (' ') in orig colnames are transformed to '.'
if(length(annotCol) <1) annotCol <- c("Protein.Group","Protein.Ids","Protein.Names","Genes","First.Protein.Description")
## check for essential colnames ! distinguish export as protein-groups (pg) or gene-groups (gg)
    ## 'Accession' (eg "P00498") .. missing
    ## 'Description' (eg "Cyclin-dependent kinase 1") .. missing
    ## no PSM or spectral-counts data in file
##
if(is.character(annotCol)) annotColNo <- match(annotCol, colnames(tmp))
chNa <- is.na(annotColNo)
      if(any(chNa) && !silent) message(fxNa,"Missing ",sum(chNa)," annotation columns: ",wrMisc::pasteC(annotCol[chNa], quoteC="'"))
if(all(chNa) && "Genes" %in% colnames(tmp)) {
annotColG <- which(colnames(tmp) %in% "Genes")
annot <- cbind(Accession=NA, EntryName=if(is.na(annotColNo[3])) NA else tmp[,annotCol[3]], GeneName=tmp[,annotColG], Species=NA, Contam=NA, SpecType=NA,
Description=NA, UniProtID=NA, EntryNamesAll=NA, GeneNameAll=tmp[,annotColG] ) # may be better to name column 'species'
if(!silent) message(fxNa,"NOTE : Data seems to be 'gene-groups' (gg) format, MISSING protein identifiers in annoation !!")
} else {
## rename columns to wrProteo format
annot <- cbind(Accession=NA, EntryName=tmp[,annotCol[3]], GeneName=tmp[,annotCol[4]], Species=NA, Contam=NA, SpecType=NA,
Description=NA, UniProtID=tmp[,annotCol[2]], EntryNamesAll=tmp[,annotCol[3]], GeneNameAll=tmp[,annotCol[4]], tmp[,wrMisc::naOmit(annotColNo[c(5)])]) # may be better to name column 'species'
}
if(debug) { message(fxNa,"rdn2 .. annotColNo : ", wrMisc::pasteC(annotColNo)); rdn2 <- list(annot=annot,annotCol=annotCol,tmp=tmp,specPref=specPref )}
## 'EntryName' & 'GeneName' may contain multiple proteins, pick 1st
chMult <- grep(";",annot[,2])
if(length(chMult) >0) annot[,2] <- sub(";.+","", annot[,2])
chMult <- grep(";",annot[,3])
if(length(chMult) >0) annot[,3] <- sub(";.+","", annot[,3])
## Species (need to run before reparsing badly parsed)
      ## is there anything that can be done for annotColNo[3], ie 'First.Protein.Description' ??  was all NA in data provided
if(!is.na(annotColNo[3])) {
chSp <- grep("^[[:alnum:]]+_[[:upper:]]", tmp[,annotColNo[3]])
if(length(chSp) >0) { commonSpec <- .commonSpecies()
spe2 <- sub("^[[:alnum:]]+_", "", tmp[chSp,annotColNo[3]])
chSp3 <- which(sub("^_","", commonSpec[,1]) %in% spe2)
if(length(chSp3) >0) for(i in chSp3) annot[chSp,"Species"] <- commonSpec[i,2]}
}
## clean 'Description' entries: remove tailing punctuation or open brackets (ie not closed) at end of (truncated) fasta header - applicable ?
if(debug) {message(fxNa,"rdn6d .. "); rdn6d <- list(annot=annot,tmp=tmp,chSp=chSp,specPref=specPref,annotCol=annotCol)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rdn6b .. ")}
if(!silent) {
if(any(chSp, na.rm=TRUE) && !all(chSp)) message(fxNa,"Note: ",sum(chSp)," (out of ",nrow(tmp),") lines with unrecognized species")
if(!all(chSp)) { tab <- table(annot[,"Species"])
tab <- rbind(names(tab), paste0(": ",tab," ; "))
if(!silent) message(fxNa,"Count by 'specPref' : ",apply(tab, 2, paste)) }} # all lines assigned
if(debug) {message(fxNa,"rdn6e .. ")}
## check for unique annot[,"Accession"] - not applicable
if(debug) { message(fxNa,"rdn7 .. dim annot ",nrow(annot)," and ",ncol(annot)); rdn7 <- list(annot=annot,tmp=tmp,annot=annot,specPref=specPref) }
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) <1) quantCol <- "\\.raw$"
if(length(quantCol) ==1) {
## pattern search (for abundance/quantitation data)
if(is.character(quantCol)) quantCol <- grep(quantCol, tolower(colnames(tmp)))
}
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
abund <- as.matrix(tmp[, quantCol])
rownames(abund) <- annot[,"EntryName"]
if(debug) { message(fxNa,"rdn8 .. dim abund ",nrow(abund)," and ",ncol(abund)) }
## check & clean abundances
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"Valid 'sampleNames' were provided rdn8b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
}
counts <- NULL
if(debug) { message(fxNa,"rdn8c")}
## (optional) filter by FDR (so far use 1st of list where matches are found from argument FDRCol) - not applicable ?
if(debug) { message(fxNa,"rdn11"); rdn11 <- list(annot=annot,tmp=tmp,abund=abund)}
if(debug) {message(fxNa,"rdn12 .. ");
rdn12 <- list(tmp=tmp,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,
refLi=refLi,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,refLi=refLi,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr) }
## correct colnames from 'Pathabc.raw' to 'abc'
colnames(abund) <- wrMisc::.trimLeft(sub("\\.raw$|\\.RAW$","", colnames(abund)), silent=silent, debug=debug, callFrom=fxNa)
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## set 0 values to NA (avoid -Inf at log2)
if(!isFALSE(read0asNA)) { ch0 <- abund ==0
if(any(ch0, na.rm=TRUE)) abund[which(ch0)] <- NA }
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE)
if(debug) { message(fxNa,"rdn13 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")
rdn13 <- list(tmp=tmp,quant=quant,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,groupPref=groupPref,
refLi=refLi,refLiIni=refLiIni,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr,silent=silent,debug=debug) }
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
        setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="DN", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rdn13b .."); rdn13b <- list()}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="DN", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rdn14"); rdn14 <- list(setupSd=setupSd, sdrf=sdrf, suplAnnotFile=suplAnnotFile,quant=quant,abund=abund,plotGraph=plotGraph)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="DiaNN",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=debug, callFrom=fxNa, debug=debug)
if(debug) {message(fxNa,"Read sample-meta data, rdn15"); rdn15 <- list()}
## meta-data
notes <- c(inpFile=paFi, qmethod="DiaNN", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
#' Read Tabulated Files Exported by DiaNN At Peptide Level
#'
#' This function allows importing peptide identification and quantification results from \href{https://github.com/vdemichev/DiaNN}{DiaNN}.
#' Data should be exported as tabulated text (tsv) to allow import by this function.
#' Quantification data and other relevant information will be extracted similar to the other import-functions from this package.
#' The final output is a list containing as (main) elements: \code{$annot}, \code{$raw} and \code{$quant}, or a data.frame with the quantification data and a part of the annotation if argument \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed using DiaNN version 1.8.x.
#'
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for default file) to read for extracting file-names as sample-names and other experiment related information.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoid -Inf in log2 results)
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param refLi (character or integer) custom specification of which lines of data belong to the main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final log2 (normalized) quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Protein.Group","Protein.Ids","Protein.Names","Genes","First.Protein.Description","Proteotypic","Stripped.Sequence","Precursor.Id","Precursor.Charge") )
#' @param FDRCol (list) - not used
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided) \code{}
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'  the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProtDiscovFile}}, \code{\link{readProlineFile}}
#' @examples
#' diaNNFi1 <- "tinyDiaNN1.tsv.gz"
#' ## This file contains far fewer identifications than one would usually obtain
#' path1 <- system.file("extdata", package="wrProteo")
#' ## let's define the main species and allow tagging some contaminants
#' specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN")
#' dataNN <- readDiaNNPeptides(path1, file=diaNNFi1, specPref=specPref1, tit="Tiny DIA-NN Data")
#' summary(dataNN$quant)
#'
#' @export
readDiaNNPeptides <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, read0asNA=TRUE, quantCol="\\.raw$",
annotCol=NULL, refLi=NULL, separateAnnot=TRUE, FDRCol=NULL,
groupPref=list(lowNumberOfGroups=TRUE), plotGraph=TRUE, titGraph="DiaNN", wex=1.6, specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"),
gr=NULL, sdrf=NULL, suplAnnotFile=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read DiaNN exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readDiaNNPeptides")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
## check if path & (tsv) file exist
  if(!grepl("\\.tsv$|\\.tsv\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file '",fileName,"' might not be the right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="tsv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rdnp0a ..")
## note : reading sample-setup from 'suplAnnotFile' at this place won't allow comparing if number of samples/columns corresponds to data; do after reading main data
if(debug) message(fxNa,"rdnp0 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a Fragpipe-file") }
NULL
} else {
if(debug) { message(fxNa,"rdnp1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rdnp1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,sampleNames=sampleNames,read0asNA=read0asNA,quantCol=quantCol,
annotCol=annotCol,refLi=refLi,separateAnnot=separateAnnot,FDRCol=FDRCol )}
## locate & extract annotation
## note : space (' ') in orig colnames are transformed to '.'
if(length(annotCol) <1) annotCol <- c("Protein.Group","Protein.Ids","Protein.Names","Genes","First.Protein.Description","Proteotypic","Stripped.Sequence","Precursor.Id","Precursor.Charge")
## note : 'Precursor.Id' contains modifications (Unimod number) and terminal charge number
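    ##  e.g. a (hypothetical) 'Precursor.Id' like 'AAAAGPGAALS(UniMod:21)PR2' encodes the stripped
    ##   sequence plus modification ('UniMod:21' = phospho) and the terminal precursor charge (here '2')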
## check for essential colnames !
    ## 'Accession' (eg "P00498") .. missing
    ## 'Description' (eg "Cyclin-dependent kinase 1") .. missing
    ## no PSM or spectral-count data in this file
##
if(is.character(annotCol)) annotColNo <- match(annotCol, colnames(tmp))
chNa <- is.na(annotColNo)
    if(any(chNa) && !silent) message(fxNa,"Missing ",sum(chNa)," annotation columns: ",wrMisc::pasteC(annotCol[chNa], quoteC="'"))
## rename columns to wrProteo format
annot <- cbind(Accession=NA, EntryName=tmp[,annotCol[3]], GeneName=tmp[,annotCol[4]], Species=NA, Contam=NA, SpecType=NA,
Description=NA, UniProtID=tmp[,annotCol[2]], EntryNamesAll=tmp[,annotCol[3]], GeneNameAll=tmp[,annotCol[4]], tmp[,wrMisc::naOmit(annotColNo[c(5:length(annotCol))])]) # may be better to name column 'species'
if(debug) { message(fxNa,"rdnp2 .. annotColNo : ", wrMisc::pasteC(annotColNo)); rdnp2 <- list(annot=annot,annotCol=annotCol,tmp=tmp,specPref=specPref )}
## 'EntryName' & 'GeneName' may contain multiple proteins, pick 1st
chMult <- grep(";",annot[,2])
if(length(chMult) >0) annot[,2] <- sub(";.+","", annot[,2])
chMult <- grep(";",annot[,3])
if(length(chMult) >0) annot[,3] <- sub(";.+","", annot[,3])
## Species (need to run before reparsing badly parsed)
    ## is there anything that can be done for annotColNo[5], ie 'First.Protein.Description' ?? was all NA in data provided
if(!is.na(annotColNo[3])) {
chSp <- grep("^[[:alnum:]]+_[[:upper:]]", tmp[,annotColNo[3]])
if(length(chSp) >0) { commonSpec <- .commonSpecies()
spe2 <- sub("^[[:alnum:]]+_", "", tmp[chSp,annotColNo[3]])
chSp3 <- which(sub("^_","", commonSpec[,1]) %in% spe2)
if(length(chSp3) >0) for(i in chSp3) annot[chSp,"Species"] <- commonSpec[i,2]}
}
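    ## illustrative (assumed) example : a 'Protein.Names' entry like 'ALBU_HUMAN' carries the
    ##  species-suffix 'HUMAN', which .commonSpecies() maps to 'Homo sapiens'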
## clean 'Description' entries: remove tailing punctuation or open brackets (ie not closed) at end of (truncated) fasta header - applicable ?
if(debug) {message(fxNa,"rdnp6d .. "); rdnp6d <- list(annot=annot,tmp=tmp,chSp=chSp,specPref=specPref,annotCol=annotCol)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rdnp6b .. ")}
    if(!silent) {
      chSp <- is.na(annot[,"Species"]) | nchar(annot[,"Species"]) <2    # logical index of lines with unrecognized species
      if(any(chSp, na.rm=TRUE) && !all(chSp)) message(fxNa,"Note: ",sum(chSp)," (out of ",nrow(tmp),") lines with unrecognized species")
      if(!all(chSp)) { tab <- table(annot[,"Species"])
        tab <- rbind(names(tab), paste0(": ",tab," ; "))
        message(fxNa,"Count by 'specPref' : ",apply(tab, 2, paste)) }}   # all lines assigned
if(debug) {message(fxNa,"rdnp6e .. ")}
## check for unique annot[,"Accession"] - not applicable
if(debug) { message(fxNa,"rdnp7 .. dim annot ",nrow(annot)," and ",ncol(annot)); rdnp7 <- list(annot=annot,tmp=tmp,annot=annot,specPref=specPref) }
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) <1) quantCol <- "\\.raw$"
if(length(quantCol) ==1) {
## pattern search (for abundance/quantitation data)
if(is.character(quantCol)) quantCol <- grep(quantCol, tolower(colnames(tmp)))
}
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
abund <- as.matrix(tmp[, quantCol])
rownames(abund) <- annot[,"EntryName"]
if(debug) { message(fxNa,"rdnp8 .. dim abund ",nrow(abund)," and ",ncol(abund)) }
## check & clean abundances
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"Valid 'sampleNames' were provided rdnp8b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
rownames(abund) <- annot[,"Precursor.Id"]
}
counts <- NULL # not available
if(debug) { message(fxNa,"rdnp8c")}
## (optional) filter by FDR (so far use 1st of list where matches are found from argument FDRCol) - not applicable ?
if(debug) { message(fxNa,"rdnp11"); rdnp11 <- list(annot=annot,tmp=tmp,abund=abund)}
if(debug) {message(fxNa,"rdnp12 .. ");
rdnp12 <- list(tmp=tmp,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,
      refLi=refLi,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr) }
## correct colnames from 'Pathabc.raw' to 'abc'
colnames(abund) <- wrMisc::.trimLeft(sub("\\.raw$|\\.RAW$","", colnames(abund)), silent=silent, debug=debug, callFrom=fxNa)
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any peptides matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## set 0 values to NA (avoid -Inf at log2)
if(!isFALSE(read0asNA)) { ch0 <- abund ==0
if(any(ch0, na.rm=TRUE)) abund[which(ch0)] <- NA }
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE)
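    ## e.g. with normalizeMeth='median' each column of log2(abund) gets shifted additively so that
    ##  column-medians align; 'refLines' restricts which lines serve to calculate the shift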
if(debug) { message(fxNa,"rdnp13 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")
rdnp13 <- list(tmp=tmp,quant=quant,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,groupPref=groupPref,
refLi=refLi,refLiIni=refLiIni,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr,silent=silent,debug=debug) }
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
      setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="DN", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rdnp13b .."); rdnp13b <- list()}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="DN", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rdnp14"); rdnp14 <- list(setupSd=setupSd, sdrf=sdrf, suplAnnotFile=suplAnnotFile,quant=quant,abund=abund,plotGraph=plotGraph)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="DiaNN",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
if(debug) {message(fxNa,"Read sample-meta data, rdnp15"); rdnp15 <- list()}
## meta-data
notes <- c(inpFile=paFi, qmethod="DiaNN", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
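## Hedged usage sketch for readDiaNNPeptides() : reads the tiny peptide-level example file
## assumed to ship with this package; the object-name 'dataNNpep' is illustrative
# path1 <- system.file("extdata", package="wrProteo")
# specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN")
# dataNNpep <- readDiaNNPeptides("tinyDiaNN1.tsv.gz", path=path1, specPref=specPref1)
# str(dataNNpep$quant)    # log2-normalized peptide quantifications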
## file : /scratch/gouwar.j/cran-all/cranData/wrProteo/R/readDiaNNPeptides.R
#' Read file of protein sequences in fasta format
#'
#' Read fasta formatted file (from \href{https://www.uniprot.org}{UniProt}) to extract (protein) sequences and name.
#' If \code{tableOut=TRUE} output may be organized as matrix for separating meta-annotation (eg uniqueIdentifier, entryName, proteinName, GN) in separate columns.
#'
#' @param filename (character) names fasta-file to be read
#' @param delim (character) delimiter at header-line
#' @param databaseSign (character) characters at beginning right after the '>' (typically specifying the database-origin), they will be excluded from the sequence-header
#' @param removeEntries (character) if \code{'empty'} allows removing entries without any sequence entries; set to \code{'duplicated'} to remove duplicate entries (same sequence and same header)
#' @param tableOut (logical) toggle to return named character-vector or matrix with enhanced parsing of fasta-header. The resulting matrix will contain the columns 'database','uniqueIdentifier','entryName','proteinName','sequence' and further columns depending on argument \code{UniprSep}
#' @param UniprSep (character) separators for further separating entry-fields if \code{tableOut=TRUE}, see also \href{https://www.uniprot.org/help/fasta-headers}{UniProt-FASTA-headers}
#' @param cleanCols (logical) remove columns with all entries NA, if \code{tableOut=TRUE}
#' @param debug (logical) supplemental messages for debugging
#' @param silent (logical) suppress messages
#' @param callFrom (character) allows easier tracking of messages produced
#' @return This function returns (depending on parameter \code{tableOut}) a) a simple character vector (of sequence) with Uniprot ID as name or b) a matrix with columns: 'database','uniqueIdentifier','entryName','proteinName','sequence' and further columns depending on argument \code{UniprSep}
#' @seealso \code{\link{writeFasta2}} for writing as fasta, or for reading \code{\link[base]{scan}} or \code{read.fasta} from the package \href{https://CRAN.R-project.org/package=seqinr}{seqinr}
#' @examples
#' ## Tiny example with common contaminants
#' path1 <- system.file('extdata',package='wrProteo')
#' fiNa <- "conta1.fasta.gz"
#' fasta1 <- readFasta2(file.path(path1,fiNa))
#' ## now let's read and further separate annotation-fields
#' fasta2 <- readFasta2(file.path(path1,fiNa),tableOut=TRUE)
#' str(fasta1)
#' @export
readFasta2 <- function(filename, delim="|", databaseSign=c("sp","tr","generic","gi"), removeEntries=NULL, tableOut=FALSE, UniprSep=c("OS=","OX=","GN=","PE=","SV="),
cleanCols=TRUE, silent=FALSE, callFrom=NULL, debug=FALSE){
## read fasta formatted file (from Uniprot) to extract (protein) sequences and name
## info about Uniprot fasta https://www.uniprot.org/help/fasta-headers
## return (based on 'tableOut') simple character vector (of sequence) with Uniprot ID as name or matrix with cols:'ID','Sequence','EntryName','ProteinName','OS','GN'
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readFasta2")
if(isTRUE(debug)) silent <- FALSE
if(!isTRUE(silent)) silent <- FALSE
## protect characters from delim
  deli2 <- if(nchar(delim) ==1) paste0("\\",delim) else paste(paste0("\\",unlist(strsplit(delim,""))),collapse="") #"
## initial test reading
  if(!file.exists(filename)) stop(fxNa,"File ",filename," does not exist !")
sca <- try(readLines(filename))
## faster reading of file ? see https://www.r-bloggers.com/2011/08/faster-files-in-r/
  if(inherits(sca, "try-error")) stop(fxNa,"File ",filename," exists but could not be read !")
if(debug) {message(fxNa,"Successfully read file '",filename,"'")}
## abandon using scan due to cases of EOL during text read interfering with ...
newLi <- grep("^>", sca)
  newLi <- if(is.list(newLi)) sort(unlist(newLi)) else as.numeric(newLi)
if(length(newLi) <1) stop(fxNa,"No instances of 'databaseSign', ie '",paste(databaseSign,collapse=""),"' found ! Maybe this is NOT a real fasta-file ?")
byDBsig <- sapply(databaseSign, function(x) grep(paste0("^>",x), sca[newLi]))
names(byDBsig) <- databaseSign
if(debug) {message(fxNa,"Checking for database signs ",wrMisc::pasteC(databaseSign, quoteC="'")," rf2"); rf2 <- list(sca=sca,byDBsig=byDBsig,newLi=newLi,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep) }
## count occurance of prefix types
chLe <- sapply(byDBsig, length)
out <- NULL
if(sum(chLe) < length(newLi)) { # non-standard format or unknown databaseSign
id0 <- sca[newLi]
ch1 <- nchar(gsub("\\|","", id0)) - nchar(id0)
badFo <- ch1 > min(ch1, na.rm=TRUE)
if(any(badFo)) { ## inconsistent format, suppose databaseSign is missing
if(!silent) message(fxNa,"Found ",sum(badFo)," inconsistent entries with missing databaseSign, adding 'xx|'" )
databaseSign <- union(databaseSign,"xx")
      sca[newLi[which(badFo)]] <- paste0(">xx|", sub("^>","", sca[newLi[which(badFo)]])) # add unknown db sign
spePat <- " - [[:upper:]][[:lower:]]+ [[:lower:]]+ \\([[:upper:]][[:lower:]]+\\)$"
chSpe <- which(grepl(spePat, sca[newLi[which(badFo)]]) & !grepl(" OS=[[:upper:]][[:lower:]]", sca[newLi[which(badFo)]]))
if(length(chSpe) >0) { ## found potential species designation (eg ' - Homo sapiens (Human)' while 'OS=' is missing)
if(!silent) message(fxNa,"Found ",length(chSpe)," potential species designations, adding as OS=" )
woSpe <- nchar(sub(spePat,"", sca[newLi[which(badFo)]]))
newSpe <- sub(" \\([[:upper:]][[:lower:]]+\\)$","", substr(sca[newLi[which(badFo)]], woSpe +4, nchar(sca[newLi[which(badFo)]])))
for(i in unique(newSpe)) sca[newLi[which(badFo)]] <- sub(paste0(" - ",i),"", sca[newLi[which(badFo)]])
## future : try converting OS to OX - need to extend .commonSpecies()
sca[newLi[which(badFo)]] <- paste0(sca[newLi[which(badFo)]], " OS=",newSpe)
}
}
} # finish adding missing databaseSign and trying to recover species from non-standard format
if(any(chLe >0, na.rm=TRUE)) { # has prefix
if(any(chLe <1, na.rm=TRUE)) byDBsig <- byDBsig[which(chLe >0)] # eliminate prefix types not found
dbSig <- sub("\\|.+","", sub("^>","", sca[newLi])) # correct database column
if(length(byDBsig) >1) for(i in 2:length(byDBsig)) dbSig[byDBsig[[i]]] <- names(byDBsig)[i]
id0 <- substr(sca[newLi], nchar(dbSig) +2 +nchar(delim), nchar(sca[newLi])) # head wo prefix
} else { # in case no separator found, use text available as ID and as name
id0 <- sca[newLi] ; dbSig <- NULL}
if(debug) {message(fxNa," found ",wrMisc::pasteC(chLe)," occurances of database signs rf3"); rf3 <- list(sca=sca,out=out,newLi=newLi,chLe=chLe,byDBsig=byDBsig,newLi=newLi,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep) }
## check for empty sequences
useLi <- cbind(newLi +1, c(newLi[-1] -1, length(sca)))
## note : if single line of sequence both values on same line have same index
chLe <- useLi[,2] - useLi[,1] <0
if(any(c("empty","removeempty") %in% tolower(removeEntries), na.rm=TRUE) & any(chLe, na.rm=TRUE)) {
    if(!silent) message(fxNa,"Found ",sum(chLe)," case(s) of entries without any sequence underneath - omitting !")
useLi <- useLi[which(!chLe),]
id0 <- id0[which(!chLe)]
#sca <- sca[which(!chLe)]
}
if(debug) {message(fxNa,"rf4"); rf4 <- list(sca=sca,out=out,chLe=chLe,newLi=newLi,id0=id0,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep) }
## isolate ID : strsplit by delim
sep1 <- strsplit(id0, delim, fixed=TRUE)
chLe <- sapply(sep1, length)
id <- sub("^ ","",sub(" $","",sapply(sep1, function(x) x[1]))) # remove heading or tailing space
chNa <- is.na(id)
if(any(chNa, na.rm=TRUE)) {
if(!silent) message(fxNa,"Note: ",sum(chNa)," entries have no names, will be given names 'NONAME01'", if(sum(chNa)>1)" etc...")
id[which(chNa)] <- paste0("NONAME",sprintf(paste0("%0",max(2,nchar(sum(chNa))),"d"), 1:sum(chNa)))
}
entryName <- id
if(debug) {message(fxNa,"rf5"); rf5 <- list(out=out,chLe=chLe,newLi=newLi,entryName=entryName,id=id,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep) }
## Try extracting 2nd part after ID
if(any(chLe >1, na.rm=TRUE)) { # use 1st as ID and last as names+further
    entryName <- unlist(sapply(sep1, function(x) if(length(x) <1) NA else x[min(2,length(x))])) } # use 2nd element after separator (or 1st if only one)
chNa <- is.na(entryName)
if(any(chNa, na.rm=TRUE)) entryName[which(chNa)] <- id[which(chNa)] # no separator, use text available as ID and as name
if(debug) {message(fxNa,"Isolated ",length(id)," ids ( ",sum(chNa)," with same text as ID and sequence name)" )}
entryName <- sub("^ ","", sub("\\.$","",entryName)) # remove heading space or tailing point
seqs <- apply(useLi, 1, function(x) paste(sca[x[1]:x[2]], collapse=""))
  if(debug) {message(fxNa," rf5b")}
if(any(c("duplicate","duplicated") %in% tolower(removeEntries), na.rm=TRUE) ) {
chDup <- duplicated(seqs, fromLast=FALSE) & duplicated(entryName, fromLast=FALSE)
if(any(chDup, na.rm=TRUE)) {
if(!silent) message(fxNa,"Removing ",sum(chDup)," duplicated entries (same sequence AND same header)")
seqs <- seqs[which(!chDup)]
entryName <- entryName[which(!chDup)]
} }
if(debug) {message(fxNa," len seqs ",length(seqs)," rf6");
rf6 <- list(out=out,chLe=chLe,newLi=newLi,entryName=entryName,id=id,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep) }
if(isTRUE(tableOut)) { # further separating name/description field
## Uniprot headers : https://www.uniprot.org/help/fasta-headers
## >db|uniqueIdentifier|entryName proteinName OS=OrganismName OX=OrganismIdentifier [GN=GeneName] PE=ProteinExistence SV=SequenceVersion
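    ##  e.g. '>sp|P02768|ALBU_HUMAN Serum albumin OS=Homo sapiens OX=9606 GN=ALB PE=1 SV=2'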
UniprSep <- sub("^ ","",sub(" $","",UniprSep)) # remove heading or tailing space
out <- matrix(NA, nrow=length(id0), ncol=length(UniprSep) +5,
dimnames=list(NULL,c("database","uniqueIdentifier","entryName","proteinName","sequence",sub("=$","",sub("^ +","",UniprSep)) )))
aftS <- "[[:alpha:]][[:upper:]]*[[:digit:]]*[[:upper:]]*_[[:upper:]]+[[:digit:]]* " # some CAPs + ev some digits
if(debug) {message(fxNa," rf7"); rf7 <- list(out=out,chLe=chLe,newLi=newLi,entryName=entryName,id=id,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep)}
## suplID (if available)
    entryNameS <- sub(aftS, "", entryName)     # get everything before UniProt-like separator (space + some CAPs + '=' + any text)
nch <- nchar(entryName)
ncha <- nchar(entryNameS)
suplID <- if(any(ncha >0, na.rm=TRUE)) substr(entryName, 1, nch -1-ncha) else rep(NA, length(entryName))
chS <- ncha == nchar(entryName)
out[,c("database","uniqueIdentifier","entryName","proteinName","sequence")] <- cbind(dbSig, id, entryNameS, suplID, seqs)
if(debug) {message(fxNa," rf8"); rf8 <- list(out=out,chLe=chLe,newLi=newLi,entryName=entryName,id=id,filename=filename,delim=delim,databaseSign=databaseSign,removeEntries=removeEntries,tableOut=tableOut,UniprSep=UniprSep)}
## extract part after current uniprSep and the before something looking like next uniprSep
grUni <- lapply(UniprSep, grep, entryNameS) # which entires/lines concerned
chUni <- which(sapply(grUni, length) >0) # which separators concerned
UniprSep <- c(UniprSep, "ZYXWVUTSR=") # need to add dummy sequence for last
if(any(chUni, na.rm=TRUE)) for(i in chUni) {
      aftS <- paste(sapply(UniprSep[-1*(1:i)], function(x) paste0(" ",x,"[[:alnum:]]+[[:print:]]*")),collapse="|")
curS <- paste(sapply(UniprSep[i], function(x) paste0("^[[:print:]]* ",x)),collapse="|")
out[grUni[[i]], sub("=$","",UniprSep[i]) ] <- sub(aftS,"", sub(curS,"", entryNameS[grUni[[i]]])) # c(3,5+i)
}
## propagate NONAME to empty proteinName
ch1 <- grepl("NONAME", out[,2]) & nchar(out[,"proteinName"]) <1
if(any(ch1, na.rm=TRUE)) out[which(ch1),"proteinName"] <- out[which(ch1),2]
## remove cols with all NA
chNA <- colSums(!is.na(out)) <1
if(any(chNA) && isTRUE(cleanCols)) out <- if(sum(!chNA) >1) out[,which(!chNA)] else matrix(out[,which(!chNA)], ncol=1, dimnames=list(NULL,colnames(out)[which(!chNA)])) # remove columns with NA only
} else {out <- seqs; names(out) <- entryName}
out }
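## Short hedged sketch : reading the contaminant fasta shipped with this package while
## removing duplicated entries (same sequence AND same header); shown columns as built above
# path1 <- system.file("extdata", package="wrProteo")
# fa <- readFasta2(file.path(path1, "conta1.fasta.gz"), removeEntries="duplicated", tableOut=TRUE)
# fa[1:2, c("uniqueIdentifier","entryName","proteinName")]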
## file : /scratch/gouwar.j/cran-all/cranData/wrProteo/R/readFasta2.R
#' Read Tabulated Files Exported by FragPipe At Protein Level
#'
#' This function allows importing protein identification and quantification results from \href{https://fragpipe.nesvilab.org/}{Fragpipe}
#' which were previously exported as tabulated text (tsv). Quantification data and other relevant information will be extracted similarly to the other import-functions from this package.
#' The final output is a list containing the elements: \code{$annot}, \code{$raw} and \code{$quant}, or a data.frame with the quantification data and a part of the annotation if argument \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed using Fragpipe versions 18.0 and 19.0.
#'
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for default file) to read for extracting file-names as sample-names and other experiment related information.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoid -Inf in log2 results)
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param refLi (character or integer) custom specification of which lines of data belong to the main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final log2 (normalized) quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Protein","Protein.ID","Entry.Name","Description","Gene","Organism","Protein.Length","Protein.Existence","Protein.Probability","Top.Peptide.Probability","Combined.Total.Peptides","Combined.Spectral.Count","Combined.Unique.Spectral.Count") )
#' @param FDRCol (list) optional indication to search for protein FDR information
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided) \code{}
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'  the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProtDiscovFile}}, \code{\link{readProlineFile}}
#' @examples
#' FPproFi1 <- "tinyFragpipe1.tsv.gz"
#' path1 <- system.file("extdata", package="wrProteo")
#' ## let's define the main species and allow tagging some contaminants
#' specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="MOUSE")
#' dataFP <- readFragpipeFile(path1, file=FPproFi1, specPref=specPref1, tit="Tiny Fragpipe Data")
#' summary(dataFP$quant)
#'
#' @export
readFragpipeFile <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, read0asNA=TRUE, quantCol="Intensity$",
annotCol=NULL, refLi=NULL, separateAnnot=TRUE, FDRCol=list("Protein.Probability", lim=0.99), # contamCol="Contaminant",
groupPref=list(lowNumberOfGroups=TRUE), plotGraph=TRUE, titGraph="FragPipe", wex=1.6, specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"),
gr=NULL, sdrf=NULL, suplAnnotFile=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read Fragpipe exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readFragpipeFile")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
## check if path & file exist
  if(!grepl("\\.tsv$|\\.tsv\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file '",fileName,"' might not be the right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="tsv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rfp0a ..")
## note : reading sample-setup from 'suplAnnotFile' at this place won't allow comparing if number of samples/columns corresponds to data; do after reading main data
if(debug) message(fxNa,"rfp0 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a Fragpipe-file") }
NULL
} else {
if(debug) { message(fxNa,"rfp1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rfp1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,sampleNames=sampleNames,read0asNA=read0asNA,quantCol=quantCol,
annotCol=annotCol,refLi=refLi,separateAnnot=separateAnnot,FDRCol=FDRCol )}
## locate & extract annotation
## note : space (' ') in orig colnames are transformed to '.'
if(length(annotCol) <1) annotCol <- c("Protein","Protein.ID","Entry.Name","Description","Gene","Organism", "Protein.Length","Protein.Existence","Protein.Probability",
"Top.Peptide.Probability", "Combined.Total.Peptides","Combined.Spectral.Count","Combined.Unique.Spectral.Count")
## note cols 2-6 are part to common format wrProteo
PSMCol <- "\\.Spectral\\.Count$" # pattern searching tag for PSM-data
PepCol <- "Unique\\.Spectral\\.Count$" # pattern searching tag for Number of peptides
## future option : lateron rename columns called as "Description" to annotCol[2]
## below use explicit colnames "Accession","Description", rename if tolower() fits
.chColNa <- function(x, mat, renameTo=NULL, silent=FALSE, fxNa=NULL){
## check in 'matr' for column-name 'x', if required rename best hit (if no direct hit look using grep, then grep wo case); return corrected mat
chX <- x %in% colnames(mat)
if(all(chX)) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[match(x, colnames(mat))] <- renameTo # juste simple rename (single col only)
} else { # try to localize column to use
chX <- grep(x, colnames(mat))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",x,"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else {
chX <- grep(tolower(x), tolower(colnames(mat)))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",tolower(x),"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else stop("Could NOT find column '",x,"' !!\n (available columns ",wrMisc::pasteC(colnames(mat), quoteC="'"),")") }
}
mat }
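    ## e.g. .chColNa("Protein.ID", tmp, renameTo="Accession") renames the first column
    ##  matching 'Protein.ID' (with grep / case-insensitive fallback) to 'Accession'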
## check for essential colnames !
if(is.character(annotCol)) annotColNo <- match(annotCol, colnames(tmp))
chNa <- is.na(annotColNo)
    if(any(chNa) && !silent) message(fxNa,"Missing ",sum(chNa)," annotation columns: ",wrMisc::pasteC(annotCol[chNa], quoteC="'"))
## rename to wrProteo format
tmp <- .chColNa(annotCol[2], tmp, renameTo="Accession", silent=silent, fxNa=fxNa) # rename 'Protein ID' to 'Accession' (Uniprot ID)
tmp <- .chColNa(annotCol[3], tmp, renameTo="EntryName", silent=silent, fxNa=fxNa) # like THOC2_MOUSE
tmp <- .chColNa(annotCol[4], tmp, renameTo="Description", silent=silent, fxNa=fxNa) # full (long) name
annot <- cbind(Accession=tmp[,"Accession"], EntryName=tmp[,"EntryName"], GeneName=NA, Species=NA, Contam=NA, SpecType=NA,
Description=tmp[,"Description"], tmp[,wrMisc::naOmit(annotColNo[-(1:6)])]) # may be better to name column 'species'
if(debug) { message(fxNa,"rfp2 .. annotColNo : ", wrMisc::pasteC(annotColNo)); rfp2 <- list(annot=annot,annotCol=annotCol,tmp=tmp,specPref=specPref )}
## Species (need to run before reparsing badly parsed)
if(!is.na(annotColNo[6])) { spec <- tmp[,annotColNo[6]]
spec <- sub("^\ +|\ +$","", spec) # remove heading or tailing (white) space
chOX <- grep(" OX=", spec)
if(length(chOX) >0) { OX <- sub(" OX=", "", spec[chOX])
spec[chOX] <- sub(" OX=[[:digit:]]+[[:print:]]*","", spec[chOX])
chO2 <- nchar(spec[chOX]) <3 & nchar(OX) >1
if(any(chO2)) spec[chOX[which(chO2)]] <- OX[which(chO2)] # use OX=.. in case no other information available
}
if(TRUE) spec <- sub(" \\([[:alpha:]][[:print:]]+\\).*", "", spec) # remove ' (..)'
annot[,"Species"] <- spec
}
## look for not well parsed (use separator '|' as indicator)
chPa <- grep("\\|", annot[,"Accession"])
if(length(chPa) >0) {
chSp <- grep(" ", annot[chPa,"Accession"])
if(length(chSp) >0) {
# extract species
chOS <- grep("[[:print:]]+ OS=[[:alpha:]]", annot[chPa[chSp],"Accession"])
if(length(chOS) >0) annot[chPa[chSp[chOS]],"Species"] <- sub(" [[:upper:]]{2}=.+","", sub("[[:print:]]+ OS=","", annot[chPa[chSp[chOS]],"Accession"])) # extract species
## extract GeneName
chGn <- grep("[[:print:]]+ GN=", annot[chPa[chSp],"Accession"])
if(length(chGn) >0) annot[chPa[chSp[chGn]],"GeneName"] <- sub(" [[:upper:]]{2}=.+","", sub("[[:print:]]+ GN=","", annot[chPa[chSp[chGn]],"Accession"]))
## extract Description
annot[chPa[chSp],"Description"] <- sub(".*? ", "", sub(" [[:upper:]]{2}=.+","", annot[chPa[chSp],"Accession"]))
## extract EntryName (option 1)
annot[chPa[chSp],"EntryName"] <- gsub(".*\\|","", sub(" .+","", annot[chPa,"Accession"]))
} else {
annot[chPa,"EntryName"] <- gsub(".*\\|","", annot[chPa,"Accession"]) ## extract EntryName (option 2)
}
## extract Accession
annot[chPa,"Accession"] <- sapply(strsplit(annot[chPa,"Accession"], "\\|"), function(x) if(length(x) >1) x[2] else NA)
}
## clean 'Description' entries: remove tailing punctuation or open brackets (ie not closed) at end of (truncated) fasta header
if(cleanDescription) {
if(debug) { message(fxNa,"rfp3a") }
annot[,"Description"] <- sub("[[:punct:]]+$","", sub("\\ +$", "", annot[,"Description"])) # tailing ';' and/or tailing space
annot[,"Description"] <- sub(" \\([[:alpha:]]*$", "", annot[,"Description"]) # tailing (ie truncated) open '(xxx'
}
if(debug) { message(fxNa,"rfp3b"); rfp3b <- list() }
if(debug) {message(fxNa,"rfp4 .. dim annot: ", nrow(annot)," li and ",ncol(annot)," cols; colnames : ",wrMisc::pasteC(colnames(annot))," ")}
.MultGrep <- function(pat, y) if(length(pat)==1) grep(pat, y) else unlist(sapply(pat, grep, y)) # (multiple) grep() when length of pattern 'pat' >0
## Contam
if("Contaminant" %in% colnames(annot)) { # just in case there is a column called 'Contaminant' (so far not seen)
      useLi <- which(nchar(annot[,"Contaminant"]) >0 & !is.na(annot[,"Contaminant"]))
if(length(useLi) >0) annot[useLi,"Contam"] <- toupper(gsub(" ","",annot[useLi,"Contaminant"]))}
chConta <- grep("^contam", tmp[,annotCol[1]]) # specific to Fragpipe
if(length(chConta) >0) annot[chConta,"Contam"] <- TRUE
## get more species annot; separate multi-species (create columns 'Accession','GeneName','Species','SpecType')
chSp <- is.na(annot[,"Species"]) | nchar(annot[,"Species"]) <2
if(any(chSp)) { chSep <- grep("_", annot[which(chSp),"EntryName"]) # look for eg 'TRY1_BOVIN'
if(length(chSep) >0) { chSep <- which(chSp)[chSep]
spe2 <- sub("[[:alnum:]]+_", "", annot[chSep,"EntryName"])
if(debug) message(fxNa,"Recover Species name for ",length(chSep)," entries based on 'EntryName'")
commonSpec <- .commonSpecies()
chSp3 <- which(sub("^_","",commonSpec[,1]) %in% spe2)
if(length(chSp3) >0) for(i in chSp3) annot[chSep,"Species"] <- commonSpec[i,2]
}
chSp <- is.na(annot[,"Species"]) | nchar(annot[,"Species"]) <2 } # update
if(debug) {message(fxNa,"rfp6d .. "); rfp6d <- list(annot=annot,tmp=tmp,chSp=chSp,specPref=specPref,annotCol=annotCol,PSMCol=PSMCol,PepCol=PepCol)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rfp6b .. ")}
if(!silent) {
if(any(chSp, na.rm=TRUE) && !all(chSp)) message(fxNa,"Note: ",sum(chSp)," (out of ",nrow(tmp),") lines with unrecognized species")
if(!all(chSp)) { tab <- table(annot[,"Species"])
tab <- rbind(names(tab), paste0(": ",tab," ; "))
if(!silent) message(fxNa,"Count by 'specPref' : ",apply(tab, 2, paste)) }} # all lines assigned
if(debug) {message(fxNa,"rfp6e .. ")}
## check for unique annot[,"Accession"]
chDu <- duplicated(annot[,"Accession"], fromLast=FALSE)
if(any(chDu)) { warning(fxNa," NOTE : ",sum(chDu)," entries have same '",annotCol[2],"' (ie Accession) - correcting to UNIQUE !")
rownames(tmp) <- rownames(annot) <- wrMisc::correctToUnique(annot[,"Accession"], sep="_", atEnd=TRUE, callFrom=fxNa)
} else { rownames(annot) <- rownames(tmp) <- annot[,"Accession"] }
if(debug) { message(fxNa,"rfp7 .. dim annot ",nrow(annot)," and ",ncol(annot)); rfp7 <- list() }
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) >1) {
## explicit columns (for abundance/quantitation data)
if(is.character(quantCol)) quantCol <- match(quantCol, colnames(tmp))
} else {
## pattern search (for abundance/quantitation data)
## problem : extract 'xx1.Intensity' but NOT 'xx.MaxLFQ.Intensity'
useMaxLFQItens <- FALSE
quantColIni <- quantCol <- grep(quantCol, colnames(tmp))
chLFQ <- grep("MaxLFQ\\.", colnames(tmp)[quantCol])
if(length(chLFQ) >0) { if(!silent && length(chLFQ)==length(quantCol)) message(fxNa,"All quantification columns are MaxLFQ !")
if(length(chLFQ) < length(quantCol)) quantCol <- quantCol[(if(useMaxLFQItens) 1 else -1) *chLFQ] else warning("No non-MaxLFQ data available, using MaxLFQ.Intensity instead !") }
}
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
abund <- as.matrix(tmp[, quantCol])
rownames(abund) <- annot[,"Accession"]
if(debug) { message(fxNa,"rfp8 .. dim abund ",nrow(abund)," and ",ncol(abund)) ; rfp8 <- list(abund=abund,sampleNames=sampleNames,annot=annot,tmp=tmp,annot=annot,specPref=specPref)}
## check & clean abundances
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"Valid 'sampleNames' were provided rfp8b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
}
if(debug) { message(fxNa,"rfp9"); rfp9 <- list(abund=abund,sampleNames=sampleNames,annot=annot,tmp=tmp,annot=annot,specPref=specPref,FDRCol=FDRCol)}
## (optional) filter by FDR (so far use 1st of list where matches are found from argument FDRCol)
if(length(FDRCol) >0) {
if(FDRCol[[1]] %in% colnames(tmp)) {
if(length(FDRCol[[2]]) >0 && is.numeric(FDRCol[[2]])) FdrLim <- FDRCol[[2]][1] else {
if(!silent) message(fxNa,"No valid FDR limit found, using default 0.95 (ie 5% filter)")
FdrLim <- 0.95 }
rmLi <- which(as.numeric(tmp[,FDRCol[[1]]]) < FdrLim) # default 5% 'FDR' filter
if(length(rmLi) == nrow(abund)) warning(fxNa,"Omitting FDR-filter; otherwise NO MORE LINES/proteins remaining !!!") else {
if(length(rmLi) >0) {
if(!silent) message(fxNa,"Removing ",length(rmLi)," lines/proteins removed as NOT passing protein identification filter at ",FdrLim, if(debug) " rfp9b")
abund <- abund[-rmLi,]
if(length(dim(abund)) <2) abund <- matrix(abund, nrow=1, dimnames=list(rownames(annot)[-rmLi], names(abund)))
annot <- if(nrow(abund) ==1) matrix(annot[-rmLi,], nrow=1, dimnames=list(rownames(abund), colnames(annot))) else annot[-rmLi,]
tmp <- if(nrow(abund) ==1) matrix(tmp[-rmLi,], nrow=1, dimnames=list(rownames(abund), colnames(tmp))) else tmp[-rmLi,]}
}
}
}
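    ## e.g. with the default FDRCol=list("Protein.Probability", lim=0.99) all lines whose
    ##  'Protein.Probability' lies below 0.99 get removed (unless no lines would remain)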
if(debug) { message(fxNa,"rfp11 .. length(FDRCol) ",length(FDRCol)," dim annot ",nrow(annot)," and ",ncol(annot)); rfp11 <- list()}
PSMCol <- "\\.Spectral\\.Count$" # pattern searching tag for PSM-data
PepCol <- "Unique\\.Spectral\\.Count$" # pattern searching tag for Number of peptides
PSMColExcl <- "Total\\.Spectral\\.Count$" # exclude this pattern searching tag for PSM
usTy <- c("PSM", "UniquePeptides")
## optional/additional counting results (PSM, no of peptides)
PSMExl <- grep(paste0("Combined",PSMCol), colnames(tmp))
PepExl <- grep(paste0("Combined\\.",PepCol), colnames(tmp))
PSMCol <- if(length(PSMCol) ==1) grep(PSMCol, colnames(tmp)) else NULL
PepCol <- if(length(PepCol) ==1) grep(PepCol, colnames(tmp)) else NULL
    rmCo <- which(PSMCol %in% c(PepCol, PSMExl, grep(PSMColExcl, colnames(tmp))))     # locate unwanted count-columns
    if(length(rmCo) >0) PSMCol <- PSMCol[-rmCo]                                       # remove only if something was found (avoid emptying PSMCol)
    if(length(PepExl) >0) PepCol <- PepCol[-which(PepCol %in% PepExl)]
if(any(c(length(PSMCol), length(PepCol)) >0)) {
counts <- array(NA, dim=c(nrow(abund), ncol(abund), length(usTy)), dimnames=list(rownames(abund),colnames(abund), usTy))
if(length(PSMCol) >0) counts[,,"PSM"] <- as.matrix(tmp[,PSMCol])
if(length(PepCol) >0) counts[,,"UniquePeptides"] <- as.matrix(tmp[,PepCol])
} else counts <- NULL
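    ## 'counts' is organized as a 3-dim array : lines x samples x count-type, so that
    ##  e.g. counts[,,"PSM"] returns the spectral-counts per protein and sample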
if(debug) {message(fxNa,"rfp12 .. ");
      rfp12 <- list(tmp=tmp,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,
        refLi=refLi,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr) }
## correct colnames from 'Xabc_1.Intensity' to 'abc_1'
ch1 <- grepl("^X[[:digit:]]", colnames(abund))
if(any(ch1)) colnames(abund)[which(ch1)] <- sub("^X","", colnames(abund)[which(ch1)])
colnames(abund) <- sub("\\.Intensity$","", colnames(abund))
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## set 0 values to NA (avoid -Inf at log2)
if(!isFALSE(read0asNA)) { ch0 <- abund ==0
if(any(ch0, na.rm=TRUE)) abund[which(ch0)] <- NA }
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE)
if(debug) { message(fxNa,"rfp13 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")
rfp13 <- list(tmp=tmp,quant=quant,abund=abund,annot=annot,sdrf=sdrf, fileName=fileName,path=path,paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,groupPref=groupPref,
refLi=refLi,refLiIni=refLiIni,specPref=specPref,read0asNA=read0asNA,quantCol=quantCol,annotCol=annotCol,separateAnnot=separateAnnot,FDRCol=FDRCol,gr=gr,silent=silent,debug=debug) }
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
      setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="FP", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rfp13b .."); rfp13b <- list()}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="FP", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- setupSd$sampleNames
if(debug) {message(fxNa,"Read sample-meta data, rfp14"); rfp14 <- list(setupSd=setupSd, sdrf=sdrf, suplAnnotFile=suplAnnotFile,quant=quant,abund=abund,plotGraph=plotGraph)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="FragPipe",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, las=NULL, silent=silent, callFrom=fxNa, debug=debug)
if(debug) {message(fxNa,"Read sample-meta data, rfp15"); rfp15 <- list()}
## meta-data
notes <- c(inpFile=paFi, qmethod="FragPipe", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
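## Hedged usage sketch for readFragpipeFile() : reads the tiny example file assumed to ship
## with this package; availability of $counts depends on the columns found in the file
# path1 <- system.file("extdata", package="wrProteo")
# dataFP <- readFragpipeFile("tinyFragpipe1.tsv.gz", path=path1,
#   specPref=c(conta="conta|CON_|LYSC_CHICK", mainSpecies="MOUSE"), plotGraph=FALSE)
# dataFP$counts[1:3,, "UniquePeptides"]    # unique-peptide counts (if available)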
## file : /scratch/gouwar.j/cran-all/cranData/wrProteo/R/readFragpipeFile.R
#' Read tabulated files imported from MassChroQ
#'
#' Quantification results from MassChroQ should first be treated using the R-package MassChroqR (both distributed by the PAPPSO at http://pappso.inrae.fr/)
#' for normalization on peptide-level and combination of peptide values into protein abundances.
#'
#' The final output of this function is a list containing the elements: \code{$annot}, \code{$raw}, \code{$quant} and \code{$notes}, or returns a data.frame with the entire content of the file if \code{separateAnnot=FALSE}. Other list-elements remain empty to keep the format compatible to other import functions.
#'
#' @details
#' This function has been developed using MassChroQ version 2.2 and R-package MassChroqR version 0.4.0. Both are distributed by the PAPPSO (http://pappso.inrae.fr/).
#' When saving quantifications generated in R as RData (with extension .rdata or .rda) using the R-packages associated with MassChroq, the ABUNDANCE_TABLE produced by mcq.get.compar(XICAB) should be used.
#'
#' After import data get (re-)normalized according to \code{normalizeMeth} and \code{refLi}, and boxplots or vioplots drawn.
#'
#'
#' @param fileName (character) name of file to be read (may be tsv, csv, rda or rdata); both US and European csv formats are supported
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method (will be sent to \code{\link[wrMisc]{normalizeThis}})
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param refLi (character or integer) custom specification of which lines of data belong to the main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for comtaminants - will be marked as 'conta', 2nd for main species- marked as 'mainSpe',
#' and optional following ones for supplemental tags/species - maked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided) \code{}
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimenal meta-data: if character, this may be the ID at ProteomeExchange. Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, it gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by ProteomeDiscoverer; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{TRUE} defaults to file '*InputFiles.txt' (needed to match information of \code{sdrf}) which can be exported next to main quantitation results;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$sampleSetup}, \code{$quantNotes} and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readProlineFile}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' fiNa <- "tinyMC.RData"
#' dataMC <- readMassChroQFile(fileName=fiNa, path=path1)
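#' # Minimal sketch (not run) : normalizing on a custom subset of lines, assuming
#' # a 'SpecType' tag (eg 'mainSpe') was set via 'specPref' when reading :
#' # dataMC2 <- readMassChroQFile(fileName=fiNa, path=path1, refLi="mainSpe",
#' #   plotGraph=FALSE)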
#' @export
readMassChroQFile <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, refLi=NULL, separateAnnot=TRUE, titGraph="MassChroQ", wex=NULL,
specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"), gr=NULL, sdrf=NULL, suplAnnotFile=FALSE, groupPref=list(lowNumberOfGroups=TRUE), plotGraph=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read MassChroQ (pre-)treated data
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readMassChroQFile")
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
if(plotGraph) on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!requireNamespace("utils", quietly=TRUE)) stop("package 'utils' not found ! Please install first from CRAN")
tmp <- list() # initialize
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize for sdrf annotation
counts <- NULL # so far PSM data are not accessible
## check & read file
paFi <- wrMisc::checkFilePath(fileName, path, expectExt=NULL, compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(length(c(grep("\\.txt$",paFi), grep("\\.txt\\.gz$",paFi))) >0) tmp[[1]] <- try(utils::read.delim(paFi, stringsAsFactors=FALSE), silent=TRUE) # read tabulated text-file
if(length(c(grep("\\.csv$",paFi), grep("\\.csv\\.gz$",paFi))) >0) tmp[[2]] <- try(utils::read.csv(paFi, stringsAsFactors=FALSE), silent=TRUE) # read US csv-file
if(length(c(grep("\\.csv$",paFi), grep("\\.csv\\.gz$",paFi))) >0) tmp[[3]] <- try(utils::read.csv2(paFi, stringsAsFactors=FALSE), silent=TRUE) # read Euro csv-file
if(length(c(grep("\\.tsv$",paFi), grep("\\.tsv\\.gz$",paFi))) >0) tmp[[4]] <- try(utils::read.csv(file=paFi, stringsAsFactors=FALSE, sep='\t', header=TRUE)) # read US comma tsv-file
if(length(c(grep("\\.rda$",paFi), grep("\\.rdata$",tolower(paFi)))) >0) {
ls1 <- ls()
tmp[[5]] <- try(load(paFi))
if(!inherits(tmp[[5]], "try-error")) { # dont know under which name the object was saved in RData..
if(length(ls1) +2 ==length(ls())) {
tmp[[5]] <- get(ls()[which(!ls() %in% ls1 & ls() != "ls1")]) # found no way of removing initial object
if(!silent) message(fxNa,"Loading R-object '",ls()[which(!ls() %in% ls1 & ls() != "ls1")],"' as quantification data out of ",paFi)
} else stop(" Either .RData is empty or element loaded has name of one of the arguments of this function and can't be recognized as such")
} else stop("Failed to load .RData") }
if(debug) {message(fxNa,"mc1")}
if(length(tmp) <1) stop("Failed to recognize file extensions of input data (unknown format)")
chCl <- sapply(tmp, inherits, "try-error")
if(all(chCl)) stop(" Failed to extract data from '",fileName,"' (check format & rights to read)")
nCol <- sapply(tmp, function(x) if(length(x) >0) {if(!inherits(x, "try-error")) ncol(x) else NA} else NA)
bestT <- which.max(nCol)
fiType <- c("txt","UScsv","EURcsv","tsv","RData")[bestT]
tmp <- tmp[[bestT]]
## tibble colnames may include/start with '#' ... adopt to rest
  corColNa <- grep("^#", colnames(tmp))
  if(length(corColNa) >0) colnames(tmp)[corColNa] <- sub("^#","X.", colnames(tmp)[corColNa])   # make colnames alike
if(debug) {message(fxNa,"mc2")}
## recover OS
tmp2 <- sub("^[[:alpha:]]+\\|","", rownames(tmp)) # trim heading database-name
annot <- cbind(Accession=sub("\\|[[:upper:]]+[[:digit:]]*_{0,1}[[:upper:]][[:print:]]*","",tmp2),
EntryName=sub("^[[:upper:]]+[[:digit:]]*\\|","",tmp2), GeneName=NA, Species=NA, Contam=NA, SpecType=NA) #
## extract species out of EntryName
commonSpec <- .commonSpecies()
spec <- apply(commonSpec, 1, function(x) grep(paste0(x[1],"$"), tmp2))
chLe <- sapply(spec,length) >0
if(any(chLe)) for(i in which(chLe)) annot[spec[[i]],"Species"] <- commonSpec[i,2]
## SpecType
if(length(specPref) >0) {
    for(i in 1:length(specPref)) { ch1 <- grep(specPref[i], rownames(tmp))
      if(length(ch1) >0) annot[ch1,"SpecType"] <- names(specPref)[i] } }
  ## check for unique rownames
ch1 <- duplicated(annot[,1])
if(all(!ch1)) rownames(annot) <- annot[,1]
if(debug) {message(fxNa,"mc3")}
## colnames for quantitative data
  if(length(sampleNames) >0) { if(length(sampleNames)==ncol(tmp)) colnames(tmp) <- sampleNames else message(fxNa,"Invalid entry of 'sampleNames' (incorrect length), ignoring ...") }
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(tmp)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## normalize
abund <- if(!is.matrix(tmp)) as.matrix(tmp) else tmp
quant <- wrMisc::normalizeThis(abund, method=normalizeMeth, mode="additive", refLines=refLi, callFrom=fxNa) #
if(debug) {message(fxNa,"mc4")}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="MC", path=path, abund=utils::head(abund), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rmc13b .."); rmc13b <- list(sdrf=sdrf,gr=gr,suplAnnotFile=suplAnnotFile,abund=abund, refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="MC", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) # add to all other import-functions ?
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- setupSd$sampleNames
if(debug) {message(fxNa,"Read sample-meta data, rmc14"); rmc14 <- list()}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="MassChroQ",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod="MassChroQ", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## prepare for final output
  if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=NULL, sampleSetup=setupSd, quantNotes=NULL, notes=notes) else data.frame(abund, annot)
}
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/readMassChroQFile.R
#' Read Quantitation Data-Files (proteinGroups.txt) Produced From MaxQuant At Protein Level
#'
#' Protein quantification results from \href{https://www.maxquant.org}{MaxQuant} can be read using this function and relevant information extracted.
#' Input files compressed as .gz can be read as well.
#' The protein abundance values (XIC), peptide counting information like number of unique razor-peptides or PSM values and sample-annotation (if available) can be extracted, too.
#' The protein abundance values may be normalized using multiple methods (median normalization as default), the determination of normalization factors can be restricted to specific proteins
#' (normalization to bait protein(s), or to invariable matrix of spike-in experiments).
#' The protein annotation data gets parsed to extract specific fields (ID, name, description, species ...).
#' Besides, a graphical display of the distribution of protein abundance values may be generated before and after normalization.
#'
#' @details
#' \href{https://www.maxquant.org}{MaxQuant} is proteomics quantification software provided by the Max-Planck-Institute.
#' By default MaxQuant writes the results of each run to the path \code{combined/txt}, from there (only) the files
#' 'proteinGroups.txt' (main quantitation at protein level), 'summary.txt' and 'parameters.txt' will be used.
#'
#' Meta-data describing the samples and experimental setup may be available from two sources :
#' a) The file \code{summary.txt} which gets produced by MaxQuant in the same folder as the main quantification data.
#' b) Furthermore, meta-data deposited as \code{sdrf} at Pride can be retrieved (via the respective github page) when giving the accession number in argument \code{sdrf}.
#' Then, the meta-data will be examined for determining groups of replicates and
#' the results thereof can be found in $sampleSetup$levels.
#' Alternatively, a dataframe formatted like sdrf-files (ie for each sample a separate line, see also function \code{readSdrf}) may be given.
#' In tricky cases it is also possible to precise the column-name to use for defining the groups of replicates or the method for automatically choosing
#' the most suited column via the 2nd value of the argument \code{sdrf}.
#' Please note, that sdrf is still experimental and only a small fraction of proteomics-data on Pride have been annotated accordingly.
#' If a valid sdrf is furnished, its information has priority over the information extracted from the MaxQuant produced file summary.txt.
#'
#' This import-function has been developed using MaxQuant versions 1.6.10.x to 2.0.x, the format of the resulting file 'proteinGroups.txt' is typically well conserved between versions.
#' The final output is a list containing these elements: \code{$raw}, \code{$quant}, \code{$annot}, \code{$counts}, \code{$sampleSetup}, \code{$quantNotes}, \code{$notes}, or (if \code{separateAnnot=FALSE}) data.frame
#' with annotation- and main quantification-content. If \code{sdrf} information has been found, an additional list-element \code{setup}
#' will be added containing the entire meta-data as \code{setup$meta} and the suggested organization as \code{setup$lev}.
#'
#'
#' @param path (character) path of file to be read
#' @param fileName (character) name of file to be read (default 'proteinGroups.txt' as typically generated by MaxQuant in txt folder). Gz-compressed files can be read, too.
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}})
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants
#' @param pepCountCol (character) pattern to search among column-names for count data (1st entry for 'Razor + unique peptides', 2nd for 'Unique peptides', 3rd for 'MS.MS.count' (PSM))
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoiding -Inf in log2 results)
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param extrColNames (character) column names to be read (1st position: prefix for LFQ quantitation, default 'LFQ.intensity'; 2nd: column name for protein-IDs, default 'Majority.protein.IDs'; 3rd: column names of fasta-headers, default 'Fasta.headers', 4th: column name for number of protein IDs matching, default 'Number.of.proteins')
#' @param specPref (character) prefix to identifiers allowing to separate i) recognize contamination database, ii) species of main identifications and iii) spike-in species
#' @param refLi (character or integer) custom specify which line of data should be used for normalization, ie which line is main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param remRev (logical) option to remove all protein-identifications based on reverse-peptides
#' @param remConta (logical) option to remove all proteins identified as contaminants
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'   the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by MaxQuant; if \code{gr} is provided, it gets priority for grouping of replicates
#' if \code{TRUE} default to files 'summary.txt' (needed to match information of \code{sdrf}) and 'parameters.txt' which can be found in the same folder as the main quantitation results;
#'  if \code{character} the respective file-names (relative or absolute path), 1st is expected to correspond to 'summary.txt' (tabulated text, the samples as given to MaxQuant) and 2nd to 'parameters.txt' (tabulated text, all parameters given to MaxQuant)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical) optional plot vioplot of initial and normalized data (using \code{normalizeMeth}); alternatively the argument may contain numeric details that will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (numeric) relative expansion factor of the violin in plot
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with 'PSM' and 'NoOfRazorPeptides',
#' \code{$quantNotes}, \code{$notes} and optional \code{setup} for meta-data from \code{sdrf}; or a data.frame with quantitation and annotation if \code{separateAnnot=FALSE}
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readProteomeDiscovererFile}}; \code{\link{readProlineFile}} (and other import functions), \code{\link{matrixNAinspect}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' # Here we'll load a short/trimmed example file (thus not the MaxQuant default name)
#' fiNa <- "proteinGroupsMaxQuant1.txt.gz"
#' specPr <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST", spike="HUMAN_UPS")
#' dataMQ <- readMaxQuantFile(path1, fileName=fiNa, specPref=specPr, titGraph="tiny MaxQuant")
#' summary(dataMQ$quant)
#' matrixNAinspect(dataMQ$quant, gr=gl(3,3))
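#' # Sketch only (not run) : grouping replicates via sdrf meta-data from Pride;
#' # the accession used here is illustrative, any valid ProteomeExchange ID works
#' # dataMQ2 <- readMaxQuantFile(path1, fileName=fiNa, specPref=specPr,
#' #   sdrf="PXD001819", plotGraph=FALSE)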
#' @export
readMaxQuantFile <- function(path, fileName="proteinGroups.txt", normalizeMeth="median", quantCol="LFQ.intensity", contamCol="Potential.contaminant",
pepCountCol=c("Razor + unique peptides","Unique peptides","MS.MS.count"), read0asNA=TRUE, refLi=NULL, sampleNames=NULL,
extrColNames=c("Majority.protein.IDs","Fasta.headers","Number.of.proteins"), specPref=c(conta="conta|CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"),
remRev=TRUE, remConta=FALSE, separateAnnot=TRUE, gr=NULL, sdrf=NULL, suplAnnotFile=NULL, groupPref=list(lowNumberOfGroups=TRUE),
titGraph=NULL, wex=1.6, plotGraph=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## prepare
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readMaxQuantFile")
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
remStrainNo <- TRUE # if TRUE extract Species in very stringent pattern
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
## functions
## init check
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("Package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
## check if path & file exist
if(!grepl("\\.txt$|\\.txt\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="txt", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rMQ0b .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check format or if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a MaxQuant-file") }
tmp <- NULL
return(NULL)
} else {
## start checking format
if(debug) { message(fxNa,"rMQ1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rMQ1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,read0asNA=read0asNA,quantCol=quantCol,
refLi=refLi,separateAnnot=separateAnnot )} # annotCol=annotCol,FDRCol=FDRCol
## check which columns can be extracted (for annotation)
if(is.integer(contamCol) && length(contamCol) >0) contamCol <- colnames(tmp)[contamCol]
extrColNames <- union(extrColNames, contamCol) # add contamCol if not included in extrColNames
chCol <- extrColNames %in% colnames(tmp)
if(!any(chCol, na.rm=TRUE)) { extrColNames <- gsub("\\."," ",extrColNames)
chCol <- extrColNames %in% colnames(tmp) }
if(all(!chCol, na.rm=TRUE)) stop("Problem locating annotation columns (",wrMisc::pasteC(extrColNames, quoteC="''"),")")
if(any(!chCol, na.rm=TRUE) ) {
if(!silent) message(fxNa,"Note: Can't find columns ",wrMisc::pasteC(extrColNames[!chCol], quoteC="'")," !")
}
## 'REVERSE' peptides
chMajProCol <- extrColNames[1] %in% colnames(tmp)
if(chMajProCol) {
chRev <- grep("REV__", tmp[,extrColNames[1]])
if(length(chRev) >0) {
if(!silent) message(fxNa,"Note: Found ",length(chRev)," out of ",nrow(tmp)," proteins marked as 'REV_' (reverse peptide identification)", if(isTRUE(remRev)) " - Removing")
        if(isTRUE(remRev)) tmp <- if(length(chRev) < nrow(tmp) -1) tmp[-1*chRev,] else matrix(tmp[-1*chRev,], nrow=nrow(tmp)-length(chRev), dimnames=list(rownames(tmp)[-1*chRev], colnames(tmp)))
}
## remove MaxQuant internal contaminants CON__
if(isTRUE(remConta) && nrow(tmp) >0) { isConta <- grep("CON__{0,1}[[:alpha:]]+", tmp[,extrColNames[1]])
if(length(isConta) >0) {
if(!silent) message(fxNa,"Note: Found ",length(isConta)," out of ",nrow(tmp)," proteins marked as 'CON_' (contaminants) - Removing")
tmp <- if(length(isConta) < nrow(tmp) -1) tmp[-1*isConta,] else matrix(tmp[-1*isConta,], nrow=nrow(tmp)-length(isConta), dimnames=list(rownames(tmp)[-1*isConta], colnames(tmp)))
} }
} else if(!silent) message(fxNa,"BIZZARE, trouble ahead : Unable to find column '",extrColNames[1],"' (from argument 'extrColNames')")
if(debug) {message(fxNa,"rMQ2"); rMQ2 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,chRev=chRev,remConta=remConta)}
}
if(length(tmp) >0) {
## further extracting : quantitation
grepX <- function(x) grep(paste0(x,"\\."), colnames(tmp))
useDCol <- if(length(quantCol)==1) grepX(quantCol) else unique(as.integer(sapply(quantCol, grepX)))
if(length(useDCol) <1) stop("NO columns matching terms ",wrMisc::pasteC(quantCol, quoteC="'")," from argument 'quantCol' found !")
#not needed#MQdat <- as.matrix(tmp[,useDCol])
quantColP <- NULL # initialize
if(length(quantCol) <1) stop(" 'quantCol' must be provided !")
if(length(quantCol) >1) { abund <- as.matrix(wrMisc::extrColsDeX(tmp, extrCol=quantCol, doExtractCols=TRUE, silent=silent, callFrom=fxNa))
} else { chP <- substr(quantCol, nchar(quantCol), nchar(quantCol)) != "."
quantColP <- quantCol
quantCol <- if(chP) grep(paste0(quantCol,"\\."), colnames(tmp)) else grep(quantCol, colnames(tmp))
chNa <- is.na(quantCol)
if(all(chNa, na.rm=TRUE)) stop("Could not find any of the columns specified in argument 'quantCol' !")
if(any(chNa, na.rm=TRUE)) {
if(!silent) message(fxNa,"Could not find columns ",wrMisc::pasteC(quantCol[which(chNa)],quote="'")," .. omit")
quantCol <- wrMisc::naOmit(quantCol)}
abund <- as.matrix(tmp[,quantCol]) } # abundance val
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(tmp[,quantCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
if(length(dim(abund)) <2 && !is.numeric(abund)) abund <- matrix(as.numeric(abund), ncol=ncol(abund), dimnames=dimnames(abund))
colnames(abund) <- if(length(quantColP)==1) sub(paste0(quantColP,"\\."),"", colnames(abund)) else wrMisc::.trimFromStart(wrMisc::.trimFromEnd(colnames(abund)))
if(debug) {message(fxNa,"rMQ3"); rMQ3 <- list(abund=abund,path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,chRev=chRev,remConta=remConta)}
## convert 0 to NA
if(!isFALSE(read0asNA)) {ch1 <- abund <= 0
if(any(ch1, na.rm=TRUE)) { abund[which(ch1)] <- NA
if(!silent) message(fxNa,"Transform ",sum(ch1),"(",100*round(sum(ch1)/length(ch1),3),"%) initial '0' values to 'NA'")}}
## further extracting : prepare for countig data
ch1 <- grep(" $",pepCountCol)
if(length(ch1) < length(pepCountCol)) {pepCountCol <- if(length(ch1) >0) paste0(pepCountCol[-1*which(ch1)]," ") else paste0(pepCountCol," ")} # add tailing ' ' (if not yet present)
if(length(grep("\\\\",pepCountCol)) <1) pepCountCol <- gsub("\\.","\\\\.",pepCountCol) # protect '.' "
## prepare for column-name style with '.' or '...'
tm2 <- lapply(as.list(pepCountCol), function(x) c(x, gsub(" ",".", sub(" \\+ ","...",x))) )
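    # each pattern is tried in 2 forms : as given (space-separated) and as R-style column-name, eg "Razor + unique peptides " -> "Razor...unique.peptides."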
names(tm2) <- pepCountCol
usePCol <- lapply(tm2, function(x) {ch1 <- lapply(x, grep, colnames(tmp)); if(length(ch1) >1) ch1[[which.max(sapply(ch1,length))]] else ch1[[1]]})
usePCol <- lapply(usePCol, wrMisc::naOmit)
ch2 <- sapply(usePCol, length) -ncol(abund)
if(any(ch2 >0, na.rm=TRUE)) usePCol[which(ch2 >0)] <- lapply(usePCol[which(ch2 >0)], function(x) x[-1])
ch2 <- sapply(usePCol, length) ==ncol(abund)
if(!silent && any(!ch2, na.rm=TRUE)) message(fxNa,"Could not find peptide counts columns (argument 'pepCountCol') matching to '",pepCountCol[which(!ch2)],"'")
if(debug) {message(fxNa,"rMQ4"); rMQ4 <- list(abund=abund,usePCol=usePCol,ch2=ch2,tm2=tm2,path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,chRev=chRev,remConta=remConta)}
## make array of PSM counts etc
if(any(ch2, na.rm=TRUE)) {
counts <- array(dim=c(nrow(tmp), ncol(abund), sum(ch2)), dimnames=list(NULL, colnames(abund), pepCountCol[which(ch2)]))
for(i in 1:sum(ch2)) counts[,,i] <- as.numeric(as.matrix(tmp[,usePCol[[which(ch2)[i]]] ]))
} else counts <- NULL
## Annotation
useACol <- list(annC=match(extrColNames, colnames(tmp)) )
annot <- as.matrix(tmp[,useACol$annC])
if(debug) {message(fxNa,"rMQ4b"); rMQ4b <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,counts=counts,
chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,annot=annot,remConta=remConta,specPref=specPref)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, useColumn=c("Majority.protein.IDs","Fasta.headers"), silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rMQ5"); rMQ5 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,counts=counts,
chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,annot=annot,remConta=remConta,specPref=specPref)}
## remove MQ-internal contaminants
if(remConta) {
conLi <- grep("CON__[[:alnum:]]", annot[,"Majority.protein.IDs"])
if(length(conLi) >0) {
iniLi <- nrow(annot)
annot <- annot[-conLi,]
abund <- abund[-conLi,]
#specMQ <- specMQ[-conLi]
        counts <- if(length(dim(counts))==3) counts[-conLi,,] else counts[-conLi,]
if(debug) message(fxNa,"Removing ",length(conLi)," instances of MaxQuant-contaminants to final ",nrow(annot)," lines/IDs")} }
## split Annotation
remHeader <- c("^conta\\|","^sp\\|")
MQan2 <- strsplit(sub(remHeader[1], "", sub(remHeader[2], "", annot[,"Majority.protein.IDs"])), "\\|")
MQanLe <- sapply(MQan2, length)
MQan3 <- matrix(NA, nrow=nrow(annot), ncol=2, dimnames=list(NULL, c("Accession","EntryName")))
chLe <- MQanLe==1
if(any(chLe, na.rm=TRUE)) MQan3[which(chLe),1] <- unlist(MQan2[which(chLe)])
chLe <- MQanLe==2
if(any(chLe, na.rm=TRUE)) MQan3[which(chLe),] <- matrix(unlist(MQan2[which(chLe)]), ncol=2, byrow=TRUE)
chLe <- MQanLe >2
locAccNo <- function(x) { # function to select AccessionNumner (eg P02768) and EntryName (eg ALBU_HUMAN) after strsplit() of concatenated annotation
accIn <- grep("^[[:upper:]]+[[:digit:]]+$|^[[:upper:]]+[[:digit:]]+\\-[[:digit:]]+$", x)
namId <- grep("[[:upper:]]_[[:upper:]]", x)
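      # accIn : candidate accession-number (eg 'P02768' or isoform 'P02768-2');  namId : candidate EntryName (eg 'ALBU_HUMAN')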
useInd <- c(acc=if(length(accIn) >0) accIn[1] else NA, name=if(length(namId) >0) namId[1] else NA)
chNA <- is.na(useInd)
if(any(chNA)) useInd[which(chNA)] <- (1:length(x))[-1*wrMisc::naOmit(unique(c(namId,useInd)))][1:sum(chNA)]
x[useInd] }
if(any(chLe, na.rm=TRUE)) MQan3[which(chLe),] <- t(sapply(MQan2[which(chLe)], locAccNo ))
chSemc <- grep(";", MQan3[,2]) # look for semicolon separator (eg "CATA_HUMAN_UPS;conta")
if(length(chSemc) >0) MQan3[chSemc,2] <- sub(";[[:print:]]+","",MQan3[chSemc,2]) # remove all after semicolon (eg "CATA_HUMAN_UPS;conta")
if(debug) {message(fxNa,"rMQ5c"); rMQ5c <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,counts=counts,
chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,annot=annot,remConta=remConta,specPref=specPref)}
## contaminants (fuse from column 'Potential.contaminant' and those found via specPref[1])
contam <- rep(FALSE, nrow(annot))
if("Potential.contaminant" %in% colnames(annot)) { chCo <- grepl("\\+",annot[,"Potential.contaminant"])
if(all(chCo)) { chCo <- FALSE ; warning(fxNa,"ALL proteins were marked as 'Potential.contaminant' ! Nothing would remain, thus ignoring..")}
if(any(chCo, na.rm=TRUE)) contam[which(chCo)] <- TRUE }
chSpPref <- names(specPref) %in% "conta"
if(any(chSpPref)) { specPrCont <- annot[,"SpecType"] %in% "conta"
if(any(specPrCont)) annot[which(specPrCont),"Potential.contaminant"] <- TRUE }
## extract/add GN
MQan3 <- cbind(MQan3,GN=NA)
GNLi <- grep("\\ GN=[[:upper:]]{2,}", annot[,"Fasta.headers"])
if(length(GNLi) >0) { zz <- sub("[[:print:]]+\\ GN=", "",annot[GNLi,"Fasta.headers"]) # remove surplus to left
MQan3[GNLi,"GN"] <- sub("[[:punct:]]$","", sub("\\ +$","", sub("\\ [[:print:]]+","",zz))) } # remove surplus to right (and right trailing space) and trailing ';'
if(debug) {message(fxNa,"rMQ6")}
## finalize annotation
annot <- cbind(MQan3, Species=NA, Contam=contam, annot)
## extract species according to custom search parameters 'specPref'
.annSpecies <- function(spe=c("_HUMAN","Homo sapiens"), anno=annot, exCoNa=extrColNames) {
## extract species tags out of annot[,"Majority.protein.IDs"], place as convert to regular name in anno, return matrix anno
ch1 <- grep(spe[1], anno[,exCoNa[2]])
if(length(ch1) >0) anno[ch1,"Species"] <- spe[2] #"Homo sapiens"
anno }
if(remStrainNo) {
commonSpec <- .commonSpecies()
for(i in 1:nrow(commonSpec)) annot <- .annSpecies(commonSpec[i,], annot, exCoNa=extrColNames)}
if(debug) {message(fxNa,"rMQ7"); rMQ7 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,chRev=chRev,quantCol=quantCol,remStrainNo=remStrainNo,
abund=abund,chNum=chNum,ch2=ch2, annot=annot,chLe=chLe,MQan2=MQan2,MQan3=MQan3,contam=contam,GNLi=GNLi,remConta=remConta,counts=counts)}
## now complete (overwrite) by info extracted from fasta : ' OS='
#overWriteSpecies <- TRUE
#chSpe <- if(overWriteSpecies) 1:nrow(annot) else which(is.na(annot[,"Species"]) | nchar(annot[,"Species"]) <2) # missing species-info
chSpe <- grep("[[:print:]]+\\ OS=[[:upper:]][[:lower:]]+", annot[,"Fasta.headers"]) # limit to those not found by species extension on protein name
if(length(chSpe) >0) { # if column Species is NA and OS= in fasta-header
OS <- sub("[[:print:]]+\\ OS=","", annot[chSpe,"Fasta.headers"])
if(!remStrainNo) {
## keep strain information : need to first separate entries with (strain and treat separately from rest (below)
## not finished
## remove remaining tailing semicolon to comma (in Species)
ch1 <- grep(";$|,$", OS)
if(length(ch1) >0) OS[ch1] <- sub(";+$|,*$","", OS[ch1])
## remove any other tailing tags (like OX=)
ch1 <- grep("\\ [[:upper:]]{2}=",annot[,"Species"])
if(length(ch1) >0) annot[ch1,"Species"] <- sub("\\ [[:upper:]]{2}=[[:print:]]*","",annot[ch1,"Species"])
OS <- annot[chSpe,"Species"]
} else { ## strict 2 word after OS= (strain names will be cut)
ch1 <- grep("^[[:upper:]][[:lower:]]*\\ [[:lower:]]+\\ [[:print:]]", OS)
if(length(ch1) > 0) { nch <- nchar(sub("^[[:upper:]][[:lower:]]*\\ [[:lower:]]+\\ ", "", OS[ch1])) # loose strain information
OS[ch1] <- substr(OS[ch1], 1, nchar(OS[ch1]) -nch -1) }
#annot[chSpe,"Species"] <- OS
}
## check/complete for truncated species names (ie names found) inside other ones
OS <- gsub(";{1,5}$", "", OS) # remove tailing separators
OSna <- unique(OS)
ch1 <- nchar(OSna) <1
if(debug) {message(fxNa,"rMQ7b")}
if(any(ch1, na.rm=TRUE)) OSna <- OSna[which(nchar(OSna) >0)] # (just in case) remove empty tags
ch2 <- lapply(OSna, grep, OSna)
chTr <- sapply(ch2, length) >1
if(any(chTr, na.rm=TRUE)) { if(!silent) message(fxNa,"Found ",sum(chTr)," species name(s) appearing inside other ones, assume as truncated (eg ",OSna[which(chTr)[1]],")")
for(i in which(chTr)) OS[which(OS==OSna[i])] <- OSna[ch2[[i]][1]]
}
annot[chSpe,"Species"] <- OS
if(debug) {message(fxNa,"rMQ7c")}
}
if(!silent) { chSp <- sum(is.na(annot[,"Species"]))
if(chSp >0) message(fxNa,"Note: ",chSp," proteins with unknown species")
tab <- table(annot[,"Species"])
if(length(tab) >0) {
        tab <- rbind(names(tab), paste0(": ",tab,", "))
        message(fxNa," data by species : ", paste(apply(tab, 2, paste, collapse=""), collapse="")) } }   # all lines assigned
if(debug) {message(fxNa,"rMQ8")}
## MaxQuant internal contaminants specific : remove non-protein DB parts - if possible, eg "CON__ENSEMBL:ENSBTAP00000007350;CON__P01030" -> "CON__P01030"
conID <- paste0("CON__",c("ENSEMBL","REFSEQ","H-INV"),":")
conID <- paste0(conID, c("[[:upper:]]+[[:digit:]]*;{0,1}", "[[:upper:]]+_[[:digit:]]+;{0,1}", "[[:upper:]]+[[:digit:]]+;{0,1}"))
acc1 <- annot[,1]
for(i in 1:length(conID)) {
acc2 <- acc1 # need previous 'status' to compare if all text disappears
acc1 <- sub(conID[i], "", acc1)
chLe <- nchar(acc1) <2
if(any(chLe, na.rm=TRUE)) acc1[which(chLe)] <- sub("CON__","", acc2[which(chLe)]) } # remove entire entry only if something (else) remains
    ## remove first of CON_ entries (only if at least 3 characters remain)
ch2 <- grep("CON__{0,1}[A-Z0-9]+;", acc1)
if(length(ch2) >0) { acc2 <- acc1
acc1 <- sub("CON__{0,1}[A-Z0-9]+;", "", acc1)
chLe <- nchar(acc1) <2
if(any(chLe, na.rm=TRUE)) acc1[which(chLe)] <- sub("CON__","", acc2[which(chLe)]) } # remove entire entry only if something (else) remains
## remove first of "CON_" marks
ch2 <- grep("CON_", acc1)
if(length(ch2) >0) acc1[ch2] <- sub("CON__{0,1}","", acc1[ch2])
annot[,1] <- acc1
if(debug) {message(fxNa,"rMQ8b")}
## check for composite Accession names, keep only part
#ch1 <- grep(",|;|_|\\(|\\|", annot[,1]) # note: need to not exclude/mark '-'
ch1 <- grep(";", annot[,1]) # note: need to not exclude/mark '-'
if(length(ch1) >0) {
## remove 1st part of CON__
if(!silent) message(fxNa,"Found ",length(ch1)," composite accession-numbers (eg ",annot[ch1[1],1],"), truncating ")
ch2 <- sort(union(grep("^CON_",annot[ch1,1]), grep(";CON_",annot[ch1,1]))) # if composite Acc number contains CON_, remove this part (and 1st of keep rest)
if(length(ch2) >0) {
ch3 <- gsub("CON__{0,1}[0-9A-Z]+[-0-9;]*","", annot[ch1[ch2],1]) # remove CON__A0B1C2 or CON__A0B1C2-1 etc
ch3b <- nchar(ch3) >3
if(any(ch3b, na.rm=TRUE)) annot[ch1[ch2[which(ch3b)]],1] <- ch3[which(ch3b)]
}
annot[ch1,1] <- sub(";$","",sub("^;","",annot[ch1,1])) # remove heading or tailing ';'
ch1 <- grep(",|;|\\(|\\|", annot[,1])
## keep 1st part after separator characters
if(length(ch1) >0) annot[ch1,1] <- sub(paste(paste0(c(",", ";", "\\(", "\\|"), "[[:print:]]*"), collapse="|"), "", annot[ch1,1]) # keep 1st ID: remove all after separator ..
}
if(debug) {message(fxNa,"rMQ8c")}
## extract UniProtID as EntryName
tmp <- sub("\\ [[:alnum:]][[:print:]]+", "", annot[,"Fasta.headers"])
annot[,"EntryName"] <- sub("^[[:print:]]+\\|[[:alnum:]]+\\|","",tmp) # also remove _UPS?
colnames(annot)[which(colnames(annot)=="GN")] <- "GeneName" # correct colname
annot <- cbind(annot[,1:6], Description=sub("\\ $","",sub("[[:upper:]]{2}=[[:print:]]+","",substring(annot[,"Fasta.headers"], nchar(tmp) +2))), annot[,7:ncol(annot)])
if(debug) {message(fxNa,"rMQ8d")}
## look for unique col from $annot to use as rownames
chAn <- colSums(apply(annot[,c(1:min(ncol(annot),7))], 2, duplicated), na.rm=TRUE) # look at first 6 cols : how many elements per column duplicated
if(!silent) message(fxNa,"Use column '",colnames(annot)[which.min(chAn)],"' as identifyer (has fewest, ie ",chAn[which.min(chAn)]," duplicated entries) as rownames")
rownames(abund) <- rownames(annot) <- if(any(chAn==0)) annot[,which(chAn==0)[1]] else wrMisc::correctToUnique(annot[,which.min(chAn)], callFrom=fxNa)
if(length(counts) >0) rownames(counts) <- rownames(annot)
if(debug) {message(fxNa,"rMQ9"); rMQ9 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,
annot=annot,chLe=chLe,MQan2=MQan2,MQan3=MQan3,refLi=refLi,contam=contam,GNLi=GNLi,remConta=remConta)}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=TRUE)
if(inherits(quant, "try-error")) { warning(fxNa,"PROBLEMS ahead : Unable to normalize as log2-data !!") }
if(debug) {message(fxNa,"rMQ10"); rMQ10 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chMajProCol=chMajProCol,chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,
quant=quant,annot=annot,chLe=chLe,MQan2=MQan2,MQan3=MQan3,contam=contam,GNLi=GNLi,remConta=remConta)}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="MQ", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rMQ13 .."); rMQ13 <- list(sdrf=sdrf,gr=gr,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="MQ", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) # add to all other import-functions ?
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rMQ14"); rMQ14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,paFi=paFi,infoDat=infoDat,normalizeMeth=normalizeMeth)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="MaxQuant",
      refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod="MaxQuant", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/readMaxQuantFile.R
#' Read Peptide Identification and Quantitation Data-Files (peptides.txt) Produced By MaxQuant
#'
#' Peptide level identification and quantification data produced by \href{https://www.maxquant.org/}{MaxQuant} can be read using
#' this function and relevant information extracted.
#' Input files compressed as .gz can be read as well.
#' The peptide abundance values (XIC), peptide counting information and sample-annotation (if available) can be extracted, too.
#'
#'
#' The peptide annotation data gets parsed to extract specific fields (ID, name, description, species ...).
#' Besides, a graphical display of the distribution of peptide abundance values may be generated before and after normalization.
#'
#' @details
#' \href{https://www.maxquant.org/}{MaxQuant} is proteomics quantification software provided by the MaxPlanck institute.
#' By default MaxQuant writes the results of each run to the path \code{combined/txt}, from there (only) the files
#' 'peptides.txt' (main quantitation at peptide level), 'summary.txt' and 'parameters.txt' will be used for this function.
#'
#' Meta-data describing the samples and experimental setup may be available from two sources :
#' a) The file \code{summary.txt} which gets produced by MaxQuant in the same folder as the main quantification data.
#' b) Furthermore, meta-data deposited as \code{sdrf} at Pride can be retrieved (via the respective github page) when giving
#' the accession number in argument \code{sdrf}.
#' Then, the meta-data will be examined for determining groups of replicates and
#' the results thereof can be found in $sampleSetup$levels.
#' Alternatively, a dataframe formatted like sdrf-files (ie for each sample a separate line, see also function \code{readSdrf}) may be given.
#' In tricky cases it is also possible to precise the column-name to use for defining the groups of replicates or the method for automatically choosing
#' the most suited column via the 2nd value of the argument \code{sdrf}, see also the function \code{defineSamples} (which gets used internally).
#' Please note, that sdrf is still experimental and only a small fraction of proteomics-data on Pride have been annotated accordingly.
#' If a valid sdrf is furnished, its information has priority over the information extracted from the MaxQuant produced file summary.txt.
#'
#' This function has been developed using MaxQuant versions 1.6.10.x to 2.0.x, the format of the resulting file 'peptides.txt'
#' is typically well conserved between versions.
#' The final output is a list containing these elements: \code{$raw}, \code{$quant}, \code{$annot}, \code{$counts}, \code{$sampleSetup},
#' \code{$quantNotes}, \code{$notes}, or (if \code{separateAnnot=FALSE}) data.frame
#' with annotation- and main quantification-content. If \code{sdrf} information has been found, an additional list-element \code{setup}
#' will be added containing the entire meta-data as \code{setup$meta} and the suggested organization as \code{setup$lev}.
#'
#'
#' @param path (character) path of file to be read
#' @param fileName (character) name of file to be read (default 'peptides.txt' as typically generated by MaxQuant in txt folder). Gz-compressed files can be read, too.
#' @param normalizeMeth (character) normalization method (for details see \code{\link[wrMisc]{normalizeThis}})
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants
#' @param pepCountCol (character) pattern to search among column-names for count data (defaults to 'Experiment')
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param extrColNames (character) column names to be read (default 'Sequence','Proteins','Leading.razor.protein','Start.position',
#'  'End.position','Mass','Missed.cleavages','Unique..Groups.','Unique..Proteins.' and 'Charges', as typically found in MaxQuant 'peptides.txt')
#' @param specPref (character) prefix to identifiers allowing to separate i) recognize contamination database,
#' ii) species of main identifications and iii) spike-in species
#' @param refLi (character or integer) custom specify which line of data should be used for normalization, ie which line is main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param remRev (logical) option to remove all peptide-identifications based on reverse-peptides
#' @param remConta (logical) option to remove all peptides identified as contaminants
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund}
#' for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of
#' replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided) \code{}
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimenal meta-data: if character,
#' this may be the ID at ProteomeExchange. Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided;
#' if \code{gr} is provided, it gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by MaxQuant; if \code{gr} is provided, it gets priority for grouping of replicates
#' if \code{TRUE} default to files 'summary.txt' (needed to match information of \code{sdrf}) and 'parameters.txt' which can be found in the same folder as the main quantitation results;
#'  if \code{character} the respective file-names (relative or absolute path), 1st is expected to correspond to 'summary.txt' (tabulated text, the samples as given to MaxQuant) and 2nd to 'parameters.txt' (tabulated text, all parameters given to MaxQuant)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical) optional plot vioplot of initial and normalized data (using \code{normalizeMeth}); alternatively the argument may contain numeric details that will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot
#' @param wex (numeric) relative expansion factor of the violin in plot
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with 'PSM' and 'NoOfRazorPeptides',
#' \code{$quantNotes}, \code{$notes} and optional \code{setup} for meta-data from \code{sdrf}; or a data.frame with quantitation and annotation if \code{separateAnnot=FALSE}
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}; for reading protein level \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}
#' @examples
#' # Here we'll load a short/trimmed example file (thus not the MaxQuant default name)
#' MQpepFi1 <- "peptides_tinyMQ.txt.gz"
#' path1 <- system.file("extdata", package="wrProteo")
#' specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST", spec2="HUMAN")
#' dataMQpep <- readMaxQuantPeptides(path1, fileName=MQpepFi1, specPref=specPref1,
#'   titGraph="Tiny MaxQuant Peptides")
#' summary(dataMQpep$quant)
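#' # Sketch only (not run) : adding sample meta-data, assuming MaxQuant's
#' # 'summary.txt' is present next to the peptide file
#' # dataMQpep2 <- readMaxQuantPeptides(path1, fileName=MQpepFi1, specPref=specPref1,
#' #   suplAnnotFile=TRUE, plotGraph=FALSE)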
#' @export
readMaxQuantPeptides <- function(path, fileName="peptides.txt", normalizeMeth="median", quantCol="LFQ.intensity", contamCol="Potential.contaminant",
pepCountCol="Experiment", refLi=NULL, sampleNames=NULL,
extrColNames=c("Sequence","Proteins","Leading.razor.protein","Start.position","End.position","Mass","Missed.cleavages","Unique..Groups.","Unique..Proteins.","Charges"),
specPref=c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN"),
remRev=TRUE, remConta=FALSE, separateAnnot=TRUE, gr=NULL, sdrf=NULL, suplAnnotFile=NULL, groupPref=list(lowNumberOfGroups=TRUE),
titGraph=NULL, wex=1.6, plotGraph=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## prepare
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readMaxQuantPeptides")
opar <- graphics::par(no.readonly=TRUE)
remStrainNo <- TRUE # if TRUE extract Species in very stringent pattern
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
setupSd <- NULL # initialize
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
setupSd <- setupSdmq <- summaryD <- parametersD <- NULL # initialize (meta-data)
if(debug) {message("rMQP1"); rMQP1 <- list(path=path,chPa=chPa,setupSd=setupSd,excluCol=excluCol,remStrainNo=remStrainNo,cleanDescription=cleanDescription)}
## check if path & file exist
if(!grepl("\\.txt$|\\.txt\\.gz$",fileName)) message(fxNa,"Suspicious filename, this function was designed for reading tabulated text files produced by MaxQuant")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="txt", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) {message("rMQP1d")}
chPa <- try(find.package("utils"), silent=TRUE)
if(inherits(chPa, "try-error")) stop("package 'utils' not found ! Please install first from CRAN")
## initial read MaxQuant
  tmp <- try(utils::read.delim(paFi, stringsAsFactors=FALSE), silent=TRUE)
if(debug) {message(fxNa,"rMQP2"); rMQP2 <- list(path=path,chPa=chPa,tmp=tmp,contamCol=contamCol,extrColNames=extrColNames,remConta=remConta)}
  if(length(tmp) <1 || inherits(tmp, "try-error")) {
    message(fxNa,"Unable to read file '",fileName,"' ! Returning NULL; check if this is an original MaxQuant-file and/or rights to read")
tmp <- NULL
return(NULL)
} else {
if(any(extrColNames[2:3] %in% colnames(tmp), na.rm=TRUE)) {
## check which columns can be extracted (for annotation)
if(is.integer(contamCol)) {contamCol <- colnames(tmp)[contamCol]
if(debug) message(fxNa," Custom 'contamCol' points to ",contamCol)}
extrColNames <- wrMisc::naOmit(union(extrColNames, contamCol)) # add contamCol if not included in extrColNames
chCol <- extrColNames %in% colnames(tmp)
if(any(!chCol, na.rm=TRUE)) { extrColNames[which(!chCol)] <- gsub("\\."," ",extrColNames[which(!chCol)])
chCol <- extrColNames %in% colnames(tmp) }
if(all(!chCol)) stop("Problem locating annotation columns (",wrMisc::pasteC(extrColNames, quoteC="''"),")")
if(any(!chCol, na.rm=TRUE) ) {
if(!silent) message(fxNa,"Note: Can't find columns ",wrMisc::pasteC(extrColNames[!chCol], quoteC="'")," !")
}
if(debug) {message(fxNa,"rMQP3")}
## 'REVERSE' peptides
chRazProCol <- extrColNames[3] %in% colnames(tmp)
chRev <- grep("REV__", tmp[,extrColNames[if(chRazProCol) 3 else 2]])
if(debug) message(fxNa,"rMQP3b chRev : ",utils::head(chRev),"\n")
if(length(chRev) >0) {
if(!silent) message(fxNa,"Note: Found ",length(chRev)," out of ",nrow(tmp)," peptides with proteins marked as 'REV_' (reverse peptide identification)", if(isTRUE(remRev)) " - Removing")
        if(isTRUE(remRev)) tmp <- if(length(chRev) < nrow(tmp) -1) tmp[-1*chRev,] else matrix(tmp[-1*chRev,], nrow=nrow(tmp)-length(chRev), dimnames=list(rownames(tmp)[-1*chRev], colnames(tmp)))
}
## remove MaxQuant internal contaminants CON__
      if(isTRUE(remConta) && nrow(tmp) >0) { isConta <- grep("CON__{0,1}[[:alpha:]]+", tmp[,extrColNames[2]])
if(length(isConta) >0) {
if(!silent) message(fxNa,"Note: Found ",length(isConta)," out of ",nrow(tmp)," proteins marked as 'CON_' (contaminants) - Removing")
tmp <- if(length(isConta) < nrow(tmp) -1) tmp[-1*isConta,] else matrix(tmp[-1*isConta,], nrow=nrow(tmp)-length(isConta), dimnames=list(rownames(tmp)[-1*isConta], colnames(tmp)))
} }
} else if(!silent) message(fxNa,"BIZZARE, trouble ahead : Unable to find columns ",wrMisc::pasteC(extrColNames[2:3],quoteC="'")," (from argument 'extrColNames')")
if(debug) {message(fxNa,"rMQP4"); rMQP4 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chRazProCol=chRazProCol,chRev=chRev,remConta=remConta)}
}
if(length(tmp) >0) {
## further extracting : quantitation
grepX <- function(x) grep(paste0(x,"\\."), colnames(tmp))
useDCol <- if(length(quantCol)==1) grepX(quantCol) else unique(as.integer(sapply(quantCol, grepX)))
if(length(useDCol) <1) warning(fxNa, "NO columns matching terms ",wrMisc::pasteC(quantCol, quoteC="'")," from argument 'quantCol' found !") else {
quantColP <- NULL # initialize
if(length(quantCol) <1) stop(" 'quantCol' must be provided !")
if(length(quantCol) >1) { abund <- as.matrix(wrMisc::extrColsDeX(tmp, extrCol=quantCol, doExtractCols=TRUE, silent=silent, callFrom=fxNa))
} else { chP <- substr(quantCol, nchar(quantCol), nchar(quantCol)) != "."
quantColP <- quantCol
quantCol <- if(chP) grep(paste0(quantCol,"\\."), colnames(tmp)) else grep(quantCol, colnames(tmp))
chNa <- is.na(quantCol)
        if(all(chNa) || length(quantCol) <1) stop("Could not find any of the columns specified in argument 'quantCol' !")
if(any(chNa)) {
if(!silent) message(fxNa,"Could not find columns ",wrMisc::pasteC(quantCol[which(chNa)],quote="'")," .. omit")
quantCol <- wrMisc::naOmit(quantCol)}
abund <- as.matrix(tmp[,quantCol]) } # abundance val
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(tmp[,quantCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
if(length(dim(abund)) <2) abund <- matrix(as.numeric(abund), ncol=ncol(abund), dimnames=dimnames(abund))
colnames(abund) <- if(length(quantColP)==1) sub(paste0(quantColP,"\\."),"", colnames(abund)) else wrMisc::.trimFromStart(wrMisc::.trimFromEnd(colnames(abund)))
if(debug) {message(fxNa,"rMQP5")}
## convert 0 to NA
ch1 <- abund <= 0
if(any(ch1, na.rm=TRUE)) { abund[which(ch1)] <- NA
if(!silent) message(fxNa,"Transform ",sum(ch1),"(",100*round(sum(ch1)/length(ch1),3),"%) initial '0' values to 'NA'")}
}
## further extracting : prepare for countig data
ch1 <- !grepl("\\^",pepCountCol)
if(any(ch1, na.rm=TRUE)) {pepCountCol[which(ch1)] <- paste0("^",pepCountCol[which(ch1)])} # add heading '^' (if not yet present)
ch1 <- !grepl(" $",pepCountCol)
if(any(ch1, na.rm=TRUE)) {pepCountCol[which(ch1)] <- paste0(pepCountCol[which(ch1)]," ")} # add tailing ' ' (if not yet present)
if(length(grep("\\\\",pepCountCol)) <1) pepCountCol <- gsub("\\.","\\\\.",pepCountCol) # protect '.' (if not yet protected)
## prepare for column-name style with '.' or '...'
tm2 <- lapply(as.list(pepCountCol), function(x) c(x, gsub(" ",".", sub(" \\+ ","...",x))) )
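    # as for protein-level reading : each pattern is tried both space-separated and as R-style column-name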
names(tm2) <- pepCountCol
usePCol <- lapply(tm2, function(x) {ch1 <- lapply(x, grep, colnames(tmp)); if(length(ch1) >1) ch1[[which.max(sapply(ch1,length))]] else ch1[[1]]})
usePCol <- lapply(usePCol, wrMisc::naOmit)
ch2 <- sapply(usePCol, length) -ncol(abund) # take abund as ref for not extracting more cols
if(any(ch2 >0, na.rm=TRUE)) usePCol[which(ch2 >0)] <- lapply(usePCol[which(ch2 >0)], function(x) x[-1])
ch2 <- sapply(usePCol, length) ==ncol(abund) # update
    if(!silent && any(!ch2, na.rm=TRUE)) message(fxNa,"Could not find peptide counts columns (argument 'pepCountCol') matching to '",pepCountCol[which(!ch2)],"'")
if(debug) {message(fxNa,"rMQP6"); rMQP6 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chRazProCol=chRazProCol,chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,usePCol=usePCol,pepCountCol=pepCountCol,specPref=specPref,remConta=remConta)}
## make array of PSM counts etc
if(any(ch2, na.rm=TRUE)) {
counts <- array(dim=c(nrow(tmp),ncol(abund),sum(ch2)), dimnames=list(NULL, colnames(abund), pepCountCol[which(ch2)]))
for(i in 1:sum(ch2)) counts[,,i] <- as.numeric(as.matrix(tmp[,usePCol[[which(ch2)[i]]] ]))
} else counts <- NULL
if(debug) {message(fxNa,"rMQP6a")}
## Annotation
useACol <- list(annC=match(extrColNames, colnames(tmp)) )
MQann <- as.matrix(tmp[,useACol$annC])
specMQ <- rep(NA, nrow(abund)) # initialize
if(debug) {message(fxNa,"rMQP6b")}
    useProCo <- 2 + extrColNames[3] %in% colnames(tmp)     # specific to peptide reading : use 'Leading.razor.protein' if available, otherwise 'Proteins'
  .MultGrep <- function(pat, y) if(length(pat)==1) grep(pat, y) else unlist(sapply(pat, grep, y))   # (multiple) grep() when length of pattern 'pat' >1
## MaxQuant internal contaminants specific : remove non-protein DB parts - if possible, eg "CON__ENSEMBL:ENSBTAP00000007350;CON__P01030" -> "CON__P01030"
conID <- paste0("CON__",c("ENSEMBL","REFSEQ","H-INV"),":")
ch2 <- sapply(sapply(conID, grep, MQann[,useProCo]), length) >0
if(any(ch2, na.rm=TRUE)) {
conID <- conID[which(ch2)]
conID <- paste0(conID, c("[[:upper:]]+[[:digit:]]*;{0,1}", "[[:upper:]]+_[[:digit:]]+;{0,1}", "[[:upper:]]+[[:digit:]]+;{0,1}"))
acc1 <- MQann[,useProCo]
for(i in 1:length(conID)) {
acc2 <- acc1 # need previous 'status' to compare if all text disappears
acc1 <- sub(conID[i], "", acc1)
chLe <- nchar(acc1) <2
if(any(chLe, na.rm=TRUE)) acc1[which(chLe)] <- sub("CON__","", acc2[which(chLe)]) } # remove entire entry only if something (else) remains
    ## remove first of CON__ entries (only if at least 3 characters remain)
ch2 <- grep("CON__{0,1}[A-Z0-9]+;", acc1)
if(length(ch2) >0) { acc2 <- acc1
acc1 <- sub("CON__{0,1}[A-Z0-9]+;", "", acc1)
chLe <- nchar(acc1) <2
if(any(chLe, na.rm=TRUE)) acc1[which(chLe)] <- sub("CON__","", acc2[which(chLe)]) } # remove entire entry only if something (else) remains
## remove first of "CON_" marks
ch2 <- grep("CON_", acc1)
if(length(ch2) >0) acc1[ch2] <- sub("CON__{0,1}","", acc1[ch2])
MQann[,useProCo] <- acc1 }
if(length(specPref) >0) {
    ## look, if available, for specific tags (otherwise look in 'Proteins')
specMQ0 <- lapply(specPref, .MultGrep, MQann[,useProCo]) # in 'Proteins'
for(i in 1:length(specMQ0)) {if(length(specMQ0[[i]]) >0) specMQ[as.integer(specMQ0[[i]])] <- names(specMQ0)[i]}
}
if(debug) {message(fxNa,"rMQP6c")}
MQann <- cbind(SpecType=specMQ, MQann) # better to name column 'species' ??
if(debug) {message(fxNa,"rMQP7"); rMQP7 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chRazProCol=chRazProCol,counts=counts,
chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,MQann=MQann,remConta=remConta)}
## remove MQ-internal contaminants
if(remConta & extrColNames[useProCo] %in% colnames(MQann)) {
    conLi <- grep("CON__[[:alnum:]]", MQann[,extrColNames[useProCo]])
if(length(conLi) >0) {
iniLi <- nrow(MQann)
MQann <- MQann[-conLi,]
abund <- abund[-conLi,]
specMQ <- specMQ[-conLi] # needed ??
#specMQ0 <- specMQ0[-conLi]
      counts <- if(length(dim(counts))==3) counts[-conLi,,] else counts[-conLi,]
if(debug) message(fxNa,"Removing ",length(conLi)," instances of MaxQuant-contaminants to final ",nrow(MQann)," lines/IDs")} }
if(debug) {message(fxNa,"rMQP7b")}
## split Annotation
remHeader <- c("^conta\\|","^sp\\|")
MQan2 <- strsplit(sub(remHeader[1], "", sub(remHeader[2], "", if(useProCo==2) sub(";.+", "", MQann[,useProCo+1]) else MQann[,useProCo+1])), "\\|")
MQan2 <- t(sapply(MQan2, function(x) if(length(x)==1) { c(NA,x)} else x[1:2]) )
colnames(MQan2) <- c("Accession","EntryName")
MQan2 <- cbind(MQan2, Species=sub("_.+|[[:punct:]].+","", sub("[[:upper:]]+[[:digit:]]*_", "", MQan2[,2]))) # separate AccessionNumber (eg P02768) and EntryName (eg ALBU_HUMAN)
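  ## illustration (hypothetical entry) : "sp|P02768|ALBU_HUMAN" splits to Accession="P02768" and EntryName="ALBU_HUMAN";
  ##   the Species tag ("HUMAN") is derived from the EntryName suffix (and may be converted to 'Homo sapiens' below)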
## extract species according to custom search parameters 'specPref'
.annSpecies <- function(spe=c("_HUMAN","Homo sapiens"), anno=MQann, exCoNa=extrColNames) {
## extract species tags out of MQann[,exCoNa[2]], place as convert to regular name in anno, return matrix anno
ch1 <- grep(spe[1], anno[,exCoNa[2]])
if(length(ch1) >0) anno[ch1,"Species"] <- spe[2] #"Homo sapiens"
anno }
if(remStrainNo) {
commonSpec <- .commonSpecies()
commonSpec[,1] <- sub("^_","", commonSpec[,1]) # '_' has already been stripped off during strsplit
for(i in 1:nrow(commonSpec)) MQan2[,"Species"] <- sub(commonSpec[i,1], commonSpec[i,2], MQan2[,"Species"]) }
MQann <- cbind(MQann, MQan2)
if(debug) { message(fxNa,"rMQP7c")}
## contaminants (fuse from column 'Potential.contaminant' and those found via specPref[1])
contam <- rep(FALSE, nrow(MQann))
if(!all(is.na(specMQ0))) if(length(specMQ0$conta) >0) contam[specMQ0$conta] <- TRUE ## from 'specPref' search
if("Potential.contaminant" %in% colnames(MQann)) { chCo <- grepl("+",MQann[,"Potential.contaminant"])
if(any(chCo, na.rm=TRUE)) contam[which(chCo)[1]] <- TRUE
MQann[,"Potential.contaminant"] <- contam
} else MQann <- cbind(MQann,Potential.contaminant=contam)
if(debug) {message(fxNa,"rMQP9")}
## look for unique col from $annot to use as rownames
rowNa <- MQann[,2]
chAn <- sum(duplicated(rowNa))
if(chAn >0) {
chMod <- grep("^Oxidation", colnames(tmp))
if(length(chMod) >0) {
hasMod <- which(nchar(tmp[,chMod[1]]) >0)
if(length(hasMod) >0) {rowNa <- paste0(rowNa,".ox")
chAn <- sum(duplicated(rowNa))}
} }
if(chAn >0) {rowNa <- wrMisc::correctToUnique(rowNa)
if(!silent) message(fxNa,"Note : Some peptide sequences appear duplicated (despite considering for oxidation)")
}
rownames(abund) <- rownames(MQann) <- rowNa
if(length(counts) >0) rownames(counts) <- rownames(MQann)
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) { refLi <- which(MQann[,"SpecType"]==refLi)
if(length(refLi) <1) message(fxNa,"Could not find any peptide matching argument 'refLi', ignoring ...") else {
if(!silent) message(fxNa,"Normalize using subset of ",length(refLi)) } } # may be "mainSpe"
if(length(refLi) <1) refLi <- NULL
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=TRUE)
if(inherits(quant, "try-error")) { warning(fxNa,"PROBLEMS ahead : Unable to normalize as log2-data !!") }
if(debug) {message(fxNa,"rMQP10"); rMQP10 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,chRev=chRev,quantCol=quantCol,abund=abund,chNum=chNum,ch2=ch2,
quant=quant,MQann=MQann,MQan2=MQan2,contam=contam,remConta=remConta)}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="MQ", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rMQP13 .."); rMQP13 <- list()}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="MQ", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rMQP14"); rMQP14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,MQann=MQann,setupSd=setupSd)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="MaxQuant Peptides",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=debug, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=file.path(path,fileName), qmethod="MaxQuant", qMethVersion=if(length(parametersD) >0) "xx" else NA,
rawFilePath=if(length(parametersD) >0) "xx" else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()), created=as.character(Sys.time()),
wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## prepare for final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=MQann, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(abund, MQann) }
}
## ----- end of file R/readMaxQuantPeptides.R -----
#' Read csv files exported by OpenMS
#'
#' Protein quantification results from \href{https://openms.de/}{OpenMS}
#' which were exported as \code{.csv} can be imported and relevant information extracted.
#' Peptide data get summarized by protein by top3 or sum methods.
#' The final output is a list containing the elements: \code{$annot}, \code{$raw} and \code{$quant} (ie normalized final quantifications), or returns a data.frame with the entire content of the file if \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed based on the OpenMS peptide-identification and label-free-quantification module.
#' Csv input files may also be compressed as .gz.
#'
#' Note: With this version the information about protein-modifications (PTMs) may not yet get exploited fully.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method (will be sent to \code{\link[wrMisc]{normalizeThis}})
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param sampleNames (character) new column-names for quantification data (by default the names from files with spectra will be used)
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param sumMeth (character) method for summarizing peptide data (so far 'top3' and 'sum' available)
#' @param minPepNo (integer) minimum number of peptides to be used for returning quantification
#' @param protNaCol (character) column name to be read/extracted for the annotation section (default "ProteinName")
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param tit (character) custom title to plot
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param plotGraph (logical) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes},\code{$expSetup} and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readProtDiscovFile}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' fiNa <- "OpenMS_tiny.csv.gz"
#' dataOM <- readOpenMSFile(file=fiNa, path=path1, tit="tiny OpenMS example")
#' summary(dataOM$quant)
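#' ## sketch of an alternative call (hypothetical settings) : summarize peptides by 'sum' and skip plotting
#' dataOM2 <- readOpenMSFile(file=fiNa, path=path1, sumMeth="sum", plotGraph=FALSE)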
#'
#' @export
readOpenMSFile <- function(fileName=NULL, path=NULL, normalizeMeth="median", refLi=NULL, sampleNames=NULL, quantCol="Intensity",
sumMeth="top3", minPepNo=1, protNaCol="ProteinName", separateAnnot=TRUE, plotGraph=TRUE, tit="OpenMS", wex=1.6,
specPref=c(conta="LYSC_CHICK", mainSpecies="OS=Homo sapiens"), silent=FALSE, debug=FALSE, callFrom=NULL) {
## read OpenMS exported csv
#fileName <- "C:\\E\\projects\\MassSpec\\smallProj\\2021\\OpenMS\\beforeMSstats\\proteomics_lfq\\out.csv";
#specPref <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike="UPS")
## initialize
pepNaCol <- "PeptideSequence"; preCol <- "PrecursorCharge"; condCol <- "Condition"; runCol <- c("BioReplicate","Run"); mzMLCol <- "Reference"
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readOpenMSFile")
opar <- graphics::par(no.readonly=TRUE)
chPa <- try(find.package("utils"), silent=TRUE)
if(inherits(chPa, "try-error")) stop("package 'utils' not found ! Please install first")
if(isTRUE(debug)) silent <- FALSE
if(!isTRUE(silent)) silent <- FALSE
infoDat <- NULL
if(debug) message("rmso1")
## check & read file
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="csv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
##
tmp <- list()
tmp[[1]] <- try(utils::read.csv(paFi, stringsAsFactors=FALSE), silent=TRUE) # read US csv-file
tmp[[2]] <- try(utils::read.csv2(paFi, stringsAsFactors=FALSE), silent=TRUE) # read Euro csv-file
chCl <- sapply(tmp, inherits, "try-error")
if(all(chCl)) stop(" Failed to extract data from '",fileName,"' check format (should be .csv or .csv.gz) & rights to read")
nCol <- sapply(tmp, function(x) if(length(x) >0) {if(! inherits(x, "try-error")) ncol(x) else NA} else NA)
bestT <- which.max(nCol)
datA <- tmp[[bestT]]
tmp <- NULL # reset
## locating of columns to treat
chCoS <- match(runCol, colnames(datA))
if(all(is.na(chCoS))) stop("Cannot find column indicating replicates/runs")
chCoS <- if(sum(!is.na(chCoS)) >1) chCoS[1] else wrMisc::naOmit(chCoS)
chCoQ <- match(quantCol, colnames(datA))
if(is.na(chCoQ)) stop("Cannot find column indicating intensity values")
chCoPr <- match(protNaCol, colnames(datA))
if(is.na(chCoPr)) stop("Cannot find column indicating protein names")
chCoPe <- match(pepNaCol, colnames(datA))
if(is.na(chCoPe)) message(fxNa,"Cannot find column indicating peptide names")
chCoPc <- match(preCol, colnames(datA))
if(is.na(chCoPc)) message(fxNa,"Cannot find column indicating precursor charge")
## extract mzML filenames to 'Run'
chCoMz <- match(mzMLCol, colnames(datA))
if(is.na(chCoMz)) {
message(fxNa,"Cannot find column indicating mzML-file names")
expSetup <- datA[match(unique(datA[,chCoS]), datA[,chCoS]), match(c(condCol,runCol), colnames(datA))]
} else {
expSetup <- datA[match(unique(datA[,chCoMz]), datA[,chCoMz]), match(c(condCol,runCol,mzMLCol), colnames(datA))]
datA <- datA[,-1*chCoMz] # remove column "Reference" (redundant info, captured in expSetup)
}
## main quantitation
  nPep <- nrow(datA)                      # initial number of peptide-level lines (reported in $notes)
  if(length(chCoPe) >0 && !is.na(chCoPe)) {      # "PeptideSequence" found
## need to check if 1 prot has mult peptides
#datA[which(datA[,1]=="P02768ups|ALBU_HUMAN_UPS;sp|ALBU_HUMAN|" & datA[,chCoS]==1),]
ch1 <- duplicated(paste(datA[,chCoPr],datA[,chCoS]), fromLast=TRUE)
if(any(ch1)) {
## re-sort for easier/faster treating
nPep <- nrow(datA)
datA <- datA[order(paste(datA[,chCoPr],datA[,chCoS])),]
useLi <- which(ch1 | duplicated(paste(datA[,chCoPr],datA[,chCoS]), fromLast=FALSE))
## summarize peptides ..
## ? how to count same peptide if (in same sample) at diff charge states (at diff quant) ?
##
## NOTE : not yet developed for distinguishing PTM variants !!
if(!silent) message(fxNa,"summarizing ",nrow(datA)," peptides using ",sumMeth)
IDs <- unique(datA[,chCoPr])
## much faster with separate quant summarization and attaching of 'constant' info (compared to 'integrated' function)
firOf <- match(unique(paste(datA[,chCoPr],datA[,chCoS])), paste(datA[,chCoPr],datA[,chCoS]))
sumPro <- unlist(by(datA[useLi,c(chCoQ)], paste(datA[useLi,chCoPr],datA[useLi,chCoS]),
function(x) if(sumMeth=="top3") {v <- sort(x,decreasing=TRUE); c(mean(v[1:min(3,length(v))]),length(x))} else c(sum(x),length(x)) ) )
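      ## illustration (hypothetical values) : peptide intensities c(12,10,8,5) of one protein in one sample give
      ##   with 'top3' mean(c(12,10,8)) = 10 (plus count 4), with 'sum' 35 (plus count 4)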
newOrd <- match(paste0(paste(datA[firOf,chCoPr],datA[firOf,chCoS]),"2"), names(sumPro)[2*(1:(length(sumPro)/2))])
byProtSum <- cbind(datA[firOf, c(chCoPr,chCoS)], matrix(as.numeric(sumPro), ncol=2, byrow=TRUE)[newOrd,])
colnames(byProtSum) <- c(colnames(datA[,c(chCoPr,chCoS,chCoQ)]),"PSM")
useLr <- if(length(useLi)==nrow(datA)) NULL else (1:nrow(datA))[-1*useLi] # lines with just one pep
datA <- cbind(datA[firOf,-ncol(datA)], byProtSum[,c(-1:0) +ncol(byProtSum)])
    ## optional filter for min number of peptides (using PSM)
if(minPepNo[1] > 1) datA <- datA[which(datA[,ncol(datA)] >=minPepNo[1]),]
} }
##
## extract quantity (ie unstack)
samp <- unique(datA[,chCoS])
IDs <- unique(datA[,chCoPr])
abund <- array(dim=c(length(IDs),nrow(expSetup),2), dimnames=list(IDs, sub("\\.mzML","",expSetup[,ncol(expSetup)]) ,c("XIC","PSM")))
for(i in 1:length(samp)) {tmp <- datA[which(datA[,chCoS]==samp[i]), c(chCoPr, c(-1,0) +ncol(datA))]; abund[match(tmp[,1],IDs),i,] <- as.matrix(tmp[,-1]) }
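  ## note : 'abund' is a 3-dim array of proteins x samples x c("XIC","PSM"), ie quantitations plus the number of peptides summarized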
  ## need to recover the "BioReplicate" vs "Condition" info for grouping
grp <- datA[match(samp,datA[,chCoS]),"Condition"]
names(grp) <- samp
  ## separate ID from 'ProteinName' entries: remove trailing punctuation or open brackets (ie not closed) at end of fasta header
annot <- matrix(NA,nrow=nrow(abund), ncol=6, dimnames=list(rownames(abund),c("Accession","EntryName","ProteinName","Species","Contam","SpecType")))
annot[,1] <- sub("\\|[[:print:]]*$","", sub("^[[:lower:]]{2}\\|","", rownames(abund))) # extract only 1st ID
annot[,"ProteinName"] <- sub("[[:upper:]][[:alnum:]]+\\|","", sub("^[[:lower:]]{2}\\|","", rownames(abund))) # extract ProteinName
  chMult <- grep(";[[:alpha:]]", annot[,"ProteinName"])     # check for multiple concatenated protein-names
if(length(chMult) >0) {
annot[chMult,"ProteinName"] <- sub(";[[:alpha:]]+\\|[[:print:]]*","",annot[chMult,"ProteinName"]) } # extract 1st ProteinName
  ## extract common organism species names (unfortunately the OS= tag is not available in the csv)
if(TRUE) {
commonSpec <- .commonSpecies() # allow customizing via argument ?
for(i in 1:nrow(commonSpec)) {useLi <- grep(commonSpec[i,1], annot[,"ProteinName"])
if(length(useLi) >0) annot[useLi,"Species"] <- commonSpec[i,2]} }
## look for special annotation terms
if(length(specPref) >0) {
for(i in 1:min(length(specPref),6)) { useLi <- grep(specPref[i], rownames(abund))
if(length(useLi) >0) annot[useLi,"SpecType"] <- if(nchar(names(specPref)[i]) >0) names(specPref)[i] else c("conta","mainSpe","species2","species3","species4")[i]}
}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) & length(refLi)==1) { refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1) message(fxNa," could not find any protein matching argument 'refLi', ignoring ...") else {
if(!silent) message(fxNa," normalize using subset of ",length(refLi))}} # may be "mainSpe"
if(length(refLi) <1) refLi <- NULL
## take log2 & normalize
quant <- wrMisc::normalizeThis(log2(abund[,,1]), method=normalizeMeth, refLines=refLi, callFrom=fxNa)
## plot distribution of intensities
custLay <- NULL
if(length(plotGraph) >0) { if(is.numeric(plotGraph)) { custLay <- plotGraph; plotGraph <- TRUE
} else {plotGraph <- as.logical(plotGraph[1])}}
if(plotGraph) {
if(length(custLay) >0) graphics::layout(custLay) else graphics::layout(1:2)
graphics::par(mar=c(3, 3, 3, 1)) # mar: bot,le,top,ri
if(is.null(tit)) tit <- "OpenMS quantification"
chGr <- try(find.package("wrGraph"), silent=TRUE)
chSm <- try(find.package("sm"), silent=TRUE)
misPa <- c(inherits(chGr, "try-error"), inherits(chSm, "try-error"))
titSu <- if(length(refLi) >0) paste0(c(" by ",if(length(refLiIni) >1) c(length(refLi)," selected lines") else c("'",refLiIni,"'")),collapse="") else NULL
if(any(misPa)) {
if(!silent) message(fxNa," missing package ",wrMisc::pasteC(c("wrGraph","sm")[which(misPa)],quoteC="'")," for drawing vioplots")
## wrGraph not available : simple boxplot
graphics::boxplot(log2(abund[,,1]), main=paste(tit,"(initial)",sep=" "), las=1, outline=FALSE)
graphics::abline(h=round(log2(stats::median(abund[,,1],na.rm=TRUE))) +c(-2:2), lty=2, col=grDevices::grey(0.6))
## plot normalized
graphics::boxplot(quant, main=paste0(tit," (",normalizeMeth,"-normalized",titSu,")"), las=1, outline=FALSE)
graphics::abline(h=round(stats::median(quant, na.rm=TRUE)) +c(-2:2), lty=2, col=grDevices::grey(0.6))
} else { # wrGraph and sm are available
wrGraph::vioplotW(log2(abund[,,1]), tit=paste(tit,"(initial)",sep=" "), wex=wex, callFrom=fxNa)
graphics::abline(h=round(stats::median(log2(abund[,,1]), na.rm=TRUE)) +c(-2:2), lty=2, col=grDevices::grey(0.6))
## now normalized
wrGraph::vioplotW(quant, tit=paste0(tit," (",normalizeMeth,"-normalized",titSu,")"), wex=wex, callFrom=fxNa)
graphics::abline(h=round(stats::median(quant, na.rm=TRUE)) +c(-2:2), lty=2, col=grDevices::grey(0.6))
}
on.exit(graphics::par(opar)) } #
## meta-data
notes <- c(inpFile=paFi, qmethod="OpenMS", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA, normalizeMeth=normalizeMeth, pepSumMeth=sumMeth, nIniPep=nPep,
call=deparse(match.call()), created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
## final output
if(separateAnnot) list(raw=abund[,,1], quant=quant, annot=annot, counts=abund[,,2], expSetup=expSetup, notes=notes) else data.frame(quant,annot)
}
## ----- end of file R/readOpenMSFile.R -----
#' Read xlsx, csv or tsv files exported from Proline and MS-Angel
#'
#' Quantification results from \href{http://www.profiproteomics.fr/proline/}{Proline} and MS-Angel exported as xlsx format can be read directly using this function.
#' Besides, files in tsv, csv (European and US format) or tabulated txt can be read, too.
#' Then relevant information gets extracted, the data can optionally normalized and displayed as boxplot or vioplot.
#' The final output is a list containing 6 elements: \code{$raw}, \code{$quant}, \code{$annot}, \code{$counts}, \code{$quantNotes} and \code{$notes}.
#' Alternatively, a data.frame with annotation and quantitation data may be returned if \code{separateAnnot=FALSE}.
#' Note: There is no normalization by default since quite frequently data produced by Proline are already sufficiently normalized.
#' The figure produced using the argument \code{plotGraph=TRUE} may help to judge if the data appear sufficiently normalized (distributions should align).
#'
#' @details
#' This function has been developed using Proline version 1.6.1 coupled with MS-Angel 1.6.1.
#' The classical way of using this function consists in exporting results produced by Proline and MS-Angel as xlsx file.
#' Besides, other formats may be read, too. This includes csv (eg the main sheet/table of the exported xlsx file saved as csv).
#' \href{https://github.com/wombat-p}{WOMBAT} represents an effort to automatize quantitative proteomics experiments, using this route
#' data get exported as txt files which can be read, too.
#'
#' @param fileName (character) name of file to read; .xlsx-, .csv-, .txt- and .tsv can be read (csv, txt and tsv may be gz-compressed). Reading xlsx requires package 'readxl'.
#' @param path (character) optional path (note: Windows backslash should be protected or written as '/')
#' @param normalizeMeth (character) normalization method (for details and options see \code{\link[wrMisc]{normalizeThis}})
#' @param logConvert (logical) convert numeric data as log2, will be placed in $quant
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param quantCol (character or integer) columns with main quantitation-data : precise colnames to extract, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param annotCol (character) precise colnames or if length=1 pattern to search among column-names for $annot
#' @param remStrainNo (logical) if \code{TRUE}, the organism annotation will be trimmed to uppercaseWord+space+lowercaseWord (eg Homo sapiens)
#' @param pepCountCol (character) pattern to search among column-names for count data of PSM and NoOfPeptides
#' @param trimColnames (logical) optional trimming of column-names of any redundant characters from beginning and end
#' @param refLi (character or integer) custom specify which lines of data are main species; if single character entry it will be used to select a group of species (eg 'mainSpe') via the column 'SpecType' in $annot
#' @param separateAnnot (logical) separate annotation form numeric data (quantCol and annotCol must be defined)
#' @param plotGraph (logical or matrix of integer) optional plot vioplot of initial data; if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#'  if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimenal meta-data: if character, this may be the ID at ProteomeExchange,
#' the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by quantification software; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{TRUE} defaults to file '*InputFiles.txt' (needed to match information of \code{sdrf}) which can be exported next to main quantitation results;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of messages produced
#' @param debug (logical) display additional messages for debugging
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot} (columns ), \code{$counts} an array with 'PSM' and 'NoOfPeptides', \code{$quantNotes} and \code{$notes}; or a data.frame with quantitation and annotation if \code{separateAnnot=FALSE}
#' @seealso \code{\link[utils]{read.table}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' fiNa <- "exampleProlineABC.csv.gz"
#' dataABC <- readProlineFile(path=path1, file=fiNa)
#' summary(dataABC$quant)
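#' ## sketch of a variant call (hypothetical settings) : same file, but without the figure
#' dataABC2 <- readProlineFile(path=path1, file=fiNa, plotGraph=FALSE)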
#' @export
readProlineFile <- function(fileName, path=NULL, normalizeMeth="median", logConvert=TRUE, sampleNames=NULL, quantCol="^abundance_",
annotCol=c("accession","description","is_validated","protein_set_score","X.peptides","X.specific_peptides"), remStrainNo=TRUE,
pepCountCol=c("^psm_count_","^peptides_count_"), trimColnames=FALSE, refLi=NULL, separateAnnot=TRUE, plotGraph=TRUE, titGraph=NULL,
wex=2, specPref=c(conta="_conta\\|", mainSpecies="OS=Homo sapiens"), gr=NULL, sdrf=NULL, suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=TRUE), silent=FALSE, callFrom=NULL, debug=FALSE) {
## 'quantCol', 'annotCol' (character) exact col-names or if length=1 pattern to search among col-names for $quant or $annot
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readProlineFile")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(debug) {message(fxNa," rpf1")}
.adjustDecUnit <- function(txt, abbr=NULL, unit=NULL, silent=TRUE, callFrom=NULL) {
## (move to wrMisc?) Replace differing pairs of units (appearing at different spots of txt)
## note: won't work when spanning 3 types of decimal units; won't translate use of comma !!
## note: 'micro' written as 'u'
## .adjustDecUnit(c("5kg","500g","2kg","100g"), silent=FALSE)
fxNa <- wrMisc::.composeCallName(callFrom, newNa="adjustDecUnit")
if(length(abbr) <1) abbr <- c("k","","m","u","n","p","f","a","z")
if(length(unit) <1) unit <- c("mol","Mol","l","liter","g","V","A","s","K")
chX <- matrix(rep(1:length(abbr), each=2)[-1*c(1,length(abbr)*2)], nrow=2)
chPa <- sapply(unit, function(y) {apply(chX, 2, function(x) { length(c(grep(paste0("[[:digit:]]",abbr[x[1]],y), txt), grep(paste0("[[:digit:]]",abbr[x[2]],y), txt)))==length(txt)})})
if(any(chPa)) {
whUnit <- which.max(colSums(chPa))
sear <- chX[1, which.max(chPa[,whUnit])] # index of abbrev
repl <- paste0("000",abbr[sear +1], names(whUnit))
#message("xx2"); xx2 <- list(txt=txt,abbr=abbr,chX=chX,chPa=chPa,unit=unit,whUnit=whUnit,sear=sear,repl=repl)
if(!silent) message("replace '",paste0(abbr[sear], names(whUnit)),"' by '", repl,"'")
sear <- paste0(abbr[sear], names(whUnit))
txt <- sub(sear, repl, txt)
}
txt }
## check if path & file exist
if(!grepl("\\.csv$|\\.xlsx$|\\.tsv$|\\.csv\\.gz$|\\.tsv\\.gz$", fileName)) message(fxNa,"Trouble ahead ? Expecting .xlsx, .csv or .tsv file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt=NULL, compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) {message(fxNa," rpf2")}
## read file
  out <- counts <- fileTy <- infoDat <- setupSd <- quantConf <- pepCount <- NULL     # initialize default
if(length(grep("\\.xlsx$", paFi)) >0) {
## Extract out of Excel
reqPa <- c("readxl")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package( '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
sheets <- if(debug) try(readxl::excel_sheets(paFi[1]), silent=TRUE) else suppressMessages(try(readxl::excel_sheets(paFi[1]), silent=TRUE))
if(debug) {message(fxNa," rpf3")}
if(inherits(sheets, "try-error")) { message(fxNa,"Unable to read file '",paFi[1],"' ! Returning NULL; check format & rights to read")
} else {
if(!silent) message(fxNa,"Found sheets ",wrMisc::pasteC(sheets,quoteC="'"))
prSh <- which(sheets == "Protein sets")
if(length(prSh) <1) prSh <- grep("Protein", sheets)
if(length(prSh) >1) {
if(!silent) message(fxNa,"Multipe sheets containing 'Protein' found, using 1st :",sheets[prSh])
prSh <- prSh[1]
} else if(length(prSh) <1) {
prSh <- length(sheets)
if(!silent) message(fxNa,"No sheets containing 'Protein' in name found, try using : '",sheets[prSh],"'")
}
if(debug) message(fxNa,"Ready to use sheet ",prSh," rpf4")
out <- as.data.frame(if(debug) readxl::read_xlsx(paFi[1], sheet=prSh) else suppressMessages(readxl::read_xlsx(paFi[1], sheet=prSh)))
qCoSh <- which(sheets=="Quant config")
quantConf <- if(length(qCoSh) >0) { if(debug) readxl::read_xlsx(paFi[1], sheet=qCoSh[1]) else suppressMessages(readxl::read_xlsx(paFi[1], sheet=qCoSh[1]))} else NULL
if(length(quantConf) >0) {
tmp <- quantConf[[1]]
quantConf <- quantConf[[2]]
names(quantConf) <- sub("^#","X.",tmp)
}
if(debug) message(fxNa,"Initial xlsx read as ",nrow(tmp)," lines & ",ncol(tmp)," rows rpf5")
fileTy <- "xlsx"
## tibble colnames may include/start with '#'
## adopt annotCol
annotCol <- sub("^X.","#",annotCol) # adopt to real reading of colnames
bestT <- 0 } # initialize
} else {
## anythng else but Excel xlsx ...
## try to find out which input format; read according to extension multiple times and choose the one with most columns
tmp <- list()
if(grepl("\\.txt$|\\.txt\\.gz$",paFi[1])) tmp[[1]] <- try(utils::read.delim(paFi[1], stringsAsFactors=FALSE), silent=TRUE) # read tabulated text-file (from Proline/parse_Proline.R)
if(grepl("\\.csv$|\\.csv\\.gz$",paFi[1])) {
tmp[[2]] <- try(utils::read.csv(file=paFi[1], stringsAsFactors=FALSE), silent=TRUE) # read US csv-file
tmp[[3]] <- try(utils::read.csv2(file=paFi[1], stringsAsFactors=FALSE), silent=TRUE)} # read Euro csv-file
if(grepl("\\.tsv$|\\.tsv\\.gz$",paFi[1])) {
tmp[[4]] <- try(utils::read.csv(file=paFi[1], stringsAsFactors=FALSE, sep='\t', header=TRUE)) # read US comma tsv-file
tmp[[5]] <- try(utils::read.csv2(file=paFi[1], stringsAsFactors=FALSE, sep='\t', header=TRUE))} # read Euro tsv-file
if(debug) {message(fxNa," rpf6"); rpf6 <- list(fileName=fileName,paFi=paFi,chPa=chPa,path=path,tmp=tmp)}
chCl <- sapply(tmp, inherits, "try-error") | sapply(tmp, length) <2
if(all(chCl)) stop(" Failed to extract data from '",fileName,"' (check format & rights to read)")
nCol <- sapply(tmp, function(x) if(length(x) >0) {if(! inherits(x, "try-error")) ncol(x) else NA} else NA)
bestT <- which.max(nCol)
if(length(bestT) <1) stop("Problem when reading flat file : insufficient columns, check type of input !")
if(debug) message(fxNa,"Reading flat file, best as type no ",bestT," , ie as ",c("txt","US-csv","Euro-csv","tsv (US)","tsv (Euro)")[bestT]," rpf7")
out <- tmp[[bestT]]
fileTy <- c("txt","US.csv","Euro.csv")[bestT]
if(bestT==1) {
      if(length(annotCol) >1) { if(debug) message(fxNa,"Set 1st letter of annotCol[1:2] as uppercase")
substr(annotCol[1:2], 1, 1) <- toupper(substr(annotCol[1:2], 1, 1))} # capitalize 1st letter to get c("Accession","Description",...
chQuCol <- if(length(quantCol)==1) grep(quantCol, colnames(out)) else match(quantCol, colnames(out))
if(length(chQuCol) <1 & length(quantCol)==1) { quantCol <- "^Intensity" # adjust
chQuCol <- grep(quantCol, colnames(out))
if(!silent) message(fxNa,"Data read as txt, adjusting argument 'quantCol' to '^Intensity' ",c("NOT ")[length(chQuCol) <1]," succeful")
}
}
if(debug) {message(fxNa," rpf8"); rpf8 <- list(fileName=fileName,chPa=chPa,path=path,tmp=tmp,out=out,bestT=bestT)}
## quant info settings from separate sheet:
quanConfFile <- "Quant config.tsv"
if(length(fileName) >1) if(all(!is.na(fileName[2]) & nchar(fileName[2]) >0)) quanConfFile <- fileName[2]
paFi2 <- if(all(chPa)) { # extract & use path out of fileName of csv
      if(length(grep(dirname(fileName[1]), quanConfFile)) <1) file.path(path[1],quanConfFile) else file.path(dirname(fileName),quanConfFile)    # use path of fileName - if present, otherwise add path
} else file.path(path[1], quanConfFile)
if(file.exists(paFi2)) {
quantConf <- try(utils::read.csv(file=paFi2, stringsAsFactors=FALSE, sep='\t', header=TRUE))
if(inherits(quantConf, "try-error")) quantConf <- NULL else {
tmp <- colnames(quantConf)[-1]
quantConf <- as.character(quantConf)[-1]
names(quantConf) <- tmp
}
} else {quantConf <- NULL
if(debug) message(fxNa," file '",paFi2,"' not found for quantifaction specific information rpf8c")}
## adopt annotCol
annotCol <- sub("^#","X.",annotCol)
}
if(debug) {message(fxNa," rpf9")}
if(length(tmp) >0) {
## look for annotation columns
if(!isTRUE(separateAnnot)) separateAnnot <- FALSE
if(any(c(length(quantCol), length(annotCol)) <1)) separateAnnot <- FALSE else {
if(any(all(is.na(quantCol)), all(is.na(annotCol)))) separateAnnot <- FALSE
}
if(debug) {message(fxNa," rpf10")}
## extract meta-data and abundances from main table
metaCo <- wrMisc::naOmit(if(length(annotCol) >1) wrMisc::extrColsDeX(out, extrCol=annotCol, doExtractCols=FALSE, silent=silent, callFrom=fxNa) else grep(annotCol,colnames(out)))
chMissCo <- !(annotCol %in% colnames(out)[metaCo])
if(any(chMissCo)) {
if(any(chMissCo[1:2])) {
        metaCo2 <- match(tolower(annotCol[1:2]), tolower(colnames(out)))
        if(all(is.na(metaCo2))) stop("Can't find column ",wrMisc::pasteC(annotCol[which(chMissCo[1:2])])," in data")
        if(!silent) message(fxNa,"Found only partial match of 'annotCol' (upper/lower-case issues), adjusting names ")
        colnames(out)[wrMisc::naOmit(metaCo2)] <- annotCol[1:2][which(!is.na(metaCo2))]
        metaCo <- union(wrMisc::naOmit(metaCo2), metaCo) }
}
if(debug) {message(fxNa," rpf11")}
## further refine : separate Accession (ie annotCol[1]) (eg P00359) from ProteinName (eg TDH3)
annot <- as.matrix(out[,metaCo])
chSep <- nchar(utils::head(annot[,annotCol[1]])) - nchar(gsub("\\|","",utils::head(annot[,annotCol[1]])))
    tmp <- if(any(chSep >1)) sub("^[[:alpha:]]+\\||^[[:alpha:]]+_[[:alpha:]]+\\|","", annot[,annotCol[1]]) else annot[,annotCol[1]]    # presume presence of database origin-tag -> remove (eg sp|...)
tmp3 <- sub("^[[:alpha:]]+_{0,1}[[:alpha:]]*\\|[[:upper:]][[:digit:]]+-{0,1}[[:digit:]]*\\|[[:alnum:]]+_{0,1}[[:alnum:]]*_{0,1}[[:alnum:]]*\\ ",
"",annot[,annotCol[2]]) # supposes annotCol[2], without db|accession|EntryName
tmp2 <- cbind(Accession=sub("\\|[[:print:]]+$","",tmp), EntryName=sub("^[[:alnum:]]+-{0,1}[[:digit:]]{0,2}\\|","",tmp),
ProteinName=sub("\\ [[:upper:]]{2}=[[:print:]]+","",tmp3) , GN=NA, Species=NA, Contam=NA, SpecType=NA) #
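    ## illustration (hypothetical entry) : an 'accession' like "sp|P00359|TDH3_YEAST" yields Accession="P00359" and EntryName="TDH3_YEAST"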
if(debug) {message(fxNa," rpf12")}
## recover GN
GNLi <- grep("\\ GN=[[:upper:]]{2,}[[:digit:]]*", tmp3)
if(length(GNLi) >0) { zz <- sub("[[:print:]]+\\ GN=", "",tmp3[GNLi]) # remove surplus to left
tmp2[GNLi,"GN"] <- sub("\\ +$","",sub("\\ [[:print:]]+","",zz)) } # remove surplus to right (and right trailing space)
## recover OS
OSLi <- grep("\\ OS=[[:upper:]][[:lower:]]+", tmp3)
if(length(OSLi) >0) { zz <- sub("[[:print:]]+\\ OS=", "",tmp3[OSLi]) # remove surplus to left
tmp2[OSLi,"Species"] <- sub("\\ [[:upper:]]{2}=[[:print:]]+","",zz) } # remove surplus to right (next tag)
if(debug) {message(fxNa," rpf13")}
## extract/check on Description
annot <- cbind(tmp2,annot[,-1*match(annotCol[1], colnames(annot))])
chDesc <- colnames(annot) %in% "description"
if(any(chDesc)) colnames(annot)[which(chDesc)] <- "Description" # make uniform to other
chUniNa <- duplicated(annot[,1])
if(any(chUniNa) & !silent) message(fxNa,"Caution: ",sum(chUniNa)," Accession entries appear repeatedly ! Making unique rownames by adding counter ...")
rowNa <- if(any(chUniNa)) wrMisc::correctToUnique(annot[,1], callFrom=fxNa) else annot[,1]
rownames(annot) <- rowNa
if(isTRUE(remStrainNo)) {
chSp <- grep("^[[:upper:]][[:lower:]]*\\ [[:lower:]]+\\ [[:print:]]*", annot[,"Species"])
if(length(chSp) >0) { nch1 <- nchar(sub("^[[:upper:]][[:lower:]]+\\ [[:lower:]]+", "", annot[chSp,"Species"]))
annot[chSp,"Species"] <- substr(annot[chSp,"Species"], 1, nchar(annot[chSp,"Species"]) - nch1) }
}
if(debug) {message(fxNa," rpf14"); rpf14 <- list(out=out,annot=annot,specPref=specPref,quantCol=quantCol,rowNa=rowNa,tmp=tmp)}
## set protein/gene annotation to common format
chNa <- match(c("GN","ProteinName"), colnames(annot))
colnames(annot)[chNa] <- c("GeneName","Description")
## locate special groups, column "SpecType"
if(length(specPref) >0) {
annot <- .extrSpecPref(specPref, annot, useColumn=c("Species","EntryName","GeneName","Accession"), silent=silent, debug=debug, callFrom=fxNa) }
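      ## note : the column 'SpecType' now carries the names of 'specPref' (eg 'conta','mainSpecies') for matching lines; used below when interpreting 'refLi'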
## locate & extract abundance/quantitation data
if(length(quantCol) >1) { abund <- as.matrix(wrMisc::extrColsDeX(out, extrCol=quantCol, doExtractCols=TRUE, silent=silent, callFrom=fxNa))
} else {
quantCol2 <- grep(quantCol, colnames(out))
chNa <- is.na(quantCol2)
if(all(chNa)) stop("Could not find any of of the columns specified in argument 'quantCol' !")
if(any(chNa)) {
if(!silent) message(fxNa,"Could not find columns ",wrMisc::pasteC(quantCol2[which(chNa)],quote="'")," .. omit")
quantCol2 <- wrMisc::naOmit(quantCol2)}
abund <- as.matrix(out[,quantCol2]) } # abundance val
## peptide counts
if(length(pepCountCol) >1) { supCol <- lapply(pepCountCol, grep, colnames(out))
chLe <- sapply(supCol,length)
chLe <- chLe==ncol(abund)
if(any(chLe)) { pepCount <- array(dim=c(nrow(out), ncol(abund), sum(chLe)),
dimnames=list(rowNa, colnames(abund), sub("\\^peptides_count","NoOfPeptides",sub("\\^psm_count","PSM",sub("_$","",pepCountCol)))[which(chLe)]))
for(i in 1:sum(chLe)) pepCount[,,i] <- as.matrix(out[,supCol[[which(chLe)[i]]]])
} else pepCount <- NULL
}
if(debug) {message(fxNa," rpf15"); rpf15 <- list(out=out,annot=annot,specPref=specPref,quantCol=quantCol,rowNa=rowNa,tmp=tmp)}
## check abundance/quantitation data
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(out[,quantCol2], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
rownames(abund) <- rowNa
##
## Custom (alternative) colnames
if(length(sampleNames) ==ncol(abund)) {
      colnames(abund) <- sampleNames
      if(length(pepCount) >0) colnames(pepCount) <- sampleNames     # pepCount may be NULL if no matching count-columns were found
} else {
colnames(abund) <- sub(if(length(quantCol) >1) "^abundance" else quantCol, "", colnames(abund))
if(trimColnames) colnames(abund) <- wrMisc::.trimFromStart(wrMisc::.trimFromEnd(colnames(abund)))
}
## treat colnames, eg problem with pxd001819 : some colnames writen as amol, other as fmol
adjDecUnits <- FALSE
if(adjDecUnits) {
colnames(abund) <- sub("^ +","", .adjustDecUnit(wrMisc::trimRedundText(colnames(abund), silent=silent, debug=debug, callFrom=fxNa)))
}
if(debug) { message(fxNa," rpf16"); rpf16 <- list(abund=abund,annot=annot,sampleNames=sampleNames,normalizeMeth=normalizeMeth,refLi=refLi,sdrf=sdrf,suplAnnotFile=suplAnnotFile,path=path,fxNa=fxNa,silent=silent)}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi) ==1) {
refLi <- which(annot[,"SpecType"] ==refLi)
if(length(refLi) <1 && identical(refLiIni, "mainSpe")) refLi <- which(annot[,"SpecType"] =="mainSpecies") # fix compatibility problem 'mainSpe' to 'mainSpecies'
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
if(debug) { message(fxNa," rpf16b"); rpf16b <- list()}
## take log2 & normalize
if(length(normalizeMeth) <1) normalizeMeth <- "median"
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=!debug)
if(debug) { message(fxNa,"rpf16c .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
if(isTRUE(suplAnnotFile[1])) suplAnnotFile <- paFi
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="PL", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rpf16d .."); rpf16d <- list(sdrf=sdrf,gr=gr,suplAnnotFile=suplAnnotFile, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## finish groups of replicates & annotation setupSd
## need to transform fmol in amol for proper understanding & matching with metadata
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="PL", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rpf17"); rpf17 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="Proline",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data to export
notes <- c(inpFile=paFi, qmethod="Proline", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA, normalizeMeth="none", call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=paste(utils::packageVersion("wrProteo"), collapse="."), machine=Sys.info()["nodename"])
##
if(separateAnnot) {
if(!is.numeric(quant) && logConvert) { message(fxNa,"Problem: Abundance data seem not numeric, can't transform log2 !")}
out <- list(raw=abund, quant=quant, annot=annot, counts=pepCount, sampleSetup=setupSd, quantNotes=quantConf, notes=notes)
if(!logConvert) out$quant <- 2^out$quant }
}
## final Proline-import
out }
## ----- end of file R/readProlineFile.R -----
#' Read Tabulated Files Exported By ProteomeDiscoverer At Protein Level, Deprecated
#'
#' Deprecated old version for reading protein identification and quantification results from
#' \href{https://www.thermofisher.com/order/catalog/product/OPTON-30812}{Thermo ProteomeDiscoverer}
#' which were exported as tabulated text can be imported and relevant information extracted.
#' The final output is a list containing 3 elements: \code{$annot}, \code{$raw} and optional \code{$quant},
#' or returns data.frame with entire content of file if \code{separateAnnot=FALSE}.
#' Please use readProteomeDiscovererFile() from the same package instead !
#'
#' @details
#' This function has been replaced by \code{readProteomeDiscovererFile} (from the same package) !
#' The syntax and structure of the output have remained the same, you can simply replace the name of the function called.
#'
#' This function has been developed using Thermo ProteomeDiscoverer versions 2.2 to 2.5.
#' The format of resulting files at export also depends which columns are chosen as visible inside ProteomeDiscoverer and subsequently get chosen for export.
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for a default file) to read for extracting file-names as sample-names and other experiment-related information.
#' If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants.
#' This function replaces the deprecated function \code{readPDExport}.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) custom column-names for quantification data (ProteomeDiscoverer does not automatically use file-names from spectra); this argument has priority over \code{suplAnnotFile}
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which column should be used for contaminants marked by ProteomeDiscoverer.
#'  If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final log2 (normalized) quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Accession","Description","Gene","Contaminant","Sum.PEP.Score","Coverage....","X..Peptides","X..PSMs","X..Unique.Peptides", "X..AAs","MW..kDa.") )
#' @param FDRCol (list) optional indication to search for protein FDR information
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided) \code{}
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimenal meta-data: if character, this may be the ID at ProteomeExchange,
#' the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by ProteomeDiscoverer; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{TRUE} defaults to file '*InputFiles.txt' (needed to match information of \code{sdrf}) which can be exported next to main quantitation results;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readFragpipeFile}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' fiNa <- "tinyPD_allProteins.txt.gz"
#' ## Please use the function readProteomeDiscovererFile(), as shown below (same syntax)
#' dataPD <- readProteomeDiscovererFile(file=fiNa, path=path1, suplAnnotFile=FALSE)
#' summary(dataPD$quant)
#'
#' @export
readProtDiscovFile <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, read0asNA=TRUE, quantCol="^Abundances*", annotCol=NULL, contamCol="Contaminant",
refLi=NULL, separateAnnot=TRUE, FDRCol=list(c("^Protein.FDR.Confidence","High"), c("^Found.in.Sample.","High")), gr=NULL, sdrf=NULL, suplAnnotFile=TRUE,
groupPref=list(lowNumberOfGroups=TRUE), specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"), plotGraph=TRUE, wex=1.6, titGraph="Proteome Discoverer",
silent=FALSE, debug=FALSE, callFrom=NULL) {
## read ProteomeDiscoverer exported txt
  .Deprecated(new="readProteomeDiscovererFile", package="wrProteo", msg="The function readProtDiscovFile() has been deprecated and replaced by readProteomeDiscovererFile() from the same package
    \n +++ Syntax and structure of output remain the same ! \n")
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readProtDiscovFile")
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
.corPathW <- function(x) gsub("\\\\", "/", x)
## check if path & file exist
msg <- "Invalid entry for 'fileName'"
if(length(fileName) >1) { fileName <- fileName[1]
if(!silent) message(fxNa," 'fileName' shoud be of length=1, using 1st value") # nolint # nolint
} else { if(length(fileName) <1) stop(msg) else if(nchar(fileName) <0) stop(msg)}
paFi <- fileName # presume (& correct if path is given)
chFi <- file.exists(fileName) # presume (& correct otherwise)
  if(length(path) >0) if(!dir.exists(path[1])) { if(!silent) message(fxNa,"Invalid path '",path[1],"' (not existing), ignoring...")
    path <- NULL }
if(length(path) >0) { chFi <- file.exists(file.path(path[1], fileName))
if(chFi) paFi <- file.path(path[1], fileName) else {
if(file.exists(fileName)) {paFi <- fileName
if(!silent) message(fxNa,"Note : Unable to find file '",fileName,"' in path '",path,"' but found without specified path !")
} else chFi <- FALSE # if path+fileName not found, check without path
} }
if(!chFi) stop(" File ",fileName," was NOT found ",if(length(path) >0) paste(" in path ",path)," !") # nolint: line_length_linter.
  chFi <- file.info(paFi)$size > 1
  if(!chFi) stop(" File ",fileName," was found BUT TOO SMALL (size ",file.info(paFi)$size," bytes) !")
if(!grepl("\\.txt$|\\.txt\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
if(debug) message(fxNa,"rpd0a ..")
## note : reading sample-setup from 'suplAnnotFile' at this place won't allow comparing if number of samples/columns corresponds to data; do after reading main data
if(debug) message(fxNa,"rpd0 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a ProteomeDiscoverer-file") }
NULL
} else {
if(debug) { message(fxNa,"rpd1 ... dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col ")
rpd1 <- list(tmp=tmp,paFi=paFi,annotCol=annotCol,fileName=fileName) }
## locate & extract annotation
if(length(annotCol) <1) annotCol <- c("Protein.ID","Description","Gene","Contaminant","Sum.PEP.Score","Coverage....","X..Peptides","X..PSMs","X..Unique.Peptides", "X..AAs","MW..kDa.")
## option for future: also extract column "MarkedAs"
PSMCol <- "^Number.of.PSMs.by.Search.Engine" # pattern searching tag for PSM-data
PepCol <- "^Number.of.Peptides.by.Search.Engine" # pattern searching tag for Number of peptides
## future option : lateron rename columns called as "Description" to annotCol[2]
## below use explicit colnames "Accession","Description", rename if tolower() fits
.chColNa <- function(x, mat, altern=NULL, renameTo=NULL, silent=FALSE, fxNa=NULL){
## check in 'matr' for column-name 'x', if required rename best hit (if no direct hit look using grep, then grep wo case); return corrected mat
chX <- x %in% colnames(mat)
if(all(chX)) {
        if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[match(x, colnames(mat))] <- renameTo # just a simple rename
} else { # try to localize column to use
chX <- grep(x, colnames(mat))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
          if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",x,"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else {
          if(length(altern) ==1) chX <- grep(altern, colnames(mat))
if(length(chX) > 0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",x,"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else {
chX <- grep(tolower(x), tolower(colnames(mat)))
if(length(chX) > 0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",tolower(x),"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else stop("Could NOT find column '",x,"' !!\n (available columns ",wrMisc::pasteC(colnames(mat), quoteC="'"),")") } }
}
mat }
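    ## illustrative sketch (hypothetical data) : .chColNa("Accession", data.frame(Accession=1:2, Description=c("a","b")))
    ##   returns the data.frame unchanged since 'Accession' matches directly; with renameTo="AccNo" that column would get renamed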
## check for essential colnames !
    tmp <- .chColNa(annotCol[1], tmp, altern="Accession", renameTo="Accession", silent=silent, fxNa=fxNa)
    tmp <- .chColNa(annotCol[2], tmp, renameTo="Description", silent=silent, fxNa=fxNa)
annotCol[1:2] <- c("Accession","Description") # update (just in case..)
if(is.character(annotCol)) annotColNo <- match(annotCol, colnames(tmp))
if(length(contamCol) >0) {
contamCol <- if(is.character(contamCol)) which(colnames(tmp)==contamCol[1]) else as.integer(contamCol[1])
if(length(contamCol) >0) contamFilter <- TRUE
annotColNo <- union(annotColNo, contamCol)
}
if(debug) { message(fxNa,"rpd2 .. annotColNo : ", wrMisc::pasteC(annotColNo)," contamCol : ",wrMisc::pasteC(contamCol)," ")
rpd2 <- list(tmp=tmp,annotCol=annotCol,PSMCol=PSMCol,PepCol=PepCol,fileName=fileName)}
## check for R-friendly export
Rfriendly <- FALSE
specRepl <- cbind(ini=c("Coverage...."), new=c("Coverage.in.Percent"))
annotCol2 <- unique(c(sub("X\\.\\.","Number.of.",annotCol), apply(specRepl, 1, function(x) sub(x[1], x[2], annotCol)) ))
annotColN2 <- match(annotCol2, colnames(tmp))
if(sum(!is.na(annotColN2)) > sum(!is.na(annotColNo))) { Rfriendly <- TRUE
annotColNo <- annotColN2
annotCol <- annotCol2
if(!silent) message(fxNa,"Setting 'annotCol' to export of 'R-friendly' colnames")}
if(all(is.na(annotColNo))) stop("Problem with 'annotCol' : Could NOT find any annotation-column") # nolint: line_length_linter.
    if(any(is.na(annotColNo), na.rm=TRUE)) { if(!identical(annotCol, annotCol2)) message(fxNa,"Can't find column(s) ",wrMisc::pasteC(annotCol[is.na(annotColNo)], quoteC="'"))
annotCol <- annotCol[!is.na(annotColNo)] }
annot <- as.matrix(tmp[,wrMisc::naOmit(annotColNo)])
if(debug) { message(fxNa,"rpd3 .. Rfriendly: ",Rfriendly," ncol annot ",ncol(annot)," cols; colnames : ",wrMisc::pasteC(colnames(annot))," ")
rpd3 <- list(tmp=tmp,annotCol=annotCol,PSMCol=PSMCol,PepCol=PepCol,fileName=fileName)}
    ## clean 'Description' entries: remove trailing punctuation or open brackets (ie not closed) at end of (truncated) fasta header
if(cleanDescription) {
if(debug) { message(fxNa,"rpd3a") }
annot[,"Description"] <- sub("[[:punct:]]+$","", sub("\\ +$", "", annot[,"Description"])) # tailing ';' and/or tailing space
annot[,"Description"] <- sub(" \\([[:alpha:]]*$", "", annot[,"Description"]) # tailing (ie truncated) open '(xxx'
}
tmp <- .chColNa("Accession", tmp, silent=silent, fxNa=fxNa)
annot <- cbind(Accession=annot[,"Accession"], EntryName=NA, GeneName=NA, Species=NA, Contam=NA, SpecType=NA, annot[,-1]) # may be better to name column 'species'
if(debug) {message(fxNa,"rpd4 .. dim annot: ", nrow(annot)," li and ",ncol(annot)," cols; colnames : ",wrMisc::pasteC(colnames(annot))," "); rpd4 <- list(annot=annot,tmp=tmp,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol)}
if("Contaminant" %in% colnames(annot)) annot[,"Contam"] <- toupper(gsub(" ","",annot[,colnames(tmp)[contamCol]]))
## try extract GeneNames from 'Description'
chPrNa <- is.na(annot[,"GeneName"])
if(all(chPrNa)) { grLi <- grep("\\ GN=[[:upper:]]{2,}[[:digit:]]", annot[which(chPrNa),"Description"])
if(length(grLi) >0) { zz <- sub("[[:print:]]+\\ GN=", "", annot[which(chPrNa)[grLi],"Description"]) # remove surplus to left
annot[which(chPrNa)[grLi],"GeneName"] <- sub("\\ [[:print:]]+","",zz) # remove surplus to right
} }
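    ## e.g. a Description ending in '... GN=UBE2C PE=1 SV=1' (hypothetical) yields GeneName='UBE2C'; note the pattern expects a digit after the uppercase letters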
if(debug) { message(fxNa,"rpd6 .. "); rpd6 <- list(annot=annot,specPref=specPref)}
## try extract species from 'Description'
DescrIni <- annot[,"Description"]
chSpe <- grep("OS=[[:upper:]][[:lower:]]+\\ [[:lower:]]+", DescrIni)
if(length(chSpe) >0) { # term OS= exists,
annot[chSpe,"Description"] <- sub("OS=[[:upper:]][[:lower:]]+\\ [[:lower:]][[:print:]]+", "", DescrIni[chSpe]) # everything left of OS=
annot[chSpe,"Species"] <- sub("\\ {0,1}[[:upper:]]{2}=[[:print:]]+", "", substr(DescrIni[chSpe], nchar(annot[chSpe,"Description"]) +4, nchar(DescrIni[chSpe])) ) # all right of OS= until next tag
      ## remove ' (strain ..) ' specification
      annot[chSpe,"Species"] <- sub("\\ \\(strain\\ [[:print:]]+\\)\\ {0,1}$","", annot[chSpe,"Species"])
      annot[chSpe,"Description"] <- sub("\\ $","",annot[chSpe,"Description"])   # remove trailing space
}
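    ## e.g. a Description 'Ubiquitin OS=Homo sapiens OX=9606 GN=UBB' (hypothetical fasta-header) yields Species='Homo sapiens' and Description='Ubiquitin'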
if(debug) {message(fxNa,"rpd6b .. "); rpdb6 <- list(annot=annot,specPref=specPref)}
## separate multi-species (create columns 'Accession','GeneName','Species','SpecType')
if(!silent) { chSp <- is.na(annot[,"Species"])
if(any(chSp, na.rm=TRUE) && !all(chSp)) message(fxNa,"Note: ",sum(chSp)," (out of ",nrow(tmp),") lines with unrecognized species")
      if(!all(chSp)) { tab <- table(annot[,"Species"])
        if(!silent) message(fxNa,"Count by species : ", paste(names(tab), tab, sep=": ", collapse=" ; ")) }}    # all lines assigned
if(length(specPref) >0) {
annot <- .extrSpecPref(specPref, annot, useColumn=c("Species","EntryName","GeneName","Accession","Majority.protein.IDs","Fasta.headers"), suplInp=tmp, silent=silent, debug=debug, callFrom=fxNa) }
if(debug) {message(fxNa,"rpd7 .. "); rpd7 <- list(annot=annot,specPref=specPref,chSp=chSp,tmp=tmp,quantCol=quantCol )}
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) >1) {
## explicit columns (for abundance/quantitation data)
## problem : extract '^Abundances*' but NOT 'Abundances.Count.*'
quantColIni <- quantCol <- grep(quantCol[1], colnames(tmp))
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
} else {
## pattern search (for abundance/quantitation data)
if(length(quantCol) <1) { quantCol <- "^Abundance"
if(!silent) message(fxNa,"Setting argument 'quantCol' to '^Abundance'")}
quantCol <- grep(quantCol, colnames(tmp))
if(length(quantCol) <1) quantCol <- grep("^abundance", tolower(colnames(tmp)))
if(length(quantCol) <1) quantCol <- grep("Intensity$", colnames(tmp))
if(length(quantCol) <1) quantCol <- grep("intensity$", tolower(colnames(tmp)))
quantColIni <- quantCol
if(length(quantCol) <1) stop(msg," specified in argument 'quantCol' !") }
## check for columns to exclude (like 'Abundances.Count.')
if(length(excluCol)==1) {
excCo <- grep(excluCol, colnames(tmp))
      if(any(excCo %in% quantCol, na.rm=TRUE)) {
        quantCol <- quantCol[-wrMisc::naOmit(match(excCo, quantCol))]
if(length(quantCol) <1) stop(msg," (all match to 'excluCol')") else {
if(!silent) message(fxNa,"Removed ",length(quantColIni) -length(quantCol)," columns")}
}
}
abund <- as.matrix(tmp[,quantCol]) # abundance val
if(debug) {message(fxNa,"rpd8 .. "); rpd8 <- list(annot=annot,specPref=specPref,abund=abund,quantCol=quantCol)}
    ## check & clean abundances
chNorm <- grep("\\.Normalized\\.", colnames(abund))
if(length(chNorm)*2 == ncol(abund)) { # in case Normalized makes 1/2 of columns use non-normalized
abund <- abund[,-chNorm]
}
colnames(abund) <- sub("^Abundances\\.Normalized\\._{0,1}|^abundances\\.Normalized\\._{0,1}|^Abundances{0,1}_{0,1}|^abundances{0,1}_{0,1}","",colnames(abund))
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(tmp[,quantCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
    ## remove heading 'X..' from headers (only if header won't get duplicated)
chXCol <- grep("^X\\.\\.",colnames(annot))
if(length(chXCol) >0) {
newNa <- sub("^X\\.\\.","",colnames(annot)[chXCol])
chDu <- duplicated(c(newNa, colnames(annot)), fromLast=TRUE)
if(any(chDu, na.rm=TRUE)) newNa[which(chDu)] <- colnames(annot)[chXCol][which(chDu)]
colnames(annot)[chXCol] <- newNa }
    ## remove heading/trailing spaces (first look which columns might be subject to this treatment)
    ch1 <- list(A=grep("^ +",annot[1,]), B=grep("^ +",annot[2,]), C=grep("^ +",annot[floor(nrow(annot)/2),]), D=grep("^ +",annot[nrow(annot),]) )
    chCo <- unique(unlist(ch1))
    if(length(chCo) >0) annot[,chCo] <- sub("^ +","",sub(" +$","",annot[,chCo]))   # remove heading/trailing spaces
if(debug) { message(fxNa,"rpd9 .. dim annot ",nrow(annot)," and ",ncol(annot)); rpd9 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat) }
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"rpd9b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
if(debug) { message(fxNa,"rpd9c") }
}
## (optional) filter by FDR (so far use 1st of list where matches are found from argument FDRCol)
if(length(FDRCol) >0) {
## stand : "Found.in.Sample...S32..F32..Sample" Rfriendly : "Found.in.Sample.in.S33.F33.Sample"
## stand : "X..Protein.Groups" Rfriendly : "Number.of.Protein.Groups"
chFDR <- lapply(FDRCol, function(x) {z <- grep(x[1], colnames(tmp)); if(length(z) ==ncol(abund)) z else NULL})
names(chFDR) <- sapply(FDRCol, function(x) x[1])
chFDR <- chFDR[which(sapply(chFDR, length) >0)]
if(length(chFDR) >0) {
i <- 1 # so far just use 1st instance matching
searchFor <- FDRCol[[which(sapply(FDRCol, function(x) x[1]) %in% names(chFDR)[i])]]
        filtFdrHi <- tmp[,chFDR[[i]]] == searchFor[2]   # find occurrences of the best tag, eg 'High'
roSu <- rowSums(filtFdrHi) <1
        if(all(roSu) && !silent) message(fxNa,"NONE of the lines/proteins had any '",searchFor[2],"' in column(s) '",searchFor[1],"' !! This is probably not a good filtering-parameter, ignoring")
if(any(roSu, na.rm=TRUE) && !all(roSu)) { if(!silent) message(fxNa,"Removing ",sum(roSu)," lines/proteins without ANY '",searchFor[2],"' in columns '",searchFor[1],"'")
rmLi <- -1*which(roSu)
annot <- annot[rmLi,]
abund <- abund[rmLi,]
          filtFdrHi <- filtFdrHi[rmLi,]   # may be useful later on ?
tmp <- tmp[rmLi,] }
}
}
if(debug) { message(fxNa,"rpd11 .. length(FDRCol) ",length(FDRCol)," dim annot ",nrow(annot)," and ",ncol(annot))}
## rownames : check if Accession is unique
chAc <- duplicated(annot[,"Accession"], fromLast=FALSE)
if(any(chAc, na.rm=TRUE)) {
getLiToRemove <- function(x,useCol=c("rowNo","Contaminant","SpecType")) { # return index for all lines to remove from matrix ...
if(is.data.frame(x)) x <- as.matrix(x)
spe <- grep("^species", x[,useCol[3]])
if(length(spe) >0) {
rmLi <- x[which(1:nrow(x) != spe[1]), useCol[1]]
} else { ## look for any lines marked as Contaminant="true", then mark other(s) for remove
rmLi <- if(any(tolower(x[,useCol[2]])=="true", na.rm=TRUE)) x[which(tolower(x[,useCol[2]]) !="true") ,useCol[1]] }
as.integer(rmLi) }
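        ## illustrative sketch (hypothetical input) : getLiToRemove(cbind(rowNo=1:2, Contaminant=c("True","False"), SpecType=NA)) returns 2,
        ##   ie the non-contaminant duplicate gets marked for removal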
## check if one of duplicated lines is marked as Contaminant -> remove non-contaminant, BUT NOT 'speciesX' ?
if(contamFilter) { # ready to correct (if possible) duplicated 'Accession' entries
## elaborate procedure for removing duplicate Accession lines : 'fuse' annot where no NA & use quantification-line with fewest NAs
## need to separate all groups of repeated IDs & treat separately
annot <- cbind(annot, rowNo=1:nrow(tmp))
duplAc <- unique(annot[which(chAc), "Accession"])
## need to remove duplicated lines which are not marked as Contaminant="True"
chAc2 <- duplicated(annot[,"Accession"], fromLast=TRUE)
rmLi <- chAc | chAc2
## find lines where is not Contaminant="True" (and keep contaminant)
annot <- cbind(annot, iniIndex=1:nrow(annot), nNA=rowSums(is.na(abund)))
useCol2 <- c("Accession","GeneName","Species","Contam","SpecType","Description","Contaminant", "iniIndex","nNA") # the last 2 are added within function
useCol2 <- wrMisc::naOmit(match(useCol2,colnames(annot)))
abund <- cbind(abund,iniIndex=1:nrow(abund))
rmAbund <- as.integer(unlist(by(abund[which(rmLi),], annot[which(rmLi),"Accession"], function(x) x[(1:nrow(x))[-which.min(rowSums(is.na(x)))],ncol(x)])))
rmAnnot2 <- as.integer(unlist(by(annot[which(rmLi),], annot[which(rmLi),"Accession"], function(x) x[2:nrow(x),ncol(x) -1])))
rmAnnot <- which(chAc)
        for(j in unique(annot[which(rmLi), useCol2[1]])) {    # need loop for 'fusing' columns with fewest NAs and recording which lines should be removed
x <- annot[which(annot[,"Accession"] %in% j),]
useLi <- apply(0+is.na(x),2,which.min)
if(any(useLi >1, na.rm=TRUE)) for(i in 2:max(useLi)) annot[as.integer(x[1,"iniIndex"]),which(useLi==i)] <- annot[as.integer(x[i,"iniIndex"]),which(useLi==i)]
}
if(length(rmAnnot) >0) {annot <- annot[-rmAnnot,]; tmp <- tmp[-rmAnnot,]
abund <- abund[-rmAbund,]
if(!silent) message(fxNa,"Removing ",length(rmAnnot)," lines due to duplicated Accessions (typically due to contaminants)")
}
annot <- annot[,-ncol(annot) +(1:0)] # remove extra columns (ie "iniIndex","nNA")
abund <- abund[,-ncol(abund)] # remove extra column (ie "iniIndex")
chAc <- duplicated(annot[,"Accession"], fromLast=FALSE)
if(debug) { message(fxNa,"rpd11b .. dim abund ",nrow(abund)," and ",ncol(abund)); rpd9 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat)}
}}
## Now we are ready to add unique rownames
if(any(chAc, na.rm=TRUE)) {
if(!silent) message(fxNa,sum(chAc)," (out of ",length(chAc),") cases of duplicated 'Accession' exist, adding extensions for use as rownames")
      rownames(tmp) <- rownames(annot) <- rownames(abund) <- wrMisc::correctToUnique(annot[,"Accession"], sep="_", atEnd=TRUE, callFrom=fxNa)
} else rownames(abund) <- rownames(annot) <- annot[,"Accession"]
## optional/additional counting results (PSM, no of peptides)
PSMCol <- if(length(PSMCol) ==1) grep(PSMCol,colnames(tmp)) else NULL
PepCol <- if(length(PepCol) ==1) grep(PepCol,colnames(tmp)) else NULL
usTy <- c("PSM","NoOfPeptides")[which(c(length(PSMCol),length(PepCol)) ==ncol(abund))]
if(length(usTy) >0) {
counts <- array(NA,dim=c(nrow(abund),ncol(abund),length(usTy)), dimnames=list(rownames(abund),colnames(abund),usTy))
if("PSM" %in% usTy) counts[,,"PSM"] <- as.matrix(tmp[,PSMCol])
if("NoOfPeptides" %in% usTy) counts[,,"NoOfPeptides"] <- as.matrix(tmp[,PepCol])
} else counts <- NULL
if(debug) {message(fxNa,"rpd12 .. "); rpd12 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,refLi=refLi,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat)}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 && identical(refLiIni, "mainSpe")) refLi <- which(annot[,"SpecType"] =="mainSpecies") # fix compatibility problem 'mainSpe' to 'mainSpecies'
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=TRUE)
if(debug) { message(fxNa,"rpd13 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," "); rpd13 <- list(annot=annot,tmp=tmp,abund=abund,quant=quant,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat)}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="PD", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rpd13b .."); rpd13b <- list()}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="PD", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) # add to all other import-functions ?
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rpd14"); rpd14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="Proteome Discoverer",
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod="ProteomeDiscoverer", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=match.call(),
created=as.character(Sys.time()), wrProteo.version=utils::packageVersion("wrProteo"), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot)
}
}
## ===== end of file: /scratch/gouwar.j/cran-all/cranData/wrProteo/R/readProtDiscovFile.R =====
#' Read Tabulated Files Exported by ProteomeDiscoverer At Peptide Level, Deprecated
#'
#' This deprecated (old) function imports peptide identification and quantification results from \href{https://www.thermofisher.com/order/catalog/product/OPTON-30812}{Thermo ProteomeDiscoverer}
#' which were exported as tabulated text, and extracts the relevant information.
#' The final output is a list containing 3 elements: \code{$annot}, \code{$raw} and optional \code{$quant}, or returns data.frame with entire content of file if \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed using Thermo ProteomeDiscoverer versions 2.2 to 2.5.
#' The format of resulting files at export also depends which columns are chosen as visible inside ProteomeDiscoverer and subsequently get chosen for export.
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for a default file) to read for extracting file-names as sample-names and other experiment-related information.
#' Preceding and following amino acids (relative to identified protease recognition sites) will be removed from peptide sequences and be displayed in $annot as columns 'prec' and 'foll'.
#' If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants.
#' This function replaces the deprecated function \code{readPDExport}.
#'
#' Besides, ProteomeDiscoverer version number and full raw-file path will be extracted for $notes in final output.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) new column-names for quantification data (ProteomeDiscoverer does not automatically use file-names from spectra); this argument has priority over \code{suplAnnotFile}
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'   the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by ProteomeDiscoverer; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{TRUE} defaults to file '*InputFiles.txt' (needed to match information of \code{sdrf}) which can be exported next to main quantitation results;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants marked by ProteomeDiscoverer.
#'  If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Accession","Description","Gene","Contaminant","Sum.PEP.Score","Coverage....","X..Peptides","X..PSMs","X..Unique.Peptides", "X..AAs","MW..kDa.") )
#' @param FDRCol (list) optional indication to search for protein FDR information
#' @param titGraph (character) custom title to plot
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProteomeDiscovererFile}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
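#' ## a minimal usage sketch (not run; 'pepFile.txt' is a hypothetical peptide-level export) :
#' # dataPDpep <- readProtDiscovPeptides("pepFile.txt", path=path1, suplAnnotFile=FALSE)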
#'
#' @export
readProtDiscovPeptides <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, suplAnnotFile=TRUE, gr=NULL, sdrf=NULL, read0asNA=TRUE, quantCol="^Abundances*",
annotCol=NULL, contamCol="Contaminant", refLi=NULL, separateAnnot=TRUE, FDRCol=list(c("^Protein.FDR.Confidence","High"), c("^Found.in.Sample.","High")), plotGraph=TRUE,
titGraph="Proteome Discoverer", wex=1.6, specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"), silent=FALSE, debug=FALSE, callFrom=NULL) {
message("+++ This function is depreciated, it has been replaced by readProtDiscovererPeptides() from the same package ! \n +++ Synthax and structure of output remain the same ! \n")
## read ProteomeDiscoverer exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readProtDiscovPeptides")
  .Deprecated(new="readProtDiscovererPeptides", package="wrProteo", msg="The function readProtDiscovPeptides() has been deprecated and replaced by readProtDiscovererPeptides() from the same package
    \n +++ Syntax and structure of output remain the same ! \n")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- c("^Abundance\\.Count","^Abundances\\.Count","^Abundance\\.Ratio","^Abundances\\.Ratio","^Abundance\\.Grouped","^Abundances\\.Grouped") # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- quant <- counts <- NULL # initialize
modifSensible <- TRUE # separate modified from unmodified peptides (by attaching modif to seq)
.corPathW <- function(x) gsub("\\\\", "/", x)
## check if path & (tsv) file exist
if(!grepl("\\.txt$|\\.txt\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="txt", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rPDP2a ..")
## prepare for reading files
if(debug) { message(fxNa,"rPDP3 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])}
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE, header=TRUE), silent=TRUE)
if(inherits(tmp, "try-error")) stop("Unable to read input file ('",paFi,"')!")
if(debug) { message(fxNa,"rPDP3b .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col ")}
## extract peptide sequence
pepSe <- sub("\\.\\[[[:upper:]]\\]$","", sub("^\\[[[:upper:]]\\]\\.","", tmp[,"Annotated.Sequence"]))
precAA <- postAA <- rep("",nrow(tmp))
ch1 <- grep("\\.\\[[[:upper:]]\\]$", tmp[,"Annotated.Sequence"])
if(length(ch1) >0) precAA[ch1] <- substr(tmp[ch1,"Annotated.Sequence"], 2, 2)
ch1 <- grep("^\\[[[:upper:]]\\]\\.", tmp[,"Annotated.Sequence"])
if(length(ch1) >0) postAA[ch1] <- substr(sub(".*\\.\\[","",tmp[ch1,"Annotated.Sequence"]), 1, 1)
## other peptide/protein info
#txtCol <- c("Modifications", "Master.Protein.Accessions","Positions.in.Master.Proteins","Master.Protein.Descriptions")
seqCol <- c("Sequence","Annotated.Sequence","Modifications", "Qvality.PEP","Qvality.q.value", "Number.of.Protein.Groups","Number.of.Proteins","Number.of.PSMs", # no1-8
"Master.Protein.Accessions","Positions.in.Master.Proteins","Modifications.in.Master.Proteins","Master.Protein.Descriptions", #no 9-12; last (ie 12th) missing in data from LN
"Number.of.Missed.Cleavages","Theo.MHplus.in.Da","Contaminant", # no 13-14
"Charge.by.Search.Engine.A5.Sequest.HT","XCorr.by.Search.Engine.A10.Sequest.HT","XCorr.by.Search.Engine.A5.Sequest.HT", "Top.Apex.RT.in.min" ) # no 15-18; 15 & 16 are currently not used, but use grep for 'Charge'
if(debug) {message(fxNa,"rPDP3z length(seqCol) ",length(seqCol))
rPDP3z <- list(tmp=tmp,fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,modifSensible=modifSensible)}
maSeCo1 <- match(seqCol, colnames(tmp))
maSeCo2 <- match(gsub("",".",seqCol), colnames(tmp))
maSeCo <- if(sum(is.na(maSeCo1)) > sum(is.na(maSeCo2))) maSeCo2 else maSeCo1 # switch betw R-friendly and std
#quanCo <- "Abundance.F62.Sample.na"
quantCol <- "^Abundance" # use as pattern
IdentTyCol <- "Found.in.Sample" # use as pattern
## need other example for extracting quantifications ?
#"Confidence.by.Search.Engine.Sequest.HT","Percolator.q.Value.by.Search.Engine.Sequest.HT","Percolator.PEP.by.Search.Engine.Sequest.HT", "XCorr.by.Search.Engine.Sequest.HT","Channel.Occupancy.in.Percent")
if(debug) {message(fxNa,"rPDP4 ")
rPDP4 <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,maSeCo=maSeCo,modifSensible=modifSensible)}
.chColNa <- function(x, mat, renameTo=NULL, silent=FALSE, fxNa=NULL){
## check in 'matr' for column-name 'x', if required rename best hit (if no direct hit look using grep, then grep wo case); return corrected mat
chX <- x %in% colnames(mat)
if(all(chX)) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[match(x, colnames(mat))] <- renameTo # juste simple rename
} else { # try to localize column to use
chX <- grep(x, colnames(mat))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",x,"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else {
chX <- grep(tolower(x), tolower(colnames(mat)))
if(length(chX) >0) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo else x
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",tolower(x),"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else stop("Could NOT find column '",x,"' !!\n (available columns ",wrMisc::pasteC(colnames(mat), quoteC="'"),")") }
}
mat }
## EXTRACT PEPTIDE SEQUENCES
## extract peptide sequences
if(debug) {message(fxNa,"rPDP4a .. Ready to start extracting pep seq ")
rPDP4a <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,maSeCo=maSeCo,modifSensible=modifSensible)}
if(is.na(maSeCo[1])) { if(is.na(maSeCo[2])) {if(!silent) message(fxNa,"Invalid type of data")#; pepSeq <- NULL
} else pepSeq <- tmp[,maSeCo[2]]
#else { pepSeq <- tmp[,maSeCo[2]] } #sub("\\.\\[A-Z\\]$", "", sub("^\\[A-Z\\]\\.", "", tmp[,maSeCo[2]])) }
} else pepSeq <- tmp[,maSeCo[1]]
  fxPrecAA <- function(x) {  ## separate/extract note of preceding & following AA; takes char vector, returns 4-column matrix
    chPre <- grep("^\\[([[:upper:]]|\\-)\\]\\.", x)          # has note of preceding AA
    chFoll <- grep("\\.\\[([[:upper:]]|\\-)\\]($|_)", x)     # has note of following AA
    out <- cbind(pep=sub("\\.\\[([[:upper:]]|\\-)\\]","", sub("^\\[([[:upper:]]|\\-)\\]\\.","", x)), prec=NA, foll=NA, modifSeq=NA)
    if(length(chPre) >0) out[chPre,2] <- sub(".*\\[","", sub("\\]\\..+","", x[chPre]))    # the preceding AA
    if(length(chFoll) >0) out[chFoll,3] <- sub("\\].*","", sub(".+\\.\\[","", x[chFoll]))
out }
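  ## illustrative sketch : fxPrecAA("[K].VLSEDR.[G]") (hypothetical annotated sequence) yields pep='VLSEDR', prec='K' and foll='G'; 'modifSeq' gets filled just below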
annot1 <- fxPrecAA(pepSeq) # split
pepSeq <- annot1[,4] <- annot1[,1] # also used lateron for rownames of quant
if(modifSensible) { hasMod <- nchar(tmp[,maSeCo[3]]) >0
if(any(hasMod, na.rm=TRUE)) annot1[which(hasMod),4] <- gsub(" ","", paste(annot1[which(hasMod),1], tmp[which(hasMod),maSeCo[3]], sep="_")) # add separator & modification
}
#old#if(any(hasMod, na.rm=TRUE)) pepSeq[which(hasMod)] <- paste(pepSeq[which(hasMod)],tmp[which(hasMod),maSeCo[3]],sep="_") } # modification-separator
if(debug) {message(fxNa,"Done extracting pep seq rPDP4b"); rPDP4b <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,pepSeq=pepSeq,annot1=annot1,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,maSeCo=maSeCo,modifSensible=modifSensible) }
  ## ANNOTATION (peptide/protein oriented)
usColAnn <- maSeCo[c(3,6:7,9:14)]
if(any(is.na(usColAnn), na.rm=TRUE)) {
if(!silent) message(fxNa,"Note : ",sum(is.na(usColAnn))," protein-annotation columns (typically exported) were NOT found in this data-set !")
usColAnn <- wrMisc::naOmit(usColAnn) }
if(length(usColAnn) >0) { annot <- if(sum(!is.na(usColAnn)) >1) tmp[, wrMisc::naOmit(usColAnn)] else as.matrix(tmp[, wrMisc::naOmit(usColAnn)])
} else annot <- NULL
chPrecAA <- !is.na(annot1[,2])
chFollAA <- !is.na(annot1[,3])
if(any(chPrecAA)) if("precAA" %in% colnames(annot)) annot[,"precAA"] <- annot1[,2] else annot <- cbind(annot, prec.AA=annot1[,2])
if(any(chFollAA)) if("follAA" %in% colnames(annot)) annot[,"follAA"] <- annot1[,3] else annot <- cbind(annot, foll.AA=annot1[,3])
annot <- if(ncol(annot1) >3) cbind(annot, seq=annot1[,1], modifSeq=annot1[,4]) else cbind(annot, seq=annot1[,1])
chDuNa <- duplicated(annot1[,4])
if(any(chDuNa)) { if(!silent) message(fxNa,"Note : Some 'modifSeq' appear duplicated !!")
rownames(annot) <- wrMisc::correctToUnique(annot1[,4], silent=silent, callFrom=fxNa) # "modifSeq"
} else rownames(annot) <- annot1[,4] # "modifSeq"
usColCha <- grep("^charge", tolower(colnames(tmp))) # include charge
if(length(usColCha) >0) { char <- tmp[,usColCha]
if(length(usColCha) >1) { ## more than 1 cols, need to find best col : choose with fewest NAs
usColCha <- usColCha[which.min(colSums(is.na(char)))] }
if(debug) message(fxNa,"Column for Charge found & added", if(debug) " rPDP4c")
annot <- cbind(annot, Charge=tmp[,usColCha])
}
rm(annot1)
if(debug) {message(fxNa,"rPDP4c .. Done extracting peptide annotation ")
rPDP4c <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,pepSeq=pepSeq,annot=annot,maSeCo=maSeCo,modifSensible=modifSensible, pepSeq=pepSeq,hasMod=hasMod, annot=annot,quantCol=quantCol)}
## ABUNDANCE
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) >1) {
## explicit columns (for abundance/quantitation data)
## problem : extract '^Abundances*' but NOT 'Abundances.Count.*'
quantColIni <- quantCol <- grep(quantCol[1], colnames(tmp))
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
} else {
## pattern search (for abundance/quantitation data)
if(length(quantCol) <1) { quantCol <- "^Abundance"
if(!silent) message(fxNa,"Setting argument 'quantCol' to '^Abundance'")}
quantCol <- grep(quantCol, colnames(tmp))
if(length(quantCol) <1) quantCol <- grep("^abundance", tolower(colnames(tmp)))
if(length(quantCol) <1) quantCol <- grep("Intensity$", colnames(tmp))
if(length(quantCol) <1) quantCol <- grep("intensity$", tolower(colnames(tmp)))
quantColIni <- quantCol
if(length(quantCol) <1) stop(msg," specified in argument 'quantCol' !") }
## check for columns to exclude (like 'Abundances.Count.')
if(length(excluCol) >1) {
excCo <- unique(unlist(lapply(excluCol, grep, colnames(tmp))))
if(length(excCo) >0) {
quantCol <- quantCol[-wrMisc::naOmit(match(excCo, quantCol))]
if(length(quantCol) <1) stop(msg," (all match to 'excluCol')") else {
if(!silent) message(fxNa,"Removed ",length(quantColIni) -length(quantCol)," columns")}
}
}
if(length(quantCol) >0) { abund <- if(length(quantCol) >1) tmp[,quantCol] else {
matrix(tmp[,quantCol], ncol=1, dimnames=list(rownames(tmp),NULL))} # how to know column-name if single sample ?
rownames(abund) <- rownames(annot) #wrMisc::correctToUnique(pepSeq, silent=silent, callFrom=fxNa)
## check for columns to exclude (like 'Abundances.Count.')
if(length(excluCol)==1) {
excCo <- grep(excluCol, colnames(tmp))
      if(any(excCo %in% quantCol, na.rm=TRUE)) {
        quantCol <- quantCol[-wrMisc::naOmit(match(excCo, quantCol))]
if(length(quantCol) <1) stop(msg," (all match to 'excluCol')") else {
if(!silent) message(fxNa,"Removed ",length(quantColIni) -length(quantCol)," columns")}
}
}
abund <- as.matrix(tmp[,quantCol]) # abundance val
rownames(abund) <- rownames(annot)
if(debug) {message(fxNa,"rPDP8 .. "); rPDP8 <- list(tmp=tmp,annot=annot,specPref=specPref,abund=abund,quantCol=quantCol)}
    ## check & clean abundances
chNorm <- grep("\\.Normalized\\.", colnames(abund))
if(length(chNorm)*2 == ncol(abund)) { # in case Normalized makes 1/2 of columns use non-normalized
abund <- abund[,-chNorm]
}
colnames(abund) <- sub("^Abundances\\.Normalized\\._{0,1}|^abundances\\.Normalized\\._{0,1}|^Abundances{0,1}_{0,1}|^abundances{0,1}_{0,1}","",colnames(abund))
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(tmp[,quantCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
    ## remove heading 'X..' from headers (only if header won't get duplicated)
### why here ??? 24mar23
chXCol <- grep("^X\\.\\.",colnames(annot))
if(length(chXCol) >0) {
newNa <- sub("^X\\.\\.","",colnames(annot)[chXCol])
chDu <- duplicated(c(newNa, colnames(annot)), fromLast=TRUE)
if(any(chDu, na.rm=TRUE)) newNa[which(chDu)] <- colnames(annot)[chXCol][which(chDu)]
colnames(annot)[chXCol] <- newNa }
    ## remove heading/trailing spaces (first look which columns might be subject to this treatment)
    ch1 <- list(A=grep("^ +",annot[1,]), B=grep("^ +",annot[2,]), C=grep("^ +",annot[floor(nrow(annot)/2),]), D=grep("^ +",annot[nrow(annot),]) )
    chCo <- unique(unlist(ch1))
    if(length(chCo) >0) annot[,chCo] <- sub("^ +","",sub(" +$","",annot[,chCo]))   # remove heading/trailing spaces
if(debug) { message(fxNa,"rPDP9 .. dim annot ",nrow(annot)," and ",ncol(annot)); rPDP9 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,contamCol=contamCol,infoDat=infoDat) }
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"rPDP9b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
if(debug) { message(fxNa,"rPDP9c") }
} else {
colnames(abund) <- sub("Abundance\\.F[[:digit:]]+\\.Sample\\.|Abundances\\.F[[:digit:]]+\\.Sample\\.","Sample.", colnames(abund))
}
} else abund <- NULL
## take log2 & normalize
if(length(abund) >0) {
quant <- if(utils::packageVersion("wrMisc") > "1.10") {
try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE)
} else try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE) #
if(debug) { message(fxNa,"rPDP9d .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")} }
## PD colnames are typically very cryptic, replace ..
if(length(sampleNames)==ncol(abund) && all(!is.na(sampleNames)) ) { # custom sample names given
    colnames(quant) <- colnames(abund) <- sampleNames
if(length(counts) >0) colnames(counts) <- sampleNames }
### GROUPING OF REPLICATES AND SAMPLE META-DATA
## META-DATA : read additional annotation & documentation files produced by PD
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="PD", path=path, abund=utils::head(abund), silent=silent, debug=debug, callFrom=fxNa)
}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="PD", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rPDP14"); rPDP14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="Proteome Discoverer",
refLi=refLi, refLiIni=nrow(abund), tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod="ProteomeDiscoverer", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=match.call(),
created=as.character(Sys.time()), wrProteo.version=utils::packageVersion("wrProteo"), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot)
}
## ===== end of file: /scratch/gouwar.j/cran-all/cranData/wrProteo/R/readProtDiscovPeptides.R =====
#' Read Tabulated Files Exported By ProteomeDiscoverer At Protein Level
#'
#' Protein identification and quantification results from
#' \href{https://www.thermofisher.com/order/catalog/product/OPTON-30812}{Thermo ProteomeDiscoverer}
#' which were exported as tabulated text can be imported and relevant information extracted.
#'
#' @details
#' This function has been developed using Thermo ProteomeDiscoverer versions 2.2 to 2.5.
#' The format of resulting files at export also depends which columns are chosen as visible inside ProteomeDiscoverer and subsequently get chosen for export.
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for a default file) to read for extracting file-names as sample-names and other experiment-related information.
#' If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants.
#'
#' The final output is a list containing as (main) elements: \code{$annot}, \code{$raw} and optional \code{$quant},
#' or returns data.frame with entire content of file if \code{separateAnnot=FALSE}.
#'
#' This function replaces the deprecated function \code{readProtDiscovFile}.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) custom column-names for quantification data (ProteomeDiscoverer does not automatically use file-names from spectra); this argument has priority over \code{suplAnnotFile}
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA
#' @param quantCol (character or integer) define which columns should be extracted as quantitation data : The argument may be the exact column-names to be used, or if length=1
#' content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep};
#' if \code{quantCol='allAfter_calc.pI'} all columns to the right of the column 'calc.pI' will be interpreted as quantitation data
#' (may be useful with files that have been manually edited before passing to wrProteo)
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants marked by ProteomeDiscoverer.
#'  If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final log2 (normalized) quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Accession","Description","Gene","Contaminant","Sum.PEP.Score","Coverage....","X..Peptides","X..PSMs","X..Unique.Peptides", "X..AAs","MW..kDa.") )
#' @param FDRCol (list) optional indication to search for protein FDR information
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'   the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by ProteomeDiscoverer; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{TRUE} defaults to file '*InputFiles.txt' (needed to match information of \code{sdrf}) which can be exported next to main quantitation results;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readFragpipeFile}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' fiNa <- "tinyPD_allProteins.txt.gz"
#' dataPD <- readProteomeDiscovererFile(fileName=fiNa, path=path1, suplAnnotFile=FALSE)
#' summary(dataPD$quant)
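#' ## annotation and raw (non-normalized) abundances are returned as dataPD$annot and dataPD$raw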
#'
#' @export
readProteomeDiscovererFile <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, read0asNA=TRUE, quantCol="^Abundance", annotCol=NULL, contamCol="Contaminant",
refLi=NULL, separateAnnot=TRUE, FDRCol=list(c("^Protein.FDR.Confidence","High"), c("^Found.in.Sample.","High")), gr=NULL, sdrf=NULL, suplAnnotFile=TRUE,
groupPref=list(lowNumberOfGroups=TRUE), specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"), plotGraph=TRUE, wex=1.6, titGraph="Proteome Discoverer",
silent=FALSE, debug=FALSE, callFrom=NULL) {
## read ProteomeDiscoverer exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readProteomeDiscovererFile")
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
contamFilter <- TRUE # filter contaminants away
#excluCol <- c("^Abundance\\.Count","^Abundances\\.Count","^Abundance\\.Ratio","^Abundances\\.Ratio","^Abundance\\.Grouped","^Abundances\\.Grouped") # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- NULL # initialize
.corPathW <- function(x) gsub("\\\\", "/", x)
## check if path & (tsv) file exist
if(!grepl("\\.txt$|\\.txt\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="txt", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent, debug=debug)
## note : reading sample-setup from 'suplAnnotFile' at this place won't allow comparing if number of samples/columns corresponds to data; do after reading main data
if(debug) message(fxNa,"rpd0 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(paFi, stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a ProteomeDiscoverer-file") }
NULL
} else {
if(debug) { message(fxNa,"rpd1 ... dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col ")
rpd1 <- list(tmp=tmp,paFi=paFi,annotCol=annotCol,fileName=fileName) }
## locate & extract annotation
## default as R-friendly (convert standard cols later to this format)
if(length(annotCol) <1) annotCol <- c("Accession","Description","Gene","Gene.Name","Marked.as", "Number.of.Peptides","Number.of.PSMs","Number.of.Unique.Peptides","Number.of.AAs","Coverage.in.Percent")
## also ?? "Exp.q.value.Combined","Sum.PEP.Score"
if(debug) message(fxNa,"rpd1a")
## option for future: also extract column "MarkedAs"
PSMCol <- "^Number\\.of\\.PSMs." # pattern searching tag for PSM-data (was previously .. .by.Search.Engine)
PepCol <- "^Number\\.of\\.Peptides." # pattern searching tag for Number of peptides (was preiously .. .by.Search.Engine)
## future option : lateron rename columns called as "Description" to annotCol[2]
## below use explicit colnames "Accession","Description", rename if tolower() fits
.extrCol <- function(x, mat, asIndex=FALSE, notFound=NA, silent=FALSE, fxNa=NULL) {
## integrate to wrMisc ?
## look for column 'x' in matrix, extract index or content of column
if(length(x) >1) x <- x[1]
isNum <- grepl("^[[:digit:]]+$", x)
if(isTRUE(isNum)) {
x <- as.integer(x)
if(x > ncol(mat)) stop(fxNa,"Invalid column number chosen (",x," but only ",ncol(mat)," available)")
out <- if(isTRUE(asIndex)) x else matrix(mat[,x], ncol=1, dimnames=list(rownames(mat),colnames(mat)[x]))
} else {
ch1 <- colnames(mat) %in% x # direct match
if(any(ch1)) { if(sum(ch1) >1 && !silent) message(fxNa,"Note: ",sum(ch1)," columns at direct match, using 1st")
out <- if(isTRUE(asIndex)) which(ch1)[1] else matrix(mat[,which(ch1)[1]], ncol=1, dimnames=list(rownames(mat),colnames(mat)[which(ch1)[1]]))
} else {
            ch1 <- grep(x, colnames(mat))   # partial match (grep) over column-names
            if(length(ch1) >0) { if(length(ch1) >1 && !silent) message(fxNa,"Note: ",length(ch1)," columns found by grep, using 1st")
              out <- if(isTRUE(asIndex)) ch1[1] else matrix(mat[,ch1[1]], ncol=1, dimnames=list(rownames(mat),colnames(mat)[ch1[1]]))
            } else {
              ch1 <- grep(tolower(x), tolower(colnames(mat)))   # case-tolerant grep over column-names
              if(length(ch1) >0) { if(length(ch1) >1 && !silent) message(fxNa,"Note: ",length(ch1)," columns found by grep (case-independent), using 1st")
              out <- if(isTRUE(asIndex)) ch1[1] else matrix(mat[,ch1[1]], ncol=1, dimnames=list(rownames(mat),colnames(mat)[ch1[1]]))
} else {
if(grepl("^error|^stop", notFound)) {
msg <- c(fxNa,"Could NOT find column '",x,"' !!\n (available columns ",wrMisc::pasteC(colnames(mat), quoteC="'"),")")
stop(msg)
} else { if(is.null(notFound)) out <- NULL else {
out <- if(isTRUE(asIndex)) NA else matrix(NA, ncol=1, dimnames=list(rownames(mat),x))
if(!silent) if(grepl("^warning", notFound)) message(msg) else warning(msg)}}
}
}
}
}
out }
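      ## illustrative sketch (hypothetical data) : .extrCol("Accession", cbind(Accession=c("P1","P2"))) returns a 1-column matrix of accessions;
      ##   with asIndex=TRUE it returns the column index (here 1)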
## check for essential colnames !
annot <- cbind(Accession=.extrCol(annotCol[1], tmp, silent=silent, fxNa=fxNa), # 1st typically 'Accession'
EntryName=NA, GeneName=NA, Description=.extrCol(annotCol[2], tmp, silent=silent, fxNa=fxNa), Species=NA, Contam=NA) #, iniDescription=NA)
#annot[,"iniDescription"] <- annot[,"Description"]
## add other cols form annotCol
if(debug) { message(fxNa,"rpd1b "); rpd1b <- list() }
#notAnnCol <- c( "Accession","Description","Gene","Number.of.Peptides","Number.of.PSMs","Number.of.Unique.Peptides")
if(length(annotCol) >2) { chSupCol <- match(annotCol[-(1:2)], colnames(tmp))
      if(any(!is.na(chSupCol))) { annot <- cbind(annot, tmp[,wrMisc::naOmit(chSupCol)])
        if(!silent) message(fxNa,"Adding supplemental annotation-columns ",wrMisc::pasteC(annotCol[-(1:2)][!is.na(chSupCol)], quoteC="'")) }}
## note 'EntryName' (eg 'UBE2C_HUMAN' not avail in PD)
## note 'GeneName' (eg 'UBE2C' can be extracted out of 'Description' after GN=
## extract GN
if(debug) { message(fxNa,"rpd1c "); rpd1c <- list(tmp=tmp,paFi=paFi,annotCol=annotCol,fileName=fileName) }
## add more annot cols
## check for R-friendly
    Rfriendly <- !(length(grep("^X\\.\\.", colnames(tmp))) >0 || length(grep("Abundance\\.\\.F[[:digit:]]+\\.\\.", colnames(tmp))) >0)
annotColNo <- NULL # initialize for message rpd2
if(length(annotCol) >2) {
if(is.numeric(annotCol)) { message(fxNa,"Extraction by column index not yet finished")
} else {
annotColNo <- match(annotCol[3:length(annotCol)], colnames(tmp))
if(!Rfriendly) {
## assume that annotation is in first 19 columns (example until 16) !!
maxNCol <- min(ncol(tmp), 19)
        ## convert standard output to R-friendly
        colnames(tmp)[1:maxNCol] <- sub("^X\\.\\.","Number.of.", colnames(tmp)[1:maxNCol])    # sub("\\.\\.\\.$",".in.Percent",
        ## define table of specific terms to substitute ..
        subst <- cbind(std=c("Exp.q.value.Combined","Coverage....","MW..kDa.","calc..pI"),
          rfriendly=c("Exp..q.value..Combined","Coverage.in.Percent","MW.in.kDa","calc.pI"))
        chSubst <- match(subst[,1], colnames(tmp)[1:maxNCol])
        if(any(!is.na(chSubst))) colnames(tmp)[wrMisc::naOmit(chSubst)] <- subst[wrMisc::naOmit(match(colnames(tmp)[1:maxNCol], subst[,1])), 2]
}
## extract contam separately & remove from annotCol
      if(contamCol %in% colnames(tmp)) annot[,"Contam"] <- as.logical(gsub(" ","",tmp[,contamCol]))
      chCt <- which(annotCol %in% contamCol)          # guard : don't empty annotCol when contamCol is absent
      if(length(chCt) >0) annotCol <- annotCol[-chCt]
}
}
if(length(annotCol) >2) {
annotColNo <- wrMisc::naOmit(match(annotCol[-(1:2)], colnames(tmp)))
if(length(annotColNo) >0) annot <- cbind(annot, tmp[,annotColNo])
}
if(debug) { message(fxNa,"rpd2 .. annotColNo : ", wrMisc::pasteC(annotColNo)) #," contamCol : ",wrMisc::pasteC(contamCol)," ")
rpd2 <- list(tmp=tmp,annot=annot,annotCol=annotCol,fileName=fileName,Rfriendly=Rfriendly)} # PSMCol=PSMCol,PepCol=PepCol,
## extract GN from 'Description' (like ...GN=LIPK )
chGN <- grep(" GN=[[:alpha:]]", annot[,"Description"])
if(length(chGN) >0) annot[chGN,"GeneName"] <- sub(" [[:upper:]]{2}=.*","", sub(".* GN=","", annot[chGN,"Description"]))
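  ##  eg (hypothetical) Description '.. OS=Homo sapiens OX=9606 GN=LIPK PE=1 ..' should yield GeneName 'LIPK' (and Species 'Homo sapiens' just below)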
## extract Species from 'Description'
chSpe <- grep(" OS=[[:alpha:]]", annot[,"Description"])
if(length(chSpe) >0) annot[chSpe,"Species"] <- sub(" [[:upper:]]{2}=.*","", sub(".* OS=","", annot[chSpe,"Description"]))
  ## clean 'Description' entries: remove trailing punctuation or open brackets (ie not closed) at end of (truncated) fasta header
if(cleanDescription) {
if(debug) { message(fxNa,"rpd3a"); rpd3a <- list() }
chD <- grep(" [[:upper:]]{1,2}=", annot[,"Description"])
if(length(chD) >0) annot[chD,"Description"] <- sub(" [[:upper:]]{1,2}=.*","", annot[chD,"Description"]) # remove all add'l fields (eg OS=... OX=... GN=...)
}
if(debug) {message(fxNa,"rpd4 .. dim annot: ", nrow(annot)," li and ",ncol(annot)," cols; colnames : ",wrMisc::pasteC(colnames(annot))," ");
rpd4 <- list(annot=annot,tmp=tmp,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol)}
## identify user defined (main) groups of proteins (eg species, function, etc), create column 'SpecType'
if(length(specPref) >0) {
annot <- .extrSpecPref(specPref=specPref, annot=annot, useColumn=c("Species","EntryName","GeneName","Accession","Marked.as"), suplInp=tmp, silent=silent, debug=debug, callFrom=fxNa) #"Majority.protein.IDs","Fasta.headers",
chCont <- grep("^conta$|^contaminant", tolower(annot[,"SpecType"]))
if(length(chCont) >0) {
if(length(chCont)==nrow(tmp)) {warning(fxNa,"All proteins/peptides were marked as contaminants based on 'specPref' and will be removed ! Trouble ahead ?")
} else if(!silent) message(fxNa,"Note : ",length(chCont)," proteins/peptide(s) were marked (in addition) as contaminants based on 'specPref'")
annot[chCont,"Contam"] <- TRUE
}
}
if(debug) {message(fxNa,"rpd5"); rpd5 <- list(annot=annot,tmp=tmp,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol)}
  ## filter/remove contaminants unless annotated in SpecType
if(any(as.logical(annot[,"Contam"]), na.rm=TRUE)) { # filter contam
filt1 <- if("SpecType" %in% colnames(annot)) {
which( !as.logical(annot[,"Contam"]) | !grepl("^conta", tolower(annot[,"SpecType"]))) ## do not filter away proteins/peptides anotated by SpecTy (exept maked as 'conta')
} else which(!as.logical(annot[,"Contam"]))
if(length(filt1)==0) {
warning(fxNa,"All lines will be removed based on contaminant-filter (column '",contamCol,"') - TROUBLE AHEAD ?")
} else if(!silent) message(fxNa,"Note : ",nrow(tmp) - length(filt1)," contaminant protein(s)/peptide(s) will be removed, ",length(filt1)," remain")
tmp <- tmp[filt1,]
annot <- annot[filt1,]
}
if(debug) {message(fxNa,"rpd6b .. "); rpd6b <- list()}
## report by species
if(!silent) { chSp <- is.na(annot[,"Species"])
if(!all(chSp)) { tab <- table(annot[,"Species"])
if(any(chSp)) tab <- c(tab, na=sum(chSp))
tab <- rbind(names(tab),": ",tab," ; ")
tab[length(tab)] <- "" # no separator at last position
message(fxNa,"Count by 'Species' : ",apply(tab, 2, paste)) }} # all lines assigned
if(debug) {message(fxNa,"rpd7 .. "); rpd7 <- list(annot=annot,specPref=specPref,chSp=chSp,tmp=tmp,quantCol=quantCol )}
extraQuantColCheck <- TRUE # avoid 'Abundances.Count.' and 'Abundances.Normalized.' when pattern searching for quant-columns
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) <1) {
## default pattern search (for abundance/quantitation data)
quantCol <- "^Abundance"
if(debug) message(fxNa,"Setting argument 'quantCol' to '^Abundance'")
}
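  ## with the default pattern, columns like 'Abundance.F1.Sample.x' or 'Abundances.F12.Sample.y' should be retained,
  ## while types like 'Abundances.Count.*' or 'Abundances.Normalized.*' get excluded further below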
## construct quantColInd for index to use
if(length(quantCol) >1) {
## explicit columns (for abundance/quantitation data)
    ch1 <- match(quantCol, colnames(tmp))        # look for direct match (rarely ok)
chNA <- is.na(ch1)
if(all(chNA)) { # no direct match found
ch1 <- match(paste0(quantCol,".F.Sample"), sub("\\.F[[:digit:]]+\\.Sample",".F.Sample",colnames(tmp))) # match to simplified "Cancer01.F.Sample" instead of "Cancer01.F1.Sample"
chNA <- is.na(ch1)
if(all(chNA)) { # no composed match found
## run grep on each indiv quantCol
message(fxNa,"grep on each indiv quantCol - Not yet developed")
## develop further ?
stop(fxNa,"None of samples found !!")
}
}
if(any(chNA)) { message(fxNa,"Note : ",sum(chNA)," out of ",length(chNA)," samples NOT found !")
ch1 <- ch1[which(!chNA)]
}
} else { ## single value => use as search pattern
if(debug) {message(fxNa,"rpd7b"); rpd7b <- list(annot=annot,specPref=specPref,chSp=chSp,tmp=tmp,quantCol=quantCol) }
if(identical("allAfter_calc.pI", quantCol)) {
calcPIcol <- c("calc.pI","calc..pI","calc pI","calc_pI") # possible variations of 'calc.pI'
chCol <- calcPIcol %in% colnames(tmp)
if(any(chCol, na.rm=TRUE)) {
quantColInd <- (which(colnames(tmp)==calcPIcol[which(chCol)[1]]) +1) : ncol(tmp)
      } else {quantColInd <- NULL; stop(fxNa,"Cannot find column called 'calc.pI' (or variants 'calc..pI','calc pI','calc_pI'); don't know which columns to choose as quantitation data !")}
} else {
## need to avoid "Abundances.Normalized.F1.Sample" or 'Abundances.Count.*'
ch1 <- grep(paste0(if(substr(quantCol,1,1) !="^") "^",quantCol,"\\.F[[:digit:]]+\\.Sample"), colnames(tmp)) # construct pattern code to "Abundance.F1.Sample"
if(length(ch1) <1) {
ch1 <- grep(paste0(if(substr(quantCol,1,1) !="^") "^",quantCol,"s\\.F[[:digit:]]+\\.Sample"), colnames(tmp)) # construct pattern code to "Abundances.F1.Sample"
if(length(ch1) <1) {
ch1 <- grep(paste0(if(substr(quantCol,1,1) !="^") "^",quantCol,"s{0,1}\\.F[[:digit:]]"), colnames(tmp)) # construct pattern code to "Abundances.F1"
          ## challenge : one may pick too many columns (type "Abundances.Normalized.F1.Sample" or 'Abundances.Count.*')
if(length(ch1) <1) {
qC1 <- paste0(sub("\\\\.","", sub("S$","", quantCol)),"\\.S")
ch1 <- grep(qC1, colnames(tmp)) # construct pattern code to "Abundance.S"
excluPat <- "\\.Normalized|\\.Count|\\.Ratio|\\.Grouped"
if(length(ch1) >0) { ch2 <- grepl(excluPat, colnames(tmp)[ch1]) # avoid '.Normalized' (if not concerning all)
if(length(ch2) >0 && !all(ch2)) ch1 <- ch1[which(!ch2)]
} else {
ch1 <- grep(quantCol, colnames(tmp)) # use directly as pattern (may find too many)
if(length(ch1) >0) { ch2 <- grepl(excluPat, colnames(tmp)[ch1]) # avoid '.Normalized' (if not concerning all)
if(length(ch2) >0 && !all(ch2)) ch1 <- ch1[which(!ch2)]
} else {
warning(fxNa,"Unable to find any matches to '",quantCol,"' !") }
}
}
}
if(debug) message(fxNa,"Found ",length(ch1)," quantitation-columns", if(length(ch1) >0) c(" (eg ",wrMisc::pasteC(colnames(tmp)[utils::head(ch1)], quoteC="'"),")"))
}
}
}
  if(!identical("allAfter_calc.pI", quantCol)) quantColInd <- ch1      # keep quantColInd from the 'allAfter_calc.pI' branch, where applicable
if(length(quantColInd) <1) stop(msg," ('",quantCol,"') NOT FOUND !")
  ## look for columns ending with 'intensity' ?
quantCol <- quantColInd
## extract quantitation
if(length(quantCol) >0) { abund <- if(length(quantCol) >1) tmp[,quantCol] else {
matrix(tmp[,quantCol], ncol=1, dimnames=list(rownames(tmp),NULL))}} # how to know column-name if single sample ?
#rownames(abund) <- wrMisc::correctToUnique(pepSeq, silent=silent, callFrom=fxNa)
if(debug) {message(fxNa,"rpd8 .. "); rpd8 <- list(annot=annot,specPref=specPref,abund=abund,quantCol=quantCol)}
  ## check & clean abundances
chNorm <- grep("\\.Normalized\\.", colnames(abund))
if(length(chNorm)*2 == ncol(abund)) { # in case Normalized makes 1/2 of columns use non-normalized
abund <- abund[,-chNorm]
}
colnames(abund) <- sub("^Abundances\\.Normalized\\._{0,1}|^abundances\\.Normalized\\._{0,1}|^Abundances{0,1}_{0,1}|^abundances{0,1}_{0,1}","",colnames(abund))
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(tmp[,quantCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
  ## remove heading 'X..' from headers (only if header won't get duplicated)
chXCol <- grep("^X\\.\\.",colnames(annot))
if(length(chXCol) >0) {
newNa <- sub("^X\\.\\.","",colnames(annot)[chXCol])
chDu <- duplicated(c(newNa, colnames(annot)), fromLast=TRUE)
if(any(chDu, na.rm=TRUE)) newNa[which(chDu)] <- colnames(annot)[chXCol][which(chDu)]
colnames(annot)[chXCol] <- newNa }
  ## remove heading/trailing spaces (first look which columns might be subject to this treatment)
  ch1 <- list(A=grep("^ +",annot[1,]), B=grep("^ +",annot[2,]), C=grep("^ +",annot[floor(nrow(annot)/2),]), D=grep("^ +",annot[nrow(annot),]) )
  chCo <- unique(unlist(ch1))
  annot[,chCo] <- sub("^ +","",sub(" +$","",annot[,chCo]))   # remove heading/trailing spaces
if(debug) { message(fxNa,"rpd9 .. dim annot ",nrow(annot)," and ",ncol(annot)); rpd9 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat) }
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"rpd9b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
if(debug) { message(fxNa,"rpd9c") }
} else {
colnames(abund) <- sub("Abundance\\.F[[:digit:]]+\\.Sample\\.|Abundances\\.F[[:digit:]]+\\.Sample\\.","Sample.", colnames(abund))
}
## (optional) filter by FDR (so far use 1st of list where matches are found from argument FDRCol)
if(length(FDRCol) >0) {
## stand : "Found.in.Sample...S32..F32..Sample" Rfriendly : "Found.in.Sample.in.S33.F33.Sample"
## stand : "X..Protein.Groups" Rfriendly : "Number.of.Protein.Groups"
chFDR <- lapply(FDRCol, function(x) {z <- grep(x[1], colnames(tmp)); if(length(z) ==ncol(abund)) z else NULL})
names(chFDR) <- sapply(FDRCol, function(x) x[1])
chFDR <- chFDR[which(sapply(chFDR, length) >0)]
if(length(chFDR) >0) {
i <- 1 # so far just use 1st instance matching
searchFor <- FDRCol[[which(sapply(FDRCol, function(x) x[1]) %in% names(chFDR)[i])]]
      filtFdrHi <- tmp[,chFDR[[i]]] == searchFor[2]    # find occurrences of best tag 'High'
roSu <- rowSums(filtFdrHi) <1
      if(all(roSu) && !silent) message(fxNa,"NONE of the lines/proteins had any '",searchFor[2],"' in column(s) matching '",searchFor[1],"' !!  This is probably not a good filtering-parameter, ignoring")
if(any(roSu, na.rm=TRUE) && !all(roSu)) { if(!silent) message(fxNa,"Removing ",sum(roSu)," lines/proteins without ANY '",searchFor[2],"' in columns '",searchFor[1],"'")
rmLi <- -1*which(roSu)
annot <- annot[rmLi,]
abund <- abund[rmLi,]
        filtFdrHi <- filtFdrHi[rmLi,]      # useful later on ?
tmp <- tmp[rmLi,] }
}
}
if(debug) { message(fxNa,"rpd11 .. length(FDRCol) ",length(FDRCol)," dim annot ",nrow(annot)," and ",ncol(annot))}
## rownames : check if Accession is unique
chAc <- duplicated(annot[,"Accession"], fromLast=FALSE)
if(any(chAc, na.rm=TRUE)) {
getLiToRemove <- function(x,useCol=c("rowNo","Contaminant","SpecType")) { # return index for all lines to remove from matrix ...
if(is.data.frame(x)) x <- as.matrix(x)
spe <- grep("^species", x[,useCol[3]])
if(length(spe) >0) {
rmLi <- x[which(1:nrow(x) != spe[1]), useCol[1]]
} else { ## look for any lines marked as Contaminant="true", then mark other(s) for remove
rmLi <- if(any(tolower(x[,useCol[2]])=="true", na.rm=TRUE)) x[which(tolower(x[,useCol[2]]) !="true") ,useCol[1]] }
as.integer(rmLi) }
## check if one of duplicated lines is marked as Contaminant -> remove non-contaminant, BUT NOT 'speciesX' ?
if(contamFilter) { # ready to correct (if possible) duplicated 'Accession' entries
## elaborate procedure for removing duplicate Accession lines : 'fuse' annot where no NA & use quantification-line with fewest NAs
## need to separate all groups of repeated IDs & treat separately
annot <- cbind(annot, rowNo=1:nrow(tmp))
duplAc <- unique(annot[which(chAc), "Accession"])
## need to remove duplicated lines which are not marked as Contaminant="True"
chAc2 <- duplicated(annot[,"Accession"], fromLast=TRUE)
rmLi <- chAc | chAc2
## find lines where is not Contaminant="True" (and keep contaminant)
annot <- cbind(annot, iniIndex=1:nrow(annot), nNA=rowSums(is.na(abund)))
useCol2 <- c("Accession","GeneName","Species","Contam","SpecType","Description","Contaminant", "iniIndex","nNA") # the last 2 are added within function
useCol2 <- wrMisc::naOmit(match(useCol2,colnames(annot)))
abund <- cbind(abund,iniIndex=1:nrow(abund))
rmAbund <- as.integer(unlist(by(abund[which(rmLi),], annot[which(rmLi),"Accession"], function(x) x[(1:nrow(x))[-which.min(rowSums(is.na(x)))],ncol(x)])))
rmAnnot2 <- as.integer(unlist(by(annot[which(rmLi),], annot[which(rmLi),"Accession"], function(x) x[2:nrow(x),ncol(x) -1])))
rmAnnot <- which(chAc)
      for(j in duplAc) {      # need loop for 'fusing' columns with fewest NAs and recording which lines should be removed
x <- annot[which(annot[,"Accession"] %in% j),]
useLi <- apply(0+is.na(x),2,which.min)
if(any(useLi >1, na.rm=TRUE)) for(i in 2:max(useLi)) annot[as.integer(x[1,"iniIndex"]),which(useLi==i)] <- annot[as.integer(x[i,"iniIndex"]),which(useLi==i)]
}
if(length(rmAnnot) >0) {annot <- annot[-rmAnnot,]; tmp <- tmp[-rmAnnot,]
abund <- abund[-rmAbund,]
if(!silent) message(fxNa,"Removing ",length(rmAnnot)," lines due to duplicated Accessions (typically due to contaminants)")
}
annot <- annot[,-ncol(annot) +(1:0)] # remove extra columns (ie "iniIndex","nNA")
abund <- abund[,-ncol(abund)] # remove extra column (ie "iniIndex")
chAc <- duplicated(annot[,"Accession"], fromLast=FALSE)
} }
if(debug) { message(fxNa,"rpd11b .. dim abund ",nrow(abund)," and ",ncol(abund)); rpd11b <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat)}
## Now we are ready to add unique rownames
if(any(chAc, na.rm=TRUE)) {
if(!silent) message(fxNa,sum(chAc)," (out of ",length(chAc),") cases of duplicated 'Accession' exist, adding extensions for use as rownames")
rownames(tmp) <- rownames(annot) <- wrMisc::correctToUnique(annot[,"Accession"], sep="_", atEnd=TRUE, callFrom=fxNa)
} else rownames(abund) <- rownames(annot) <- annot[,"Accession"]
## optional/additional counting results (PSM, no of peptides)
PSMCol <- if(length(PSMCol) ==1) grep(PSMCol, colnames(tmp)) else NULL
PepCol <- if(length(PepCol) ==1) grep(PepCol, colnames(tmp)) else NULL
usTy <- c("PSM","NoOfPeptides")[which(c(length(PSMCol), length(PepCol)) ==ncol(abund))]
if(length(usTy) >0) {
counts <- array(NA,dim=c(nrow(abund),ncol(abund),length(usTy)), dimnames=list(rownames(abund),colnames(abund),usTy))
if("PSM" %in% usTy) counts[,,"PSM"] <- as.matrix(tmp[,PSMCol])
if("NoOfPeptides" %in% usTy) counts[,,"NoOfPeptides"] <- as.matrix(tmp[,PepCol])
} else counts <- NULL
if(debug) {message(fxNa,"rpd12 .. "); rpd12 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,refLi=refLi,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat)}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 && identical(refLiIni, "mainSpe")) refLi <- which(annot[,"SpecType"] =="mainSpecies") # fix compatibility problem 'mainSpe' to 'mainSpecies'
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=TRUE)
if(debug) { message(fxNa,"rpd13 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ");
rpd13 <- list(annot=annot,tmp=tmp,abund=abund,quant=quant,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat, refLi=refLi)}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="PD", path=path, abund=utils::head(quant), groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rpd13b .."); rpd13b <- list(annot=annot,tmp=tmp,abund=abund,quant=quant,setupSd=setupSd,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,Rfriendly=Rfriendly,contamCol=contamCol,PSMCol=PSMCol,PepCol=PepCol,infoDat=infoDat, refLi=refLi)}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="PD", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rpd14"); rpd14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="Proteome Discoverer",
refLi=refLi, refLiIni=refLiIni, notLogAbund=TRUE, tit=titGraph, las=NULL, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod="ProteomeDiscoverer", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=utils::packageVersion("wrProteo"), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot)
}
}
#' Additional/final check and adjustments to sample-order after readSampleMetaData()
#'
#' This (low-level) function performs an additional/final check & adjustments to sample-names after readSampleMetaData()
#'
#' @param abund (matrix or data.frame) abundance data, only the colnames will be used
#' @param setupSd (list) describing sample-setup, typically produced by \code{readSampleMetaData}
#' @param gr (factor) optional custom information about replicate-layout, has priority over setupSd
#' @param sampleNames (character) custom sample-names, has priority over abund and setupSd
#' @param quantMeth (character) 2-letter abbreviation of name of quantitation-software (eg 'MQ')
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns an enlarged/updated list 'setupSd' (set setupSd$sampleNames, setupSd$groups)
#' @seealso used in \code{readProtDiscovererFile}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readFragpipeFile}}
#' @examples
#' set.seed(2021)
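#' ## minimal sketch with synthetic data (no additional meta-data available) :
#' abund1 <- matrix(round(rnorm(12,10),2), ncol=4, dimnames=list(NULL, paste0("sample_",1:4)))
#' setupSd1 <- .checkSetupGroups(abund=abund1, setupSd=list(), quantMeth="MQ")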
#' @export
.checkSetupGroups <- function(abund, setupSd, gr=NULL, sampleNames=NULL, quantMeth=NULL, silent=FALSE, callFrom=NULL, debug=FALSE) {
  ## additional/final check & adjustments to sample-names after readSampleMetaData()
  ## examine
  ## returns enlarged/updated list 'setupSd' (set setupSd$sampleNames, setupSd$groups)
## assume 'abund' to be valid matrix of data.frame !!
## 'setupSd' as list produced by readSampleMetaData()
  ## 'gr' .. (char vector or factor) designating who should be considered as replicate/same level, as optional custom entry (has priority over automatic groups/levels)
  ## 'sampleNames' .. optional custom entry for sample names (has priority over automatic names)
## 'quantMeth' .. (char vect, length=1) design method via abbreviation like 'PD' for ProteomeDiscoverer, 'MQ' for MaxQuant, 'PL' for Proline, etc (used for automatic trimming of default column/sample-names)
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".checkSetupGroups")
delPat <- "_[[:digit:]]+$|\\ [[:digit:]]+$|\\-[[:digit:]]+$" # remove enumerators, ie tailing numbers after separator
rawExt <- "\\.raw$|\\.Raw$|\\.RAW$" # paste(paste0("\\.",c("Raw","raw","RAW"),"$"), collapse="|")
.corPathW <- function(x) gsub("\\\\", "/", x)
extrSamNaSetup <- function(setS, meth) {
    ## extract (user-given) sampleNames out of setupSd$annotBySoft  (colnames may depend on quant-method)
if(!any(c("PD","MQ","PL","FP") %in% meth, na.rm=TRUE)) meth <- "other"
switch(meth, PD = NULL,
MQ = setupSd$annotBySoft$Experiment , PL = setupSd$annotBySoft$Experiment, FP = setupSd$annotBySoft$Experiment, other=NULL)}
  defColNa <- function(colN, meth) {     # check if colN may represent default colnames (ie not useful since without any indication about samples)
    ## note MQ : requires setExperiment to be defined by user, set each sample as different for getting quant by sample !
    ## note FP : requires setExperiment to be defined by user (defining different bioreplicates is sufficient for getting quant by sample)
if(!any(c("PD","MQ","PL","FP") %in% meth, na.rm=TRUE)) meth <- "other"
switch(meth, PD = all(grepl("F[[:digit:]]+\\.Sample$", colN), na.rm=TRUE),
MQ = FALSE , PL = FALSE, FP = FALSE, other=FALSE)}
.extrColNames <- function(abun, meth, silent=FALSE, callFrom=NULL, debug=FALSE) { ## get sampleNames from colnames of abund & clean
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".extrColNames")
# abun=abund; meth=quantMeth
grou <- grou2 <- NULL
colNa2 <- sub(rawExt,"", .corPathW(colnames(abun)))
colNa <- basename(colNa2) # trim to filename
chDu <- duplicated(colNa)
if(debug) {message(fxNa,"eCN1"); eCN1 <- list(abun=abun,meth=meth,colNa2=colNa2,colNa=colNa,chDu=chDu)}
if(any(chDu)) {
if(debug) message(fxNa,"Some stripped filenames appear duplicated, need to keep with path ...")
colNa <- colNa2
chDu <- duplicated(colNa) }
if(any(chDu) && !silent) message(fxNa,"Some filenames appear duplicated !! (eg ",wrMisc::pasteC(utils::head(unique(colNa[which(chDu)]), 3))," )")
if(debug) {message(fxNa,"eCN2"); eCN2 <- list(abun=abun,meth=meth,colNa2=colNa2,colNa=colNa,chDu=chDu)}
if(is.null(colNa) || sum(duplicated(colNa)) ==length(colNa) -1) { sampleNames <- NULL
if(debug) message(fxNa,"All colnames of abund are empty or identical ! (can't use to figure out pattern of replicates/levels)")
} else {
## colNa seem usable
if(any(c("FP") %in% meth)) colNa <- sub("MaxLFQ Intensity$|Intensity$","", colNa)
if(any(c("MQ") %in% meth)) colNa <- sub("^LFQ Intensity","", colNa)
if("PL" %in% meth) colNa <- sub("^abundance|^Abundance","", colNa)
colNa <- sub(" +$|\\.+$|_+$|\\-+$","", colNa) # remove tailing separators (' ','.','_','-')
colNa <- sub("^ +|^\\.+|^_+|^\\-+","", colNa) # heading tailing separators
chDu <- duplicated(colNa)
if(!silent && any(chDu)) message(fxNa,"NOTE : ",sum(chDu)," DUPLICATED colnames for abund !! (eg ",wrMisc::pasteC(utils::head(unique(colNa[which(chDu)]), 3))," )")
if(debug) {message(fxNa,"eCN3"); eCN3 <- list()}
## now address group names/levels
if("PD" %in% meth) {
grou2 <- sub("rep[[:digit:]]+$","", colnames(abun))
grou2 <- sub(" +$|\\.+$|_+$|\\-+$","", grou2) # remove tailing separators (' ','.','_','-')
if(all(grepl("^\\.F[[:digit:]]+\\.Sample$", colnames(abun)))) grou2 <- NULL # PD default names like '.F1.Sample', '.F2.Sample' etc
} else {
if("PL" %in% meth) colNa <- sub("\\.mzDB\\.t\\.xml$","", colNa)
colNa <- sub("\\.raw$|\\.RAW$","", colNa)
sep <- c("_","\\-","\\.")
#sub("\\.mzDB|\\.t\\.xml","",colNa)
rmTx <- paste0(c("Sample","Samp","Replicate","Rep"),"$")
rmTx <- paste(paste0(rep(sep, each=length(rmTx)), rep(rmTx, length(sep))), collapse="|")
grou2 <- sub(rmTx,"",sub(delPat,"", colNa)) # remove tailing enumerators..
}
if(debug) message(fxNa,"Based on colnames(abund) : ",length(unique(grou2))," levels for ",ncol(abun)," samples")
}
if(debug) {message(fxNa,"eCN4 done"); eCN4 <- list()}
list(sampleNames=colNa, grou=grou2) }
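  ##  eg (hypothetical) colnames 'proj_1.raw','proj_2.raw' should yield $sampleNames c('proj_1','proj_2') and $grou rep('proj',2)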
## finish groups of replicates & annotation setupSd
if(debug) { message(fxNa,"cSG0"); cSG0 <- list(gr=gr,abund=abund,sampleNames=sampleNames, setupSd=setupSd) } # sampleNames=sampleNames
## grou2 ... colnames modified to pattern ; grou .. pattern as index, + names (level names)
colNa <- grou <- grou2 <- NULL
iniSaNa <- iniGr <- FALSE
  ## if valid user-defined sampleNames is given => use
if(length(sampleNames) != ncol(abund)) {
if(debug && length(sampleNames) >0) message(fxNa,"Invalid entry of 'sampleNames' (length= ",length(sampleNames)," but ",ncol(abund)," expected) ...ignoring")
sampleNames <- NULL
} else { iniSaNa <- TRUE
setupSd$sampleNames <- sampleNames }
if(debug) {message(fxNa,"cSG1"); cSG1 <- list()}
  ## if valid user-defined grouping is given => use
if(length(gr) != ncol(abund)) {
if(debug && length(gr) >0) message(fxNa,"Invalid entry of 'gr' (length= ",length(gr)," but ",ncol(abund)," expected) ...ignoring")
gr <- NULL
} else {
## check if setupSd has 'prioritized' grouping
if("groups" %in% names(setupSd)) {
setupSd$level <- setupSd$groups
} else {
iniGr <- TRUE
setupSd$level <- match(gr, unique(gr))
setupSd$groups <- names(setupSd$level) <- gr } }
if(debug) {message(fxNa,"cSG2"); cSG2 <- list()}
defaultColNa <- defColNa(colN=colnames(abund), meth=quantMeth)
saNa <- .extrColNames(abund, meth=quantMeth, silent=silent,callFrom=fxNa,debug=debug) # sampleNames from colnames of abund & clean
if(debug) {message(fxNa,"cSG3"); cSG3 <- list(sampleNames=sampleNames,gr=gr,abund=abund,iniSaNa=iniSaNa,iniGr=iniGr,setupSd=setupSd,defaultColNa=defaultColNa,saNa=saNa)}
## sampleNames/colnames : use orig colnames if avail (priority to colnames)
#colNa <- if(defaultColNa) NULL else sub(rawExt,"", colnames(abund)) # redundant to saNa$sampleNames
if(length(setupSd$level) ==ncol(abund)) gr <- setupSd$lev <- setupSd$level else {
if(length(setupSd$groups) ==ncol(abund)) gr <- setupSd$lev <- setupSd$groups }
if(debug) { message(fxNa,"cSG3b"); cSG3b <- list()}
#if("lev" %in% names(setupSd)) {
if(length(setupSd$lev) ==ncol(abund)) {
## thus, we do have setupSd
## sampleNames/colnames : use orig colnames if avail (priority to colnames)
if(!iniSaNa && !defaultColNa && length(saNa$sampleNames) ==ncol(abund)) sampleNames <- saNa$sampleNames
    if(!iniSaNa && length(setupSd$sampleNames) ==ncol(abund)) sampleNames <- setupSd$sampleNames    # setupSd$sampleNames has priority (if defined)
## compare grouping of orig colnames to sdrf ?
if(length(sampleNames) !=ncol(abund)) {
## get sampleNames from setupSd (as far as possible)
saNa2 <- extrSamNaSetup(setupSd, quantMeth) # from setupSd$annotBySoft (by quant method)
sampleNames <- if(length(saNa2) ==ncol(abund)) saNa2 else if(length(setupSd$sdrfDat$comment.data.file.)==ncol(abund)) sub(rawExt,"", setupSd$sdrfDat$comment.data.file.)
}
## now for gr
if(length(gr) != ncol(abund)){
if(!iniSaNa && !defaultColNa && length(saNa$grou) ==ncol(abund)) gr <- saNa$grou
      if(!iniSaNa && length(setupSd$groups) ==ncol(abund)) gr <- setupSd$groups     # setupSd$groups has priority (if defined)
}
if(debug) {message(fxNa,"cSG4a"); cSG4a <- list()}
} else {
## (no setupSd)
## get sampleNames from abund (as far as possible)
if(iniSaNa && length(sampleNames) != ncol(abund)) {
if(defaultColNa) { if(length(gr)==ncol(abund)) sampleNames <- wrMisc::correctToUnique(gr) # case of PD : use gr if suitable
} else sampleNames <- saNa$sampleNames # other use colnames of abund
}
## now for gr (no setupSd)
if(!iniGr) {
if(!defaultColNa) { ## standard case (eg MQ)
## guess gr from colnames
if(debug) message(fxNa,"Guess 'gr' from colnames, ",quantMeth,"")
gr <- saNa$grou
} else { ## (PD:) no way to guess groups
if(!silent) message(fxNa,"Difficulty to identify groups of replicates (no setupSd) in case of absence of metadata by method ",quantMeth,"")
}
}
if(debug) {message(fxNa,"cSG4b"); cSG4b <- list()}
}
## case of PD : check if setupSd$annotBySoft$File.Name usable
if(defaultColNa && length(sampleNames) != ncol(abund)) {
chOr <- NA # initialize
if(length(setupSd$annotBySoft$File.Name) ==ncol(abund)) chOr <- match(setupSd$annotBySoft$File.Name, setupSd$sdrfDat$comment.data.file.)
if(!any(is.na(chOr))) {
if(!all(chOr ==1:ncol(abund), na.rm=TRUE)) {
sampleNames <- sub("\\.raw$|\\.RAW$","", setupSd$annotBySoft$File.Name)
## try extracting pattern of replicates
colNa <- sub("\\.raw$|\\.RAW$","", basename(sub(rawExt,"", .corPathW(colnames(sampleNames)))))
sep <- c("_","\\-","\\.")
rmTx <- paste0(c("Sample","Samp","Replicate","Rep"),"$")
rmTx <- paste(paste0(rep(sep, each=length(rmTx)), rep(rmTx, length(sep))), collapse="|")
gr <- sub(rmTx,"", sub(delPat,"", colNa)) # remove tailing enumerators..
if(debug) message(fxNa,"cSG4c Method ",quantMeth," : Extracted ",length(unique(gr)), " groups of replicates based on meta-data")
}
} }
if(debug) {message(fxNa,"cSG5"); cSG5 <- list(sampleNames=sampleNames,gr=gr,abund=abund,iniSaNa=iniSaNa,iniGr=iniGr,setupSd=setupSd,defaultColNa=defaultColNa,saNa=saNa)}
  if(length(sampleNames) != ncol(abund) && defaultColNa && !silent) message(fxNa,"Still UNABLE to find suitable colnames")
if(length(gr) != ncol(abund) && defaultColNa && debug) message(fxNa,"Still UNABLE to find suitable groups")
## set result to object
  if(!is.list(setupSd)) { if(length(setupSd) >0) warning(fxNa,"BIZARRE format of 'setupSd', its content will be lost")
setupSd <- list()}
setupSd$sampleNames <- sampleNames
setupSd$groups <- gr
setupSd
}
#' Generic plotting of density distribution for quantitation import-functions
#'
#' This (low-level) function allows (generic) plotting of density distribution for quantitation import-functions
#'
#' @param abund (matrix or data.frame) abundance data, will be plotted as distribution
#' @param quant (matrix or data.frame) optional additional abundance data, to plot 2nd distribution, eg of normalized data
#' @param custLay (matrix) optional custom layout, will be passed to \code{\link[graphics]{layout}}
#' @param normalizeMeth (character, length=1) name of normalization method (will be displayed in title of figure)
#' @param softNa (character, length=1) name of quantitation-software (typically 2-letter abbreviation, eg 'MQ')
#' @param refLi (integer) index of reference lines (used for reporting in the figure title)
#' @param refLiIni (integer) initial specification of reference lines (used for reporting in the figure title)
#' @param notLogAbund (logical) set to \code{TRUE} if \code{abund} is linear but should be plotted as log2
#' @param figMarg (numeric, length=4) custom figure margins (will be passed to \code{\link[graphics]{par}}), defaults to c(3.5, 3.5, 3, 1)
#' @param tit (character) custom title
#' @param las (integer) indicate orientation of text in axes
#' @param cexAxis (numeric) size of numeric axis labels as cex-expansion factor (see also \code{\link[graphics]{par}})
#' @param nameSer (character) custom label for data-sets or columns (length must match number of data-sets)
#' @param cexNameSer (numeric) size of individual data-series labels as cex-expansion factor (see also \code{\link[graphics]{par}})
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a logical value (if data were valid for plotting) and produces a density distribution figure (if data were found valid)
#' @seealso used in \code{readProtDiscovererFile}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readFragpipeFile}}
#' @examples
#' set.seed(2018); datT8 <- matrix(round(rnorm(800) +3,1), nc=8, dimnames=list(paste(
#' "li",1:100,sep=""), paste(rep(LETTERS[1:3],c(3,3,2)),letters[18:25],sep="")))
#' .plotQuantDistr(datT8, quant=NULL, refLi=NULL, tit="Synthetic Data Distribution")
#' @export
.plotQuantDistr <- function(abund, quant, custLay=NULL, normalizeMeth=NULL, softNa=NULL, refLi=NULL, refLiIni=NULL, notLogAbund=NA, figMarg=c(3.5, 3.5, 3, 1), tit=NULL, las=NULL, cexAxis=0.8, nameSer=NULL, cexNameSer=NULL, silent=FALSE, callFrom=NULL, debug=FALSE) {
  ## generic plotting of density distribution for quantitation import-functions
## assume 'abund' (raw, non-normalized) and 'quant' (final normalized) to be valid matrix of data.frame !!
## 'custLay' ..(matrix) for layout()
## 'normalizeMeth' (character)
## 'softNa' .. (char vect, length=1) design method, used in display only
## 'refLi' .. (integer) reference line
## 'refLiIni' .. (integer) initial reference line(s)
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".plotQuantDistr")
oparMar <- graphics::par("mar") # old margins, for rest after figure
oparLayout <- graphics::par("mfcol") # old layout, for rest after figure
on.exit(graphics::par(mar=oparMar, mfcol=oparLayout)) # restore old mar settings
if(debug) {message(fxNa,"pQD0 .. length custLay ", length(custLay)); pQD0 <- list(abund=abund,quant=quant,custLay=custLay,normalizeMeth=normalizeMeth,softNa=softNa,refLi=refLi)}
plotGraph <- TRUE # useful ? (to report if plot can be drawn as output ?)
## check if abund & quant are redundant (while normalizeMeth="none")
abundQuantRed <- FALSE # see if abund and quant are redundant (ie no need to plot twice)
if("none" %in% normalizeMeth && length(abund) >0 && length(quant) >0) {
if(identical(quant, abund) || identical(quant, log2(abund))) { abundQuantRed <- TRUE
if(debug) message(fxNa,"No need for 2nd plot, 'abund' and 'quant' are identical ..") }
}
if(length(custLay) >0) graphics::layout(custLay) else {
if(!identical(normalizeMeth,"none") && length(quant) >0 && !abundQuantRed) { ch1 <- try(graphics::layout(1:2), silent=TRUE)
if(inherits(ch1, "try-error")) message(fxNa,"Problem with figure, Need to restore layout ..") else if(debug) message(fxNa,"Setting layout to 1:2") }}
if(debug) { message(fxNa,"pQD1"); pQD1 <- list(abund=abund,quant=quant,custLay=custLay,normalizeMeth=normalizeMeth,softNa=softNa,refLi=refLi)}
if(length(las) != 1 || !is.integer(las)) {
ch1 <- c(if(length(abund) >0) ncol(abund) >7 || stats::median(nchar(colnames(abund)), na.rm=TRUE) >8 else FALSE,
if(length(quant) >0) ncol(quant) >7 || stats::median(nchar(colnames(quant)), na.rm=TRUE) >8 else FALSE)
las <- if(any(ch1)) 2 else 1 }
  if(length(figMarg) != 4 || !is.numeric(figMarg)) {figMarg <- c(3.5, 3.8, 3, 1)   # mar: bot,le,top,ri
    if(debug) message(fxNa,"Invalid entry for argument 'figMarg' (must be numeric of length=4), setting to default")}
ch1 <- try(graphics::par(mar=figMarg), silent=TRUE)
if(inherits(ch1, "try-error")) message(fxNa,"Problem with figure, Need to restore mar ..")
if(is.null(tit)) tit <- paste(softNa," quantification ")
titSu <- if(length(refLi) >0) paste0(c(if(length(refLiIni) ==1) c("'",refLiIni,"'") else c(" by ",length(refLi)," selected lines")),collapse="") else NULL #
usePa <- c("wrGraph","sm")
misPa <- !sapply(usePa, function(pkg) requireNamespace(pkg, quietly=TRUE))
titQ <- if(length(abund) >0) paste(tit, "(initial)",sep=" ") else tit
.findSuplLi <- function(x, n=3) {
if(length(x) >2) pretty(stats::quantile(x, c(0.15, 0.85), na.rm=TRUE), n) else NULL
}
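  ## .findSuplLi() suggests horizontal guide-lines near the 15th and 85th percentiles (via pretty()), used with abline() further below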
## check log-status, ie notLogAbund
if(length(abund) >0 && length(quant) >0) {
if(length(notLogAbund) <1) { notLogAbund <- NA
if(!silent) message(fxNa,"Invalid entry for 'notLogAbund', setting to default =NA") }
if(is.na(notLogAbund)) {
dAb <- diff(range(abund, na.rm=TRUE))
dQa <- diff(range(quant, na.rm=TRUE))
sugLogAbun <- dAb > 2^(signif(dQa,3) -2) && dAb > 3*dQa
if(debug) message(fxNa,"Trying to figure out if log2 should be taken for 'abund': ",dAb > 2^(signif(dQa,3) -2)," and ", dAb > 3*dQa)
if(sugLogAbun) {
notLogAbund <- TRUE } # abund is very likely linear scale, while quant is likely log-scale
} }
msg <- c("Invalid entry for 'notLogAbund',","Unknown log-status,"," assuming data is log2, ie notLogAbund=FALSE")
  if(length(notLogAbund) <1) { notLogAbund <- FALSE
    if(!silent) message(fxNa, msg[-2])
  } else if(any(is.na(notLogAbund))) { notLogAbund[which(is.na(notLogAbund))] <- FALSE
    if(!silent) message(fxNa, msg[-1]) }
colNa <- if(length(abund) >0) colnames(abund) else colnames(quant)
if(length(colNa) >0) {
ncharAx <- stats::quantile(nchar(colNa), c(0.5,0.9), na.rm=TRUE)
if(length(cexNameSer) !=1 || any(is.na(cexNameSer))) cexNameSer <- sort(round( c(7/ncharAx[2], 1.9/log(length(colNa)), 1, 0.48), 2))[2] # adjust cex series-names to length and ncol
suplLineA <- if(length(abund) >0) {.findSuplLi(if(isTRUE(notLogAbund)) log2(abund) else abund)} else NA
suplLineQ <- if(length(quant) >0) .findSuplLi(quant) else NA
} else { ncharAx <- cexNameSer <- suplLineA <- suplLineQ <- NA
if(debug) message(fxNa," Note : BOTH 'abund' and 'quant' are EMPTY - NOTHING TO PLOT")}
## check axis cex
if(debug) { message(fxNa,"pQD2 .. misPa ", wrMisc::pasteC(misPa,quoteC="'"),"; abund is linear ",notLogAbund); pQD2 <- list(abund=abund,quant=quant,custLay=custLay,normalizeMeth=normalizeMeth,softNa=softNa,refLi=refLi,tit=tit,titQ=titQ,suplLineA=suplLineA,suplLineQ=suplLineQ,ncharAx=ncharAx) }
chNeg <- sum(abund <0, na.rm=TRUE)
if(chNeg >0 && notLogAbund) { notLogAbund <- FALSE
if(!silent) message(fxNa,"Data suggest taking log2 might be useful, but due to presence of ",chNeg," negative values this is not possible")
}
if(any(misPa, na.rm=TRUE)) {
if(!silent) message(fxNa,"Please install package(s) ", wrMisc::pasteC(usePa[which(misPa)])," from CRAN for drawing vioplots")
if(length(abund) >0) {
## wrGraph not available : simple boxplot
ch1 <- try(graphics::boxplot(if(isTRUE(notLogAbund)) suppressWarnings(log2(abund)) else abund, main=titQ, las=1, outline=FALSE), silent=TRUE)
if(inherits(ch1, "try-error")) {plotGraph <- FALSE; if(!silent) message(fxNa,"UNABLE to draw boxplot of distribution !! ", sub("^Error in",":",ch1))} else {
graphics::abline(h=suplLineA, lty=2, col=grDevices::grey(0.6))} }
## plot normalized
if(length(quant) >0 && !abundQuantRed) {
if(identical(normalizeMeth,"none") || length(quant) <0) {
if(debug) {message(fxNa,"pQD3 .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")}
ch1 <- try(graphics::boxplot(quant, main=paste(tit," (",normalizeMeth,"-normalized",titSu,")"), las=1, outline=FALSE), silent=TRUE)
if(inherits(ch1, "try-error")) if(!silent) message(fxNa,"UNABLE to draw boxplot of normalized distribution !! ", sub("^Error in",":",ch1)) else {
graphics::abline(h=suplLineQ, lty=2, col=grDevices::grey(0.6)) } } }
} else { # wrGraph and sm are available
if(debug) { message(fxNa,"pQD4 draw vioplotW , abund is linear ",notLogAbund); pQD4 <- list(abund=abund,quant=quant,tit=tit,normalizeMeth=normalizeMeth,softNa=softNa,refLi=refLi,titQ=titQ,notLogAbund=notLogAbund,titSu=titSu,las=las, cexAxis=cexAxis, nameSer=nameSer, cexNameSer=cexNameSer,notLogAbund=notLogAbund,abundQuantRed=abundQuantRed ) }
if(length(abund) >0) {
if(length(chNeg) <1) abund <- suppressWarnings(log2(abund)) # presume as true 'raw', ie NOT log2
if(debug) message(fxNa," Try 1st Vioplot , is linear abundance data ",notLogAbund,", cexNameSer ",cexNameSer)
ch1 <- try(wrGraph::vioplotW(if(isTRUE(notLogAbund)) log2(abund) else abund, tit=titQ, wex=NULL, las=las, cexAxis=cexAxis, nameSer=nameSer, cexNameSer=cexNameSer, horizontal=FALSE, silent=debug, debug=debug, callFrom=fxNa), silent=TRUE)
if(inherits(ch1, "try-error")) {plotGraph <- FALSE; if(!silent) message(fxNa,"UNABLE to plot vioplotW !! ", sub("^Error in",":",ch1))
} else graphics::abline(h=suplLineA, lty=2, col=grDevices::grey(0.6))
}
## now normalized (and/or log-scale)
if(length(quant) >0 && !abundQuantRed) {
if(debug) {message(fxNa,"pQD5 draw norm/quant as vioplotW() ", length(quant) >0)}
tit1 <- if(length(normalizeMeth) ==1) paste(tit,", ",normalizeMeth,"-normalized",titSu) else paste(tit,", ",titSu)
if(debug) message(fxNa," Try 2nd Vioplot ")
ch1 <- try(wrGraph::vioplotW(quant, tit=tit1, wex=NULL, las=las, cexAxis=cexAxis, nameSer=nameSer, cexNameSer=cexNameSer, horizontal=FALSE, silent=debug, debug=debug,callFrom=fxNa), silent=TRUE)
if(inherits(ch1, "try-error")) { if(!silent) message(fxNa,"UNABLE to plot vioplotW for normalized data !! ", sub("^Error in",":",ch1))
} else graphics::abline(h=suplLineQ, lty=2, col=grDevices::grey(0.6))
}
}
plotGraph }
#' Extract additional information to construct colum SpecType
#'
#' This (low-level) function creates the column annot[,'SpecType'] which may help distinguishing different lines/proteins.
#' This information may, for example, be used to normalize only to all proteins of a common background matrix (species).
#' If $mainSpecies or $conta: match to annot[,"Species"], annot[,"EntryName"], annot[,"GeneName"], if length==1 grep in annot[,"Species"]
#'
#' @param specPref (list) may contain $mainSpecies, $conta ...
#' @param annot (matrix) main protein annotation
#' @param useColumn (factor) columns from annot to use/mine
#' @param suplInp (matrix) additional custom annotation
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a matrix with additional column 'SpecType'
#' @seealso used in \code{readProtDiscovererFile}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readFragpipeFile}}
#' @examples
#' ## minimal sketch with synthetic annotation :
#' annot1 <- cbind(Species=c("Homo sapiens","Escherichia coli"), EntryName=c("A_HUMAN","B_ECOLI"),
#'   GeneName=c("geneA","geneB"), Accession=c("P00001","P00002"))
#' .extrSpecPref(list(mainSpecies="Homo sapiens", conta="CON_"), annot1)
#' @export
.extrSpecPref <- function(specPref, annot, useColumn=c("Species","EntryName","GeneName","Accession"), suplInp=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
## create column annot[,'SpecType']
## if $mainSpecies or $conta: match to annot[,"Species"], annot[,"EntryName"], annot[,"GeneName"], if length==1 grep in annot[,"Species"]
## if other : match to annot[,"Species"], annot[,"Accession"], annot[,"EntryName"], annot[,"GeneName"], if length==1 grep in annot[,"EntryName"], annot[,"GeneName"], annot[,"Species"]
## 'suplInp' add'l matrix of annot (really needed ?)
## return results in column annot[,"SpecType"] (starting with 'mainSpecies','conta' and others - later may overwrite prev settings)
## special for PD : optional useColumn[5:6] : look by grep for specPref tags in cols "Majority.protein.IDs" and "Fasta.headers"
## 'specPref' ..(list) may contain $mainSpecies, $conta ...
## 'annot' ..(matrix) main protein annotation
## 'useColumn' ..(character) columns from annot to use/mine
## 'suplInp' ..(matrix) additional custom annotation
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".extrSpecPref")
if(debug){ message(fxNa," eSP0"); eSP0 <- list(specPref=specPref,annot=annot,useColumn=useColumn,suplInp=suplInp)}
if(length(annot) <1 || length(dim(annot)) !=2) stop("invalid 'annot' (must be matrix or data.frame)")
## check suplInp & match to useColumn, add to useColumn
if(length(useColumn) >4 && length(suplInp) >0 && length(dim(suplInp))==2) {
chAnn <- useColumn[5:length(useColumn)] %in% colnames(suplInp)
if(any(!chAnn)) useColumn <- c(useColumn[1:4], useColumn[(5:length(useColumn))[which(chAnn)]])
if(length(useColumn) <5) suplInp <- NULL
} else suplInp <- NULL
## check useColumn
chAnn <- useColumn[1:min(length(useColumn), 4)] %in% colnames(annot) # check for length useCol=0 ?? # nolint
if(any(!chAnn)) stop("Unknown/Non-standard 'annot' (missing colnames ",wrMisc::pasteC(useColumn[which(!chAnn)], quoteC="'"),")")
if(!"SpecType" %in% colnames(annot)) {annot <- cbind(annot, SpecType=rep(NA, nrow(annot))); if(debug) message(fxNa,"Adding column 'SpecType' to 'annot'")}
if(length(specPref) > 0) specPref <- specPref[which(sapply(specPref, length) >0)] # remove empty ..
if(length(specPref) > 0) if(is.list(specPref)) { # remove NA from specPref
chNA <- sapply(specPref, is.na)
if(any(unlist(chNA))) specPref <- sapply(specPref, wrMisc::naOmit)
} else { chNA <- is.na(specPref)
if(all(chNA)) specPref <- NULL else { spNames <- names(specPref[which(!chNA)]); specPref <- as.list(specPref[which(!chNA)]); names(specPref) <- spNames}}
if(length(specPref) > 0) {
if(debug) {message(fxNa,"eSP1"); eSP1 <- list()}
chNa <- c("mainSpecies","conta") %in% names(specPref)
if(any(!chNa) && ! silent) message(fxNa," ",wrMisc::pasteC(c("mainSpecies","conta")[which(!chNa)], quoteC="'")," Seem absent from 'specPref' !")
.MultGrep2 <- function(pat, y) {y <- as.matrix(y); z <- if(length(pat)==1) grepl(pat, y) else rowSums(sapply(pat, grepl, y)) >0
if(length(dim(y)) >1) rowSums(matrix(z, ncol=ncol(y))) >0 else z } # (multiple) grepl() when length of pattern 'pat' >0
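    ##  eg (hypothetical) .MultGrep2(c("CON_","LYSC_CHICK"), annot) gives one logical per row of annot (TRUE if any pattern matches in any column)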
mulP <- lapply(specPref, .MultGrep2, annot)
chLe <- sapply(mulP, length)
## assign to annot
for(i in which(chLe >0)) { markLi <- which(as.logical(mulP[[i]]))
if(length(markLi) >0) annot[markLi,"SpecType"] <- names(mulP)[i] }
if(debug) {message(fxNa,"eSP2"); eSP2 <- list(specPref=specPref,annot=annot,useColumn=useColumn,suplInp=suplInp,mulP=mulP)}
rm(mulP)
}
annot
}
#' Get matrix with UniProt abbreviations for selected species as well as simple names
#'
#' This (low-level) function allows accessing matrix with UniProt abbreviations for species frequently used in research.
#' This information may be used to harmonize species descriptions or extract species information out of protein-names.
#'
#' @return This function returns a 2-column matrix with species names
#' @seealso used eg in \code{readProtDiscovererFile}, \code{\link{readMaxQuantFile}}, \code{\link{readProlineFile}}, \code{\link{readFragpipeFile}}
#' @examples
#' .commonSpecies()
#' @export
.commonSpecies <- function() {
## matrix with UniProt abbreviations for common species
cbind(ext=c("_HUMAN","_MOUSE","_RAT","_CAVPO","_PIG","_BOVIN","_RABIT", "_SHEEP",
"_CAEEL", "_DROME","_DANRE","_XENLA","_AMBME", "_ARATH","_SOLTU","_BETVV", "_YEAST", "_ECOLI","_MYCTU"),
name=c("Homo sapiens","Mus muscullus","Rattus norvegicus","Cavia porcellus","Sus scrofa","Bos taurus","Oryctolagus cuniculus","Ovis aries",
"Caenorhabditis elegans","Drosophila melanogaster","Danio rerio","Xenopus laevis","Ambystoma mexicanum",
"Arabidopsis thaliana","Solanum tuberosum","Beta vulgaris", "Saccharomyces cerevisiae", "Escherichia coli", "Mycobacterium tuberculosis"),
simple=c("Human","Mouse","Rat","Pig","Guinea pigs", "Cow","Rabit", "Sheep",
"Celegans","Droso","Zebrafish","Frog","Axolotl", "Arabidopsis","Potato","Sugar beet", "Yeast","Ecoli","Mtuberculosis") )
}
#' Read Tabulated Files Exported by ProteomeDiscoverer At Peptide Level
#'
#' Initial peptide identification and quantification results from \href{https://www.thermofisher.com/order/catalog/product/OPTON-30812}{Thermo ProteomeDiscoverer}
#' which were exported as tabulated text can be imported and relevant information extracted.
#' The final output is a list containing 3 elements: \code{$annot}, \code{$raw} and optional \code{$quant}, or returns data.frame with entire content of file if \code{separateAnnot=FALSE}.
#'
#' @details
#' This function has been developed using Thermo ProteomeDiscoverer versions 2.2 to 2.5.
#' The format of resulting files at export also depends on which columns are chosen as visible inside ProteomeDiscoverer and subsequently get chosen for export.
#' Using the argument \code{suplAnnotFile} it is possible to specify a specific file (or search for a default file) to read for extracting file-names as sample-names and other experiment related information.
#' Preceding and following amino-acids (relative to identified protease recognition sites) will be removed from peptide sequences and be displayed in $annot as columns 'prec' and 'foll'.
#' If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants.
#' This function replaces the deprecated function \code{readPDExport}.
#'
#' Besides, ProteomeDiscoverer version number and full raw-file path will be extracted for $notes in final output.
#'
#' @param fileName (character) name of file to be read
#' @param path (character) path of file to be read
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}}
#' @param sampleNames (character) new column-names for quantification data (ProteomeDiscoverer does not automatically use file-names from spectra); this argument has priority over \code{suplAnnotFile}
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data: if character, this may be the ID at ProteomeExchange,
#'   the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by ProteomeDiscoverer; however, if \code{gr} is provided, \code{gr} gets priority for grouping of replicates;
#' if \code{TRUE} defaults to file '*InputFiles.txt' (needed to match information of \code{sdrf}) which can be exported next to main quantitation results;
#' if \code{character} the respective file-name (relative or absolute path)
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants marked by ProteomeDiscoverer.
#'  If a column named \code{contamCol} is found, the data will later be filtered to remove all contaminants; set to \code{NULL} for keeping all contaminants
#' @param refLi (character or integer) custom specify which line of data is main species, if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param annotCol (character) column names to be read/extracted for the annotation section (default c("Accession","Description","Gene","Contaminant","Sum.PEP.Score","Coverage....","X..Peptides","X..PSMs","X..Unique.Peptides", "X..AAs","MW..kDa.") )
#' @param FDRCol (list) optional indication to search for protein FDR information
#' @param titGraph (character) custom title to plot
#' @param wex (integer) relative expansion factor of the violin-plot (will be passed to \code{\link[wrGraph]{vioplotW}})
#' @param specPref (character or list) define characteristic text for recognizing (main) groups of species (1st for contaminants - will be marked as 'conta', 2nd for main species - marked as 'mainSpe',
#'  and optional following ones for supplemental tags/species - marked as 'species2','species3',...);
#' if list and list-element has multiple values they will be used for exact matching of accessions (ie 2nd of argument \code{annotCol})
#' @param plotGraph (logical or integer) optional plot of type vioplot of initial and normalized data (using \code{normalizeMeth}); if integer, it will be passed to \code{layout} when plotting
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot}, \code{$counts} an array with number of peptides, \code{$quantNotes}
#' and \code{$notes}; or if \code{separateAnnot=FALSE} the function returns a data.frame with annotation and quantitation only
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}, \code{\link{readMaxQuantFile}}, \code{\link{readProteomeDiscovererFile}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#'
#' @export
readProteomeDiscovererPeptides <- function(fileName, path=NULL, normalizeMeth="median", sampleNames=NULL, suplAnnotFile=TRUE, gr=NULL, sdrf=NULL, read0asNA=TRUE, quantCol="^Abundances*",
annotCol=NULL, contamCol="Contaminant", refLi=NULL, separateAnnot=TRUE, FDRCol=list(c("^Protein.FDR.Confidence","High"), c("^Found.in.Sample.","High")), plotGraph=TRUE,
titGraph="Proteome Discoverer", wex=1.6, specPref=c(conta="CON_|LYSC_CHICK", mainSpecies="OS=Homo sapiens"), silent=FALSE, debug=FALSE, callFrom=NULL) {
## read ProteomeDiscoverer exported txt
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readProteomeDiscovererPeptides")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- c("^Abundance\\.Count","^Abundances\\.Count","^Abundance\\.Ratio","^Abundances\\.Ratio","^Abundance\\.Grouped","^Abundances\\.Grouped") # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- quant <- counts <- NULL # initialize
modifSensible <- TRUE # separate modified from unmodified peptides (by attaching modif to seq)
.corPathW <- function(x) gsub("\\\\", "/", x)
## check if path & (tsv) file exist
if(!grepl("\\.txt$|\\.txt\\.gz$", fileName)) message(fxNa,"Trouble ahead, expecting tabulated text file (the file'",fileName,"' might not be right format) !!")
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="txt", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
if(debug) message(fxNa,"rPDP2a ..")
## prepare for reading files
if(debug) { message(fxNa,"rPDP3 .. Ready to read", if(length(path) >0) c(" from path ",path[1])," the file ",fileName[1])}
## read (main) file
## future: look for fast reading of files
tmp <- try(utils::read.delim(file.path(paFi), stringsAsFactors=FALSE, header=TRUE), silent=TRUE)
if(inherits(tmp, "try-error")) stop("Unable to read input file ('",paFi,"')!")
if(debug) { message(fxNa,"rPDP3b .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col ")}
## extract peptide sequence
pepSe <- sub("\\.\\[[[:upper:]]\\]$","", sub("^\\[[[:upper:]]\\]\\.","", tmp[,"Annotated.Sequence"]))
precAA <- postAA <- rep("",nrow(tmp))
ch1 <- grep("\\.\\[[[:upper:]]\\]$", tmp[,"Annotated.Sequence"])
if(length(ch1) >0) precAA[ch1] <- substr(tmp[ch1,"Annotated.Sequence"], 2, 2)
ch1 <- grep("^\\[[[:upper:]]\\]\\.", tmp[,"Annotated.Sequence"])
if(length(ch1) >0) postAA[ch1] <- substr(sub(".*\\.\\[","",tmp[ch1,"Annotated.Sequence"]), 1, 1)
## other peptide/protein info
#txtCol <- c("Modifications", "Master.Protein.Accessions","Positions.in.Master.Proteins","Master.Protein.Descriptions")
seqCol <- c("Sequence","Annotated.Sequence","Modifications", "Qvality.PEP","Qvality.q.value", "Number.of.Protein.Groups","Number.of.Proteins","Number.of.PSMs", # no1-8
"Master.Protein.Accessions","Positions.in.Master.Proteins","Modifications.in.Master.Proteins","Master.Protein.Descriptions", #no 9-12; last (ie 12th) missing in data from LN
"Number.of.Missed.Cleavages","Theo.MHplus.in.Da","Contaminant", # no 13-14
"Charge.by.Search.Engine.A5.Sequest.HT","XCorr.by.Search.Engine.A10.Sequest.HT","XCorr.by.Search.Engine.A5.Sequest.HT", "Top.Apex.RT.in.min" ) # no 15-18; 15 & 16 are currently not used, but use grep for 'Charge'
if(debug) {message(fxNa,"rPDP3z length(seqCol) ",length(seqCol))
rPDP3z <- list(tmp=tmp,fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,modifSensible=modifSensible)}
maSeCo1 <- match(seqCol, colnames(tmp))
  maSeCo2 <- match(gsub("\\."," ",seqCol), colnames(tmp))     # variant with spaces instead of dots (eg when file was read with check.names=FALSE)
maSeCo <- if(sum(is.na(maSeCo1)) > sum(is.na(maSeCo2))) maSeCo2 else maSeCo1 # switch betw R-friendly and std
#quanCo <- "Abundance.F62.Sample.na"
quantCol <- "^Abundance" # use as pattern
IdentTyCol <- "Found.in.Sample" # use as pattern
## need other example for extracting quantifications ?
#"Confidence.by.Search.Engine.Sequest.HT","Percolator.q.Value.by.Search.Engine.Sequest.HT","Percolator.PEP.by.Search.Engine.Sequest.HT", "XCorr.by.Search.Engine.Sequest.HT","Channel.Occupancy.in.Percent")
if(debug) {message(fxNa,"rPDP4 ")
rPDP4 <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,maSeCo=maSeCo,modifSensible=modifSensible)}
.chColNa <- function(x, mat, renameTo=NULL, silent=FALSE, fxNa=NULL){
## check in 'matr' for column-name 'x', if required rename best hit (if no direct hit look using grep, then grep wo case); return corrected mat
chX <- x %in% colnames(mat)
if(all(chX)) {
if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[match(x, colnames(mat))] <- renameTo # juste simple rename
} else { # try to localize column to use
chX <- grep(x, colnames(mat))
if(length(chX) >0) {
        if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",x,"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else {
chX <- grep(tolower(x), tolower(colnames(mat)))
if(length(chX) >0) {
          if(is.character(renameTo) && length(renameTo) ==1) colnames(mat)[chX[1]] <- renameTo
if(!silent && length(chX) >1) message(fxNa,"Found multiple columns containing '",tolower(x),"' : ",wrMisc::pasteC(colnames(mat)[chX], quoteC="'"),", using 1st")
} else stop("Could NOT find column '",x,"' !!\n (available columns ",wrMisc::pasteC(colnames(mat), quoteC="'"),")") }
}
mat }
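  ## eg (hypothetical call) : .chColNa("Contaminant", tmp, renameTo="Contam") should return 'tmp'
  ##   with the first column matching 'Contaminant' renamed to 'Contam'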
  ## EXTRACT PEPTIDE SEQUENCES
if(debug) {message(fxNa,"rPDP4a .. Ready to start extracting pep seq ")
rPDP4a <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,maSeCo=maSeCo,modifSensible=modifSensible)}
if(is.na(maSeCo[1])) { if(is.na(maSeCo[2])) {if(!silent) message(fxNa,"Invalid type of data")#; pepSeq <- NULL
} else pepSeq <- tmp[,maSeCo[2]]
#else { pepSeq <- tmp[,maSeCo[2]] } #sub("\\.\\[A-Z\\]$", "", sub("^\\[A-Z\\]\\.", "", tmp[,maSeCo[2]])) }
} else pepSeq <- tmp[,maSeCo[1]]
  fxPrecAA <- function(x) {  ## separate/extract note of preceding & following AA; takes char vector, returns 4-column matrix
    chPre <- grep("^\\[([[:upper:]]|\\-)\\]\\.", x)    # has note of preceding AA
    chFoll <- grep("\\.\\[([[:upper:]]|\\-)\\]($|_)", x)   # has note of following AA
    out <- cbind(pep=sub("\\.\\[([[:upper:]]|\\-)\\]","", sub("^\\[([[:upper:]]|\\-)\\]\\.","", x)), prec=NA, foll=NA, modifSeq=NA)
    if(length(chPre) >0) out[chPre,2] <- sub(".*\\[","", sub("\\]\\..+","", x[chPre]))     # the preceding AA
if(length(chFoll) >0) out[chFoll,3] <- sub("\\].*","", sub(".+\\.\\[","", x[chFoll]))
out }
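  ## eg fxPrecAA("[K].AADSEQK.[L]") gives pep="AADSEQK", prec="K", foll="L" (modifSeq gets filled in later)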
annot1 <- fxPrecAA(pepSeq) # split
  pepSeq <- annot1[,4] <- annot1[,1]         # also used later on for rownames of quant
if(modifSensible) { hasMod <- nchar(tmp[,maSeCo[3]]) >0
if(any(hasMod, na.rm=TRUE)) annot1[which(hasMod),4] <- gsub(" ","", paste(annot1[which(hasMod),1], tmp[which(hasMod),maSeCo[3]], sep="_")) # add separator & modification
}
#old#if(any(hasMod, na.rm=TRUE)) pepSeq[which(hasMod)] <- paste(pepSeq[which(hasMod)],tmp[which(hasMod),maSeCo[3]],sep="_") } # modification-separator
if(debug) {message(fxNa,"Done extracting pep seq rPDP4b"); rPDP4b <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,seqCol=seqCol,pepSeq=pepSeq,annot1=annot1,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,maSeCo=maSeCo,modifSensible=modifSensible) }
  ## ANNOTATION (peptide/protein oriented)
usColAnn <- maSeCo[c(3,6:7,9:14)]
if(any(is.na(usColAnn), na.rm=TRUE)) {
if(!silent) message(fxNa,"Note : ",sum(is.na(usColAnn))," protein-annotation columns (typically exported) were NOT found in this data-set !")
usColAnn <- wrMisc::naOmit(usColAnn) }
if(length(usColAnn) >0) { annot <- if(sum(!is.na(usColAnn)) >1) tmp[, wrMisc::naOmit(usColAnn)] else as.matrix(tmp[, wrMisc::naOmit(usColAnn)])
} else annot <- NULL
chPrecAA <- !is.na(annot1[,2])
chFollAA <- !is.na(annot1[,3])
if(any(chPrecAA)) if("precAA" %in% colnames(annot)) annot[,"precAA"] <- annot1[,2] else annot <- cbind(annot, prec.AA=annot1[,2])
if(any(chFollAA)) if("follAA" %in% colnames(annot)) annot[,"follAA"] <- annot1[,3] else annot <- cbind(annot, foll.AA=annot1[,3])
annot <- if(ncol(annot1) >3) cbind(annot, seq=annot1[,1], modifSeq=annot1[,4]) else cbind(annot, seq=annot1[,1])
chDuNa <- duplicated(annot1[,4])
if(any(chDuNa)) { if(!silent) message(fxNa,"Note : Some 'modifSeq' appear duplicated !!")
rownames(annot) <- wrMisc::correctToUnique(annot1[,4], silent=silent, callFrom=fxNa) # "modifSeq"
} else rownames(annot) <- annot1[,4] # "modifSeq"
usColCha <- grep("^charge", tolower(colnames(tmp))) # include charge
if(length(usColCha) >0) { char <- tmp[,usColCha]
if(length(usColCha) >1) { ## more than 1 cols, need to find best col : choose with fewest NAs
usColCha <- usColCha[which.min(colSums(is.na(char)))] }
if(debug) message(fxNa,"Column for Charge found & added", if(debug) " rPDP4c")
annot <- cbind(annot, Charge=tmp[,usColCha])
}
rm(annot1)
if(debug) {message(fxNa,"rPDP4c .. Done extracting peptide annotation ")
rPDP4c <- list(fileName=fileName,path=path, paFi=paFi,normalizeMeth=normalizeMeth,sampleNames=sampleNames,suplAnnotFile=suplAnnotFile,read0asNA=read0asNA,quantCol=quantCol,cleanDescription=cleanDescription,tmp=tmp,seqCol=seqCol,pepSeq=pepSeq,annot=annot,maSeCo=maSeCo,modifSensible=modifSensible, pepSeq=pepSeq,hasMod=hasMod, annot=annot,quantCol=quantCol)}
## ABUNDANCE
## locate & extract abundance/quantitation data
msg <- " CANNOT find ANY quantification columns"
if(length(quantCol) >1) {
## explicit columns (for abundance/quantitation data)
## problem : extract '^Abundances*' but NOT 'Abundances.Count.*'
quantColIni <- quantCol <- grep(quantCol[1], colnames(tmp))
if(length(quantCol) <1) stop(msg," ('",quantCol,"')")
} else {
## pattern search (for abundance/quantitation data)
if(length(quantCol) <1) { quantCol <- "^Abundance"
if(!silent) message(fxNa,"Setting argument 'quantCol' to '^Abundance'")}
quantCol <- grep(quantCol, colnames(tmp))
if(length(quantCol) <1) quantCol <- grep("^abundance", tolower(colnames(tmp)))
if(length(quantCol) <1) quantCol <- grep("Intensity$", colnames(tmp))
if(length(quantCol) <1) quantCol <- grep("intensity$", tolower(colnames(tmp)))
quantColIni <- quantCol
if(length(quantCol) <1) stop(msg," specified in argument 'quantCol' !") }
## check for columns to exclude (like 'Abundances.Count.')
if(length(excluCol) >1) {
excCo <- unique(unlist(lapply(excluCol, grep, colnames(tmp))))
if(length(excCo) >0) {
quantCol <- quantCol[-wrMisc::naOmit(match(excCo, quantCol))]
if(length(quantCol) <1) stop(msg," (all match to 'excluCol')") else {
if(!silent) message(fxNa,"Removed ",length(quantColIni) -length(quantCol)," columns")}
}
}
if(length(quantCol) >0) { abund <- if(length(quantCol) >1) tmp[,quantCol] else {
matrix(tmp[,quantCol], ncol=1, dimnames=list(rownames(tmp),NULL))} # how to know column-name if single sample ?
rownames(abund) <- rownames(annot) #wrMisc::correctToUnique(pepSeq, silent=silent, callFrom=fxNa)
## check for columns to exclude (like 'Abundances.Count.')
if(length(excluCol)==1) {
excCo <- grep(excluCol, colnames(tmp))
      if(any(excCo %in% quantCol, na.rm=TRUE)) {
        quantCol <- quantCol[-wrMisc::naOmit(match(excCo, quantCol))]
if(length(quantCol) <1) stop(msg," (all match to 'excluCol')") else {
if(!silent) message(fxNa,"Removed ",length(quantColIni) -length(quantCol)," columns")}
}
}
abund <- as.matrix(tmp[,quantCol]) # abundance val
rownames(abund) <- rownames(annot)
if(debug) {message(fxNa,"rPDP8 .. "); rPDP8 <- list(tmp=tmp,annot=annot,specPref=specPref,abund=abund,quantCol=quantCol)}
    ## check & clean abundances
chNorm <- grep("\\.Normalized\\.", colnames(abund))
if(length(chNorm)*2 == ncol(abund)) { # in case Normalized makes 1/2 of columns use non-normalized
abund <- abund[,-chNorm]
}
colnames(abund) <- sub("^Abundances\\.Normalized\\._{0,1}|^abundances\\.Normalized\\._{0,1}|^Abundances{0,1}_{0,1}|^abundances{0,1}_{0,1}","",colnames(abund))
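    ## the sub() above strips the quantification prefix, eg a (hypothetical) column 'Abundances.Normalized.F1.Sample1' becomes 'F1.Sample1'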
chNum <- is.numeric(abund)
if(!chNum) {abund <- apply(tmp[,quantCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa)}
    ## remove heading 'X..' from headers (only if header won't get duplicated)
### why here ??? 24mar23
chXCol <- grep("^X\\.\\.",colnames(annot))
if(length(chXCol) >0) {
newNa <- sub("^X\\.\\.","",colnames(annot)[chXCol])
chDu <- duplicated(c(newNa, colnames(annot)), fromLast=TRUE)
if(any(chDu, na.rm=TRUE)) newNa[which(chDu)] <- colnames(annot)[chXCol][which(chDu)]
colnames(annot)[chXCol] <- newNa }
## remove heading/tailing spaces (first look which columns might be subject to this treatment)
    ch1 <- list(A=grep("^ +",annot[1,]), B=grep("^ +",annot[2,]), C=grep("^ +",annot[floor(nrow(annot)/2),]), D=grep("^ +",annot[nrow(annot),]) )
chCo <- unique(unlist(ch1))
    if(length(chCo) >0) annot[,chCo] <- sub("^ +","",sub(" +$","",annot[,chCo]))          # remove heading/tailing spaces
if(debug) { message(fxNa,"rPDP9 .. dim annot ",nrow(annot)," and ",ncol(annot)); rPDP9 <- list(annot=annot,tmp=tmp,abund=abund,sampleNames=sampleNames,specPref=specPref,annotCol=annotCol,contamCol=contamCol,infoDat=infoDat) }
## add custom sample names (if provided)
if(length(sampleNames) ==ncol(abund) && ncol(abund) >0) {
if(debug) { message(fxNa,"rPDP9b") }
if(length(unique(sampleNames)) < length(sampleNames)) {
if(!silent) message(fxNa,"Custom sample names not unique, correcting to unique")
sampleNames <- wrMisc::correctToUnique(sampleNames, callFrom=fxNa) }
colnames(abund) <- sampleNames
if(debug) { message(fxNa,"rPDP9c") }
} else {
colnames(abund) <- sub("Abundance\\.F[[:digit:]]+\\.Sample\\.|Abundances\\.F[[:digit:]]+\\.Sample\\.","Sample.", colnames(abund))
}
} else abund <- NULL
## take log2 & normalize
if(length(abund) >0) {
quant <- if(utils::packageVersion("wrMisc") > "1.10") {
try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE)
} else try(wrMisc::normalizeThis(log2(abund), method=normalizeMeth, refLines=refLi, silent=silent, callFrom=fxNa), silent=TRUE) #
if(debug) { message(fxNa,"rPDP9d .. dim quant: ", nrow(quant)," li and ",ncol(quant)," cols; colnames : ",wrMisc::pasteC(colnames(quant))," ")} }
## PD colnames are typically very cryptic, replace ..
if(length(sampleNames)==ncol(abund) && all(!is.na(sampleNames)) ) { # custom sample names given
    colnames(quant) <- colnames(abund) <- sampleNames
if(length(counts) >0) colnames(counts) <- sampleNames }
### GROUPING OF REPLICATES AND SAMPLE META-DATA
## META-DATA : read additional annotation & documentation files produced by PD
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth="PD", path=path, abund=utils::head(abund), silent=silent, debug=debug, callFrom=fxNa)
}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="PD", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rPDP14"); rPDP14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa="Proteome Discoverer",
refLi=refLi, refLiIni=nrow(abund), tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod="ProteomeDiscoverer", qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=utils::packageVersion("wrProteo"), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot)
}
#' readProtDiscovererPeptides, deprecated
#'
#' This function has been deprecated and replaced by \code{\link{readProteomeDiscovererPeptides}} (from this package).
#'
#' @param ... Actually, this function doesn't read any input anymore
#' @return This function returns \code{NULL}
#' @seealso \code{\link{readProteomeDiscovererFile}}, \code{\link{readProteomeDiscovererPeptides}}
#' @export
readProtDiscovererPeptides <- function(...) {
  .Deprecated(new="readProteomeDiscovererPeptides", package="wrProteo", msg="The function readProtDiscovererPeptides() has been deprecated and replaced by readProteomeDiscovererPeptides() in this package")   # only this message will be shown..
NULL
}
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/readProteomeDiscovererPeptides.R
|
#' Read Sample Meta-data from Quantification-Software And/Or Sdrf And Align To Experimental Data
#'
#' Sample/experimental annotation meta-data from \href{https://www.maxquant.org/}{MaxQuant}, ProteomeDiscoverer, FragPipe, Proline or similar can be read using this function and relevant information extracted.
#' Furthermore, annotation in \href{https://github.com/bigbio/proteomics-sample-metadata}{sdrf-format} can be added (the order of sdrf will be adjusted automatically, if possible).
#' This function returns a list with grouping of samples into replicates and additional information gathered.
#' Input files compressed as .gz can be read as well.
#'
#' @details
#'
#' When initially reading/importing quantitation data, typically very little is known about the setup of the different samples in the underlying experiment.
#' The overall aim is to read and mine the corresponding sample-annotation documented by the quantitation-software and/or from an sdrf repository and to attach it to the experimental data.
#' This way, in subsequent steps of analysis (eg PCA, statistical tests) the user does not have to bother studying the experimental setup to figure out which
#' samples should be considered as replicates of one another.
#'
#' Sample annotation meta-data can be obtained from two sources :
#' a) from additional files produced (and exported) by the initial quantitation software (so far MaxQuant and ProteomeDiscoverer have been implemented) or
#' b) from the universal sdrf-format (from Pride or user-supplied).
#' Both types can be imported and checked in the same run, if valid sdrf-information is found this will be given priority.
#' For more information about the sdrf format please see \href{https://github.com/bigbio/proteomics-sample-metadata}{sdrf on github}.
#'
#'
#' @param quantMeth (character, length=1) quantification method used; 2-letter abbreviations like 'MQ','PD','PL','FP' etc may be used
#' @param sdrf (character, list or data.frame) optional extraction and adding of experimental meta-data:
#' if character, this may be the ID at ProteomeExchange or a similarly formatted local file. \code{sdrf} will get priority over \code{suplAnnotFile}, if provided.
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by MaxQuant; if \code{gr} is provided, it gets priority for grouping of replicates
#' if \code{TRUE} in case of \code{method=="MQ"} (MaxQuant) default to files 'summary.txt' (needed to match information of \code{sdrf}) and 'parameters.txt' which can be found in the same folder as the main quantitation results;
#'   if \code{character} the respective file-names (relative or absolute path), 1st is expected to correspond to 'summary.txt' (tabulated text, the samples as given to MaxQuant) and 2nd to 'parameters.txt' (tabulated text, all parameters given to MaxQuant)
#' in case of \code{method=="PL"} (Proline), this argument should contain the initial file-name (for the identification and quantification data) in the first position
#' @param path (character) optional path of file(s) to be read
#' @param abund (matrix or data.frame) experimental quantitation data; only column-names will be used for aligning order of annotated samples
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates);
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$lev} and \code{$level} (grouping of samples given as integer), and \code{$meth} (method by which grouping as determined).
#' If valid \code{sdrf} was given, the resultant list contains in addition \code{$sdrfDat} (data.frame of annotation).
#' Alternatively it may contain a \code{$sdrfExport} if sufficient information has been gathered (so far only for MaxQuant) for a draft sdrf for export (that should be revised and completed by the user).
#' If software annotation has been found it will be shown in \code{$annotBySoft}.
#' If all entries are invalid or entries do not pass the tests, this functions returns an empty \code{list}.
#' @seealso this function is used internally by \code{\link{readMaxQuantFile}}, \code{\link{readProtDiscovFile}} etc; use \code{\link{readSdrf}} for reading sdrf-files, \code{\link[wrMisc]{replicateStructure}} for mining annotation columns
#' @examples
#' sdrf001819Setup <- readSampleMetaData(quantMeth=NA, sdrf="PXD001819")
#' str(sdrf001819Setup)
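#'
#' ## Hypothetical usage with software-generated annotation, assuming a MaxQuant
#' ##  'summary.txt' is present in the current directory :
#' # metaD <- readSampleMetaData(quantMeth="MQ", suplAnnotFile=TRUE, path=".")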
#'
#' @export
readSampleMetaData <- function(quantMeth, sdrf=NULL, suplAnnotFile=NULL, path=".", abund=NULL, groupPref=list(lowNumberOfGroups=TRUE), silent=FALSE, debug=FALSE, callFrom=NULL) {
## sdrf..()
## suplAnnotFile..(character or logical)
## quantMeth..(character)
  ## abund..(matrix or data.frame) column-names will be used to compare & align sample meta-data
#### CONTENT
## 1.1 SOFTWARE specific META-DATA .. read additional annotation & documentation files
## Aim : extract/build 'summaryD' (& parametersD) allowing to match colnames of 'abund' to suplAnnotFile and/or sdrf
## 1.2 basic check of summaryD to quant data, extract supl info for sdrf
## evaluate summaryD to consistent format
## 1.3 TRY CHECKING/ADJUSTING ORDER of summaryD
## 1.4 replicateStructure
### 2 READ SDRF annotation & pick groups of replicates; has priority over grouping based on summary.txt
  ##    2.1 basic check (distinguish full $sampleSetup from custom data.frame)
## 2.2 need to match lines (samples) of sdrf (setupDat) to summaryD and/or colnames of abund
## 2.3 ready to make setupSd
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readSampleMetaData")
if(isTRUE(debug)) silent <- FALSE
if(!isTRUE(silent)) silent <- FALSE
summaryD <- parametersD <- setupSdSoft <- setupSd <- sdrfInf <- annSh <- parametersSd <- NULL # initialize (setupSd needed ?)
## checks
if(length(suplAnnotFile) >1) if(is.na(suplAnnotFile[1])) suplAnnotFile <- NULL
datOK <- length(sdrf) >0 || length(suplAnnotFile) >0
if(length(quantMeth) <1) quantMeth <- NA
if(length(abund) >0 && any(length(dim(abund)) !=2, dim(abund) < 1, na.rm=TRUE)) { datOK <- FALSE
warning("Invalid argument 'abund'; must be matrix or data.frame with min 1 line and 1 col")}
if(debug) {message(fxNa,"Ready search & extract sample meta-data rSM0"); rSM0 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,path=path,abund=abund)}
.corPathW <- function(x) gsub("\\\\", "/", x)
.adjPat <- function(x) { out <- match(x, unique(x)); names(out) <- names(x); out} # needed ??
.adjTrimPat <- function(x) { x <- wrMisc::rmEnumeratorName(x, incl=c("anyCase","trim0","rmEnum"), sepEnum=c(" ","-","_"), nameEnum=c("Number","No","#","","Replicate","Sample"))
out <- match(x, unique(x)); names(out) <- names(x); out} # used
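  ## eg (illustration) : .adjPat(c(a="x",b="y",c="x")) gives c(a=1,b=2,c=1);
  ##   .adjTrimPat() does the same after removing enumerators, so c("Samp_1","Samp_2") should collapse to a single level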
  .redLstToDf <- function(lst) {   # transform lst to data.frame; in case some list-entries have different length, choose the entries with the most frequent common length
    leL <- sapply(lst, length)
    if(length(unique(leL)) >1) {     # need to reduce : find most frequent length
      leL2 <- tabulate(leL)
      lst <- lst[which(leL==which.max(leL2))] }
as.data.frame(lst) }
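  ## eg (illustration) : .redLstToDf(list(a=1:3, b=4:6, c=1:2)) should keep only 'a' and 'b' (most frequent length, here 3) as data.frame columns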
## end suppl fx
path <- if(length(path) <1) "." else path[1]
nSamp0 <- if(length(dim(abund)) >1) ncol(abund) else 0
chSoft <- c("MQ", "PD", "PL", "FP", "MC","NN")
syncColumns <- c(sdrfDat=NA, annotBySoft=NA)
if(datOK) {
if("maxquant" %in% tolower(quantMeth)) quantMeth <- "MQ"
if("proteomediscoverer" %in% tolower(quantMeth)) quantMeth <- "PD"
if("proline" %in% tolower(quantMeth)) quantMeth <- "PL"
if("fragpipe" %in% tolower(quantMeth)) quantMeth <- "FP"
if("masschroq" %in% tolower(quantMeth)) quantMeth <- "MC"
if("dia-nn" %in% tolower(quantMeth) || "diann" %in% tolower(quantMeth)) quantMeth <- "NN"
}
if(datOK) {
if(length(abund) >0) if(is.null(colnames(abund))) { abund <- NULL
if(!silent) message(fxNa,"Invalid 'abund' : has NO colnames !") }
## priority column for groups from sdrf (+ define default colnames for priority)
chGr <- c("sdrfColumn","sdrfCol")
chGrPref <- chGr %in% names(groupPref)
if(any(chGrPref)) {
groupPref$sdrfColumn <- if(length(groupPref[[chGr[which(chGrPref)[1]]]]) >0) groupPref[[chGr[which(chGrPref)[1]]]] else c("factor.value.disease.","characteristics.disease.", "factor.value.treatment.","characteristics.treatment.")
}
### IMPORT SAMPLE META-DATA, if possible GROUPING OF REPLICATES
if(length(suplAnnotFile) ==1) {
if(isFALSE(suplAnnotFile)) suplAnnotFile <- NULL else if(is.na(suplAnnotFile)) suplAnnotFile <- NULL }
## 1.1 SOFTWARE specific META-DATA : read additional annotation & documentation files produced by var software as summaryD & parametersD
if(length(suplAnnotFile) >0) { # read quant software-generated sample annotation
chFiNa <- NULL # initialize
if(debug) {message(fxNa,"rSM1"); rSM1 <- list(sdrf=sdrf,abund=abund,path=path,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth) }
## option 1 : suplAnnotFile has path (do not use default 'path'), use same path for default suplAnnotFile (if applicable)
## option 2 : suplAnnotFile has no path, use 'path' for sdrf & suplAnnotFile
## Aim : extract/build 'summaryD' (& parametersD) allowing to match colnames of 'abund' to suplAnnotFile and/or sdrf
## MaxQuant : (summary.txt & parameters.txt)
if("MQ" %in% quantMeth && length(suplAnnotFile) >0) {
isDir <- if(is.character(suplAnnotFile)) utils::file_test("-d",suplAnnotFile[1]) else FALSE
if(isDir) { path <- suplAnnotFile[1]; suplAnnotFile <- TRUE}
if(isTRUE(suplAnnotFile)) { # automatic search for standard file-names ('summary.txt','parameters.txt') in same dir as main MaxQuant data
chFiNa <- c("summary.txt","summary.txt.gz","parameters.txt","parameters.txt.gz")
chFi <- file.exists(file.path(path, chFiNa))
if(debug) {message(fxNa,"rSM0mq\n"); rSM0mq <- list(path=path,sdrf=sdrf,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,chFi=chFi,chFiNa=chFiNa )}
if(any(chFi, na.rm=TRUE)) { suplAnnotFile <- c(summary=chFiNa[1:2][which(chFi[1:2])[1]], parameters=chFiNa[3:4][which(chFi[3:4])[1]] )
if(all(names(suplAnnotFile)=="parameters")) suplAnnotFile <- c(NA, parameters=suplAnnotFile$parameters) # make length=2
chFi <- c(chFi[1] | chFi[2], chFi[3] | chFi[4]) #needed ?
} else suplAnnotFile <- NULL
} else { # specific/non-default file given
if(length(suplAnnotFile) >2) suplAnnotFile <- suplAnnotFile[1:2] # use max length=2
chFi <- rep(FALSE, 2)
if(!is.na(suplAnnotFile[1])) chFi[1] <- file.exists(file.path(path, suplAnnotFile[1]))
if(!is.na(suplAnnotFile[2])) chFi[2] <- file.exists(file.path(path, suplAnnotFile[2]))
}
if(debug) {message(fxNa,"rSM1mq"); rSM1mq <- list(path=path,sdrf=sdrf,summaryD=summaryD,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,path=path,nSamp0=nSamp0,chFiNa=chFiNa,chFi=chFi )}
## main reading of MQ sample meta-data
if(chFi[1]) summaryD <- try(utils::read.delim(file.path(path, suplAnnotFile[1]), stringsAsFactors=FALSE), silent=TRUE) # 'summary.txt'
if(chFi[2]) parametersD <- try(utils::read.delim(file.path(path, suplAnnotFile[2]), stringsAsFactors=FALSE), silent=TRUE) # 'parameters.txt'
if(inherits(summaryD, "try-error")) {summaryD <- NULL; if(!silent) message(fxNa,"Meta-data: Failed to read '",suplAnnotFile[1],"' for getting additional information about experiment !")} else {
summaryD <- if(nrow(summaryD) >2) summaryD[-nrow(summaryD),] else matrix(summaryD[-nrow(summaryD),], nrow=1,dimnames=list(NULL,colnames(summaryD))) # need to remove last summary-line
if(debug) message(fxNa,"Successfully read sample annotation from '",suplAnnotFile[1],"'") }
if(inherits(parametersD, "try-error")) {if(!silent) message(fxNa,"Meta-data: Failed to read '",suplAnnotFile[2],"' !")} else {
if(debug && chFi[2]) message(fxNa,"Successfully read ",quantMeth," parameters from '",suplAnnotFile[2],"'") }
syncColumns["annotBySoft"] <- FALSE
if(debug) { message(fxNa,"rSM1mq2"); rSM1mq2 <- list(summaryD=summaryD,parametersD=parametersD,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth, sdrf=sdrf,path=path,nSamp0=nSamp0,chSoft=chSoft,syncColumns=syncColumns)}
}
## ProteomeDiscoverer
## uses suplAnnotFile as path for '.InputFiles\\.txt'
if("PD" %in% quantMeth && length(suplAnnotFile) >0) {
if(debug) {message(fxNa,"rSM1pd"); rSM1pd <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth)}
if(length(suplAnnotFile) >1) { if(!silent) message(fxNa,"Only 1st value of argument 'suplAnnotFile' can be used with quantMeth=PD")
suplAnnotFile <- suplAnnotFile[1] }
if(isTRUE(suplAnnotFile)) { # automatic search for standard file-name ('InputFiles.txt') in same dir as main MaxQuant data
suplAnnotFile <- list.files(path=path, pattern=".InputFiles\\.txt$|.InputFiles\\.txt\\.gz$")
        if(length(suplAnnotFile) >1) { if(!silent) message(fxNa,"Found ",length(suplAnnotFile)," files matching general pattern, using ONLY 1st, ie ",suplAnnotFile[1])
suplAnnotFile <- suplAnnotFile[1] }
chFi <- length(suplAnnotFile) >0
if(!chFi && !silent) message(fxNa,"Note: Unable to (automatically) find sample-annotation file. Maybe it was not exported from ProteomeDiscoverer ?")
} else chFi <- try(file.exists(file.path(path, suplAnnotFile)), silent=TRUE)
if(inherits(chFi, "try-error") & silent) {chFi <- FALSE; message(fxNa,"Meta-data: Failed to see file '",suplAnnotFile[1]," ! (check if file exists or rights to read directory ?)")}
if(debug) {message(fxNa,"rSM1pd2") }
## main reading of PD sample meta-data
if(chFi) summaryD <- try(utils::read.delim(file.path(path, suplAnnotFile[1]), stringsAsFactors=FALSE), silent=TRUE)
if(inherits(summaryD, "try-error")) {summaryD <- NULL; if(!silent) message(fxNa,"Meta-data: Failed to read '",suplAnnotFile[1],"' !")
} else {
syncColumns["annotBySoft"] <- FALSE
if(debug) message(fxNa,"ProteomeDiscoverer Meta-data successfully read '",suplAnnotFile[1])}
if(debug) {message(fxNa,"rSM1pd3"); rSM1pd3 <- list(summaryD=summaryD,parametersD=parametersD,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth, sdrf=sdrf,path=path,nSamp0=nSamp0,chSoft=chSoft,syncColumns=syncColumns)}
}
## Proline
    ## so far only for reading out of xlsx
if("PL" %in% quantMeth && length(suplAnnotFile) >0) {
if(debug) {message(fxNa,"rSM0pl"); rSM0pl <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth)}
summaryD <- NULL
## need init filename given via suplAnnotFile
if(length(grep("\\.xlsx$", suplAnnotFile[1])) >0) { # won't enter here if suplAnnotFile==NULL
## Extract out of Excel
reqPa <- c("readxl")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
        if(any(!chPa)) message(fxNa,"package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN") else {
sheets <- if(debug) try(readxl::excel_sheets(suplAnnotFile[1]), silent=TRUE) else suppressMessages(try(readxl::excel_sheets(suplAnnotFile[1]), silent=TRUE))
if(debug) {message(fxNa,"rSM2pl"); rSM2pl <- list()}
if(inherits(sheets, "try-error")) { message(fxNa,"Unable to read file '",suplAnnotFile,"' ! Returning NULL; check format & rights to read")
} else {
annShe <- c("Import and filters", "Search settings and infos") # sheets from xslx to try reading for sample/meta-information
annSh <- wrMisc::naOmit(match(annShe, sheets))
annSh <- grep("Import", if(length(annSh) <1) sheets else sheets[annSh])
if(length(annSh) >1) {
if(!silent) message(fxNa,"Multipe sheets containing 'Import' found, using 1st :",sheets[annSh[1]])
annSh <- annSh[1]
} else if(length(annSh) <1 && !silent) {
message(fxNa,"Note: NONE of ANNOTATION SHEETS (",wrMisc::pasteC(annShe),") in '",suplAnnotFile,"' FOUND ! Can't check Matching order of samples to sdrf-anotation !")
}
summaryD <- as.matrix(as.data.frame(if(debug) readxl::read_xlsx(suplAnnotFile[1], sheet=annSh, col_names=FALSE) else suppressMessages(readxl::read_xlsx(suplAnnotFile[1], sheet=annSh, col_names=FALSE))))
rownames(summaryD) <- summaryD[,1]
summaryD <- t(summaryD[,-1])
rownames(summaryD) <- 1:nrow(summaryD)
#syncColumns["annotBySoft"] <- FALSE
}
}
} else if(debug) message(fxNa,"Unknown type of sample/experiment annotation file ('",suplAnnotFile[1],"') for Proline, ignoring !!")
} # finish PL
## FragPipe
##
if("FP" %in% quantMeth && length(suplAnnotFile) >0) {
if(debug) { message(fxNa,"rSM1fp1"); rSM1fp1 <- list()}
## option 1 : suplAnnotFile has path (do not use default 'path'), use same path for default suplAnnotFile (if applicable)
## option 2 : sdrf has no path, use 'path' for sdrf & suplAnnotFile
## Aim : extract/build 'summaryD' allowing to match colnames of 'abund' to suplAnnotFile and/or sdrf
## filelist_ionquant.txt & fragpipe-files.fp-manifest
isDir <- if(is.character(suplAnnotFile)) utils::file_test("-d", suplAnnotFile[1]) else FALSE
if(isDir) { path <- suplAnnotFile[1]; suplAnnotFile <- TRUE}
      if(isTRUE(suplAnnotFile)) {      # automatic search for standard file-names ('fragpipe-files.fp-manifest','fragpipe.workflow') in same dir as main FragPipe data
chFiNa <- c("doNotUseDoNotUse","doNotUseDoNotUse", "fragpipe-files.fp-manifest","fragpipe-files.fp-manifest.gz", "fragpipe.workflow","fragpipe.workflow.gz")
chFi <- file.exists(file.path(path, chFiNa))
if(debug) {message(fxNa,"rSM1fp2"); rSM1fp2 <- list(path=path,sdrf=sdrf,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,chFi=chFi,chFiNa=chFiNa )}
if(any(chFi, na.rm=TRUE)) { suplAnnotFile <- c(summary=chFiNa[1:4][which(chFi[1:4])[1]], parameters=chFiNa[5:6][which(chFi[5:6])[1]] )
if(all(names(suplAnnotFile)=="parameters")) suplAnnotFile <- c(NA, parameters=suplAnnotFile$parameters) # make length=2
chFi <- c(chFi[1] || chFi[2] || chFi[3] || chFi[4], chFi[5] || chFi[6]) # reduce to length=2 (1st for summary, 2nd for parameters)
} else suplAnnotFile <- NULL
} else { # specific/non-default file given (1st for summary, 2nd for parameters)
if(length(suplAnnotFile) >2) suplAnnotFile <- suplAnnotFile[1:2] # use max length=2
chFi <- rep(FALSE, 2)
if(!is.na(suplAnnotFile[1])) chFi[1] <- file.exists(file.path(path, suplAnnotFile[1]))
if(!is.na(suplAnnotFile[2])) chFi[2] <- file.exists(file.path(path, suplAnnotFile[2]))
}
if(debug) {message(fxNa,"rSM1fp3"); rSM1fp3 <- list()}
## main reading of FP sample meta-data
if(chFi[1]) summaryD <- try(utils::read.delim(file.path(path, suplAnnotFile[1]), header=FALSE, stringsAsFactors=FALSE), silent=TRUE)
if(chFi[2]) parametersD <- try(utils::read.delim(file.path(path, suplAnnotFile[2]), header=FALSE, stringsAsFactors=FALSE), silent=TRUE)
if(inherits(summaryD, "try-error")) { summaryD <- NULL; if(!silent) message(fxNa,"Meta-data: Failed to read '",suplAnnotFile[1],"' for getting additional information about experiment !")
} else if(!is.null(summaryD)) {
msg <- c("File '",suplAnnotFile[1],"' is NOT good annotation file ! Ignoring")
if(identical(summaryD[1,], c("flag","value"))) { warning(fxNa, msg); summaryD <- NULL}
if(sum(dim(summaryD) >1) <2) { warning(fxNa, msg); summaryD <- NULL}
if(length(summaryD) >0) {
colnames(summaryD) <- c("file","experiment","bioreplicate","dataType")[1:min(ncol(summaryD), 4)]
summaryD <- as.matrix(summaryD)
summaryD[,1] <- .corPathW(summaryD[,1])
}
#syncColumns["annotBySoft"] <- FALSE
if(debug) message(fxNa,"Successfully read sample annotation from '",suplAnnotFile[1],"'") }
if(inherits(parametersD, "try-error")) {if(!silent) message(fxNa,"Meta-data: Failed to read '",suplAnnotFile[2],"' !")
} else if(!is.null(parametersD)) {
parametersD <- sub("\\\\:",":", gsub("\\\\\\\\","/", as.character(as.matrix(parametersD))[-(2:3)]))
if(debug && chFi[2]) message(fxNa,"Successfully read ",quantMeth," parameters from '",suplAnnotFile[2],"'") }
if(debug) { message(fxNa,"rSM1fp4")}
}
## MassChroq
if("MC" %in% quantMeth && length(suplAnnotFile) >0) {
warning(fxNa,"Reading supplemental meta-data from MassChroq is currently not implemented") }
## Dia-NN
if("NN" %in% quantMeth && length(suplAnnotFile) >0) {
warning(fxNa,"Reading supplemental meta-data from Dia-NN is currently not implemented") }
## OTHER software ? ..
    if(!any(quantMeth %in% chSoft, na.rm=TRUE) && !silent) message(fxNa,"Note: No specific procedure has been implemented so far for gathering meta-data by the analysis-software/method '",quantMeth,"'")
} ## finished main reading of suplAnnotFile into summaryD
if(debug) { message(fxNa,"rSM2"); rSM2 <- list(sdrf=sdrf,abund=abund,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,summaryD=summaryD,parametersD=parametersD,suplAnnotFile=suplAnnotFile,syncColumns=syncColumns) }
## 1.2 basic check of summaryD to quant data, extract supl info for sdrf
if(length(summaryD) >0) { ## more checks
    if(length(abund) <1) message(fxNa,"Can't verify/correct names of annotation since 'abund' was not given (ie NULL) or has no colnames") else {
if(!identical(ncol(abund), nrow(summaryD))) { summaryD <- NULL
if(!silent) message(fxNa,"Note : Number of columns of 'abund' does NOT FIT to number of samples in annotation-data !") }
}
if(length(dim(summaryD)) !=2) summaryD <- matrix(summaryD, ncol=1, dimnames=list(names(summaryD),NULL))
}
if(debug) { message(fxNa,"rSM3"); rSM3 <- list(sdrf=sdrf,abund=abund,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,summaryD=summaryD,parametersD=parametersD,suplAnnotFile=suplAnnotFile,syncColumns=syncColumns) }
## continue evaluating summaryD to consistent format
if(length(summaryD) >0) { ## define setupSdSoft
## need to match colnames(abund) to (MQ:) $Raw.file or $Experiment .. need to find best partial match
MStype <- "FTMS" # used for extracting (more) sdrf info out of parametersSd
if("MQ" %in% quantMeth) { ## NOT IN SAME ORDER !!
useMQSuCol <- c("Raw.file","Experiment","Enzyme","Variable.modifications", "Fixed.modifications","Multi.modifications")
summaryD <- summaryD[,wrMisc::naOmit(match(useMQSuCol, colnames(summaryD)))] # cor 21oct22, more cols 7jun23
chSd <- length(abund) >0 && nrow(summaryD) == ncol(abund)
      ## normally colnames(abund) and summaryD should already be in correct order
      if(!chSd) {
        if(!silent && length(abund) >0 && nrow(summaryD) != ncol(abund)) message(fxNa,"PROBLEM : Meta-data and abundance data do not match ! ",
          "Number of samples from ",suplAnnotFile[1]," (",nrow(summaryD),") and from main data (",ncol(abund),") do NOT match !! .. ignoring")
#if(debug) save(sdrf,abund,suplAnnotFile,quantMeth,summaryD,quantMeth,syncColumns, file="C:\\E\\projects\\TCAmethods\\wrProteoRamus\\rSM4mq.RData")
}
if(length(parametersD) >0) { ## create 'parametersSd' for sdrf
parametersCol <- paste0(c("MS/MS tol.","MS/MS deisotoping tolerance","MS/MS deisotoping tolerance unit")," (",MStype,")") # also "Top MS/MS peaks per Da interval." ?
parametersCol <- c("Modifications included in protein quantification","Match between runs","Fasta file", parametersCol)
parametersSd <- if(parametersCol[4] %in% parametersD[,1]) parametersD[match(parametersCol[4],parametersD[,1]) ,2] else NA # eg '20 ppm'
if(!is.na(parametersSd)) if(grepl("ppm$", parametersSd)) parametersSd <- paste0(1/as.numeric(sub(" ppm$","",parametersSd))," Da")
fragMassT <- if(all(parametersCol[5:6] %in% parametersD[,1])) paste0( parametersD[match(parametersCol[5:6],parametersD[,1]) ,2], collapse=" ") else NA
supPar <- parametersD[match(c("Modifications included in protein quantification","Match between runs"), parametersD[,1]), 2]
parametersSd <- c(precMassTol=parametersSd, fragMassTol=fragMassT, modifs=supPar[1], matchBetwRun=toupper(supPar[2]) )
} else parametersSd <- c(precMassTol=NA, fragMassTol=NA)
parametersSd <- c(assayName="run1", label="NT=label free sample (check if correct)", instrum=NA, parametersSd, cleavAgent=paste0("NT=",summaryD[2,"Enzyme"]) )
## add PTM modifs ...
if(debug) { message(fxNa," .. rSM4mq"); rSM4mq <- list(sdrf=sdrf,abund=abund,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns,parametersSd=parametersSd,MStype=MStype)}
}
if("PD" %in% quantMeth) { useCo <- c("Input.Files.","File.ID","File.Name","Instrument.Name") # no suitable 2nd column ...
useCo <- wrMisc::naOmit(match(useCo, colnames(summaryD)))
summaryD <- if(length(useCo) >1) summaryD[,useCo] else matrix(summaryD, ncol=1, dimnames=list(rownames(summaryD), colnames(summaryD)[useCo]))
## presume that filenames (from summaryD) are in same order as abund, then trim to file-names (if all in same path)
if(debug) { message(fxNa,"rSM4pd"); rSM4pd <- list(sdrf=sdrf,useCo=useCo,abund=abund,uplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,chFiNa=chFiNa) }
colNa <- wrMisc::trimRedundText(gsub("\\\\","/",as.character(summaryD[,"File.Name"])), silent=silent, debug=debug, callFrom=fxNa)
if(length(colNa) < ncol(abund)) warning(fxNa,"Trouble ahead : Sample annotation data from ProteomeDiscoverer has FEWER samples than data read !") else {
if(length(colNa) > ncol(abund)) { message(fxNa,"note : Sample annotation data from ProteomeDiscoverer has MORE samples than data read, using only first (might be incorrect)")
colNa <- colNa[1:ncol(abund)]
summaryD <- summaryD[1:ncol(abund),]
} }
colnames(abund) <- colNa # no possibility to match colnames at this point
summaryD <- cbind(summaryD, filePath= summaryD[,"File.Name"]) # copy filename+path first to new column
summaryD[,"File.Name"] <- basename(.corPathW(summaryD[,"File.Name"])) # correct to filename only
syncColumns["annotBySoft"] <- TRUE
if(debug) { message(fxNa," .. rSM4pd")}
}
if("PL" %in% quantMeth) { ## order OK ?
chSd <- length(abund) >0 && nrow(summaryD) == ncol(abund)
      ## normally colnames(abund) and summaryD should already be in correct order
if(chSd) {
# still need to develope extra verification ?
chCol <- match(c("result_file_name" ,"quant_channel_name","import_params"), colnames(summaryD))
if(debug) { message(fxNa,"rSM4pl"); rSM4pl <- list(sdrf=sdrf,abund=abund,uplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD)
if(all(is.na(chCol))) summaryD <- NULL else {
parametersD <- summaryD[1, 3:ncol(summaryD)] # how to integrate this later ??
summaryD <- summaryD[, chCol]
          summaryD[,1] <- sub("\\.mzDB\\.t\\.xml", "", summaryD[,1] )     # remove Proline-specific file-format extensions
chFiNa <- colnames(summaryD) %in% "result_file_name"
if(any(chFiNa)) colnames(summaryD)[which(chFiNa)] <- "File.Name" # this column should be called 'File.Name'
summaryD <- as.data.frame(summaryD)
} # adjust to original raw names
} else {
if(!silent && nrow(summaryD) != ncol(abund)) message(fxNa,"PROBLEM : Invalid meta-data ! ", "Number of samples from ",
suplAnnotFile[1]," (",nrow(summaryD),") and from main data (",ncol(abund),") do NOT match !! .. ignoring") }
syncColumns["annotBySoft"] <- TRUE
}
if("FP" %in% quantMeth) { ## NOT IN SAME ORDER !!
mat1 <- match(c("file","experiment"), colnames(summaryD))
if(all(is.na(mat1))) { message(fxNa,"UNABLE to interpret content of ",suplAnnotFile[1]); summaryD <- NULL
} else {
summaryD <- cbind(path=dirname(summaryD[,mat1[1]]), Raw.file= basename(summaryD[,mat1[2]]), Experiment=summaryD[,mat1[2]], trimExp=NA)
summaryD[,4] <- gsub("_+$|-+$|\\.+$| +$|","", sub("[[:digit:]]+$","", wrMisc::trimRedundText(summaryD[,3], side="right", callFrom=fxNa, silent=silent))) # remove tailing numbers (and tailing redundant text to get to numbers)
chSd <- length(abund) >0 && nrow(summaryD) == ncol(abund)
if(length(chSd) <1) chSd <- FALSE
        ## normally colnames(abund) and summaryD should already be in same/correct order
if(!chSd) {
          if(!silent && length(abund) >0) if(nrow(summaryD) != ncol(abund)) { message(fxNa,"PROBLEM : meta-data and abundance data do not match ! ",
"Number of samples from ",suplAnnotFile[1]," (",nrow(summaryD),") and from main data (",ncol(abund),") do NOT match !! .. ignoring") }
#if(debug) save(sdrf,abund,suplAnnotFile,quantMeth,summaryD,quantMeth, file="C:\\E\\projects\\TCAmethods\\wrProteoRamus\\rSM4mq.RData")
}
}
syncColumns["annotBySoft"] <- TRUE
if(debug) { message(fxNa," .. rSM4mq"); rSM4fp <- list()}
}
## other software ? ...
if(debug) { message(fxNa,"rSM4d"); rSM4d <- list(sdrf=sdrf,abund=abund,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns) }
## 1.3 TRY CHECKING/ADJUSTING ORDER of summaryD
if(length(abund) >0 && length(summaryD) >0) {
## some software specific options otherwise check if filenames can be matched to colnames ?
      ## for PD not much is possible, since colnames are like ".F1.Sample",".F2.Sample",".F3.Sample",...
## most other software has summaryD in same order as abund
if("MQ" %in% quantMeth) { # colnames of abund not necessarly found in summaryD
summaryD <- wrMisc::matchMatrixLinesToRef(mat=summaryD, ref=colnames(abund), inclInfo=TRUE, silent=TRUE, debug=debug, callFrom=fxNa)
syncColumns["annotBySoft"] <- length(summaryD$newOrder) >0
summaryD <- summaryD$mat }
if(any(c("PL","FP") %in% quantMeth, na.rm=TRUE)) {
summaryD <- wrMisc::matchMatrixLinesToRef(mat=summaryD, ref=colnames(abund), inclInfo=TRUE, silent=TRUE, debug=debug, callFrom=fxNa)
syncColumns["annotBySoft"] <- length(summaryD$newOrder) >0
summaryD <- summaryD$mat }
}
## 1.4 replicateStructure
setupSdSoft <- wrMisc::replicateStructure(summaryD, silent=silent, debug=debug, callFrom=fxNa)
if(debug) { message(fxNa,"rSM4e"); rSM4e <- list(sdrf=sdrf,abund=abund,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns) }
## so far no direct information about groups (all filenames are different), need to try to find out (remove enumerators)
if(length(abund) >0) {
grpA <- wrMisc::trimRedundText(txt=colnames(abund), spaceElim=TRUE, silent=silent, debug=debug, callFrom=fxNa) # 26oct22
colNaGrp <- wrMisc::rmEnumeratorName(grpA, incl=c("anyCase","trim0","rmEnum"), sepEnum=c(" ","-","_"), nameEnum=c("Number","No","#","","Replicate","Sample"), silent=silent, debug=debug, callFrom=fxNa)
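      ## eg (illustration) : colnames like c("WT_1","WT_2","KO_1","KO_2") should reduce to c("WT","WT","KO","KO"), ie 2 groups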
colNaGrPref <- TRUE ## preferential to colnames for searching groups
if(any(duplicated(colNaGrp)) && colNaGrPref) { # colnames may be used for designing groups
grp <- .adjPat(colNaGrp)
names(grp) <- colNaGrp
setupSdSoft$lev <- grp
} else {
if(all(setupSdSoft$lev ==1:ncol(abund))) {
## note : .adjTrimPat() does NOT allow keeping names of levels
grp2 <- if(ncol(summaryD) >1) apply(summaryD, 2, .adjTrimPat) else as.matrix(.adjTrimPat(summaryD))
if(ncol(summaryD) >1) { grp3 <- apply(grp2, 2, function(x) length(unique(x)))
if(any(grp3 < ncol(abund))) {
if(length(grp3) >0) { useCol <- if(isTRUE(groupPref$lowNumberOfGroups)) which.min(grp3) else which(grp3 ==stats::median(grp3))[1]
setupSdSoft$lev <- grp2[,useCol]
names(setupSdSoft$lev) <- wrMisc::rmEnumeratorName(wrMisc::trimRedundText(txt=summaryD[,useCol], spaceElim=TRUE, silent=silent, debug=debug, callFrom=fxNa),
incl=c("anyCase","trim0","rmEnum"), sepEnum=c(" ","-","_"), nameEnum=c("Number","No","#","","Replicate","Sample"), silent=silent, debug=debug, callFrom=fxNa)
} }
} else {
names(grp2) <- wrMisc::rmEnumeratorName(wrMisc::trimRedundText(txt=as.character(summaryD), spaceElim=TRUE, silent=silent, debug=debug, callFrom=fxNa),
incl=c("anyCase","trim0","rmEnum"), sepEnum=c(" ","-","_"), nameEnum=c("Number","No","#","","Replicate","Sample"), silent=silent, debug=debug, callFrom=fxNa)
grp <- setupSdSoft$lev <- grp2
}
}
}
if(debug) { message(fxNa,"rSM4f"); rSM4f <- list() }
summaryD <- as.data.frame(cbind(summaryD, grp=names(setupSdSoft$lev), grpInd=setupSdSoft$lev)) # add presumed grouping to summaryD
} else { if(!silent) message(fxNa,"Note : Abundance data are absent, adjust order of annotation to abundance data")}
}
if(debug) { message(fxNa,"rSM5"); rSM5 <- list(sdrf=sdrf,abund=abund,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,setupSdSoft=setupSdSoft) }
### 2 READ SDRF annotation & pick groups of replicates; has priority over grouping based on summary.txt
if(length(sdrf) >0) {
## check if 'functional' sdrf (ie list) is provided -> use as is
if(is.list(sdrf) && all(c("sdrfDat","col","lev") %in% names(sdrf), na.rm=TRUE)) {
if(debug) message(fxNa,"Custom setupSd povdided as sdrf")
sdrfDat <- sdrf$sdrfDat
} else {
## may be : character vector (length <3) => assume path or sdrf accession, 2nd as sdrf-column to use
## may be : (matrix or) data.frame to use as table to exploit
if(is.character(sdrf) && length(sdrf) <3 && length(dim(sdrf)) <2) {
## read sdrf from file or github
sdrfDat <- readSdrf(sdrf, silent=silent, debug=debug, callFrom=fxNa)
## check for priority columns
if(length(groupPref$sdrfColumn) ==1 && length(sdrf) <2) { ch1 <- groupPref$sdrfColumn %in% colnames(sdrfDat)
if(any(ch1)) sdrf[2] <- which(ch1)[1]
}
} else {
## user provided custom sample annotation object
if(length(dim(sdrf)) <2 && !silent) message(fxNa,"Note: 'sdrf' looks bizarre (trouble ahead ?), expecting either file, data.frame or complete list")
sdrfDat <- sdrf
sdrf <- "user provided custom object"}
}
if(debug) { message(fxNa,"rSM6 dim sdrfDat ",nrow(sdrfDat)," ",ncol(sdrfDat)); rSM6 <- list(sdrf=sdrf,sdrfDat=sdrfDat,abund=abund,uplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns) }
    ## 2.1 basic check (distinguish full $sampleSetup from custom data.frame)
if(length(sdrfDat) >0) {
syncColumns["sdrfDat"] <- FALSE # initialize
if(is.list(sdrfDat) && "sdrfDat" %in% names(sdrfDat)) { sdrfDat <- sdrfDat$sdrfDat
if("groups" %in% names(sdrfDat)) groupPref$groups <- sdrfDat$groups
if(debug) message(fxNa,"It seems a full $sampleSetup may have been given") }
if(length(dim(sdrfDat)) <2) sdrfDat <- as.matrix(sdrfDat)
if(length(abund) >0 && nrow(sdrfDat) != ncol(abund)) {
if(!silent) message(fxNa,"Note : Ignoring 'sdrf' : it does NOT have the expected number or rows (",nrow(sdrfDat)," given but ",ncol(abund)," expected !)")
sdrf <- sdrfDat <- NULL }}
if(debug) {message(fxNa,"rSM6a"); rSM6a <- list(sdrf=sdrf,sdrfDat=sdrfDat,abund=abund,uplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns) }
## 2.2 need to match lines (samples) of sdrf (setupDat) to summaryD and/or colnames of abund
if(length(sdrfDat) >0) {
if(length(summaryD) >0) { ## summaryD exist try matching by file-names
chFiNames <- c("File.Name","File","FileName","Raw.file") # search in summaryD
chFiNa <- chFiNames %in% colnames(summaryD)
if(debug) {message(fxNa,"rSM6a1") }
if(any(chFiNa, na.rm=TRUE) && "comment.file.uri." %in% colnames(sdrfDat)) {
## align by filenames
chFi <- match(sub("\\.zip$|\\.gz$","", basename(.corPathW(summaryD[,chFiNames[which(chFiNa)[1]]]))),
sub("\\.zip$|\\.gz$","", basename(.corPathW(sdrfDat[,"comment.file.uri."])))) # new order
if(any(is.na(chFi)) && any(grepl("\\.raw",sdrfDat[,"comment.file.uri."]), na.rm=TRUE)) {
sumDaFiNa <- sub("\\.raw","",sub("\\.zip$|\\.gz$","", basename(.corPathW(summaryD[,chFiNames[which(chFiNa)[1]]]))))
sdrfFiNa <- sub("\\.raw","",sub("\\.zip$|\\.gz$","", basename(.corPathW(sdrfDat[,"comment.file.uri."]))))
chFi <- match(sumDaFiNa, sdrfFiNa) # new order
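            ## eg (illustration) : 'C:\data\File1.raw' (software annot) and 'https://x/File1.raw.gz' (sdrf) both reduce to 'File1' for matching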
if(any(is.na(chFi)) && "FP" %in% quantMeth) {
sumDaFiNa <- wrMisc::rmEnumeratorName(wrMisc::trimRedundText(txt=sumDaFiNa, spaceElim=TRUE, silent=silent, debug=debug, callFrom=fxNa), newSep="_", incl=c("anyCase","trim0"), silent=silent, debug=debug, callFrom=fxNa)
sdrfFiNa <- wrMisc::rmEnumeratorName(wrMisc::trimRedundText(txt=sdrfFiNa, spaceElim=TRUE, silent=silent, debug=debug, callFrom=fxNa), newSep="_", incl=c("anyCase","trim0"), silent=silent, debug=debug, callFrom=fxNa)
chFi <- match(sumDaFiNa, sdrfFiNa) # new order
if(debug) { message(fxNa,"rSM6aa dim sdrfDat ",nrow(sdrfDat)," ",ncol(sdrfDat))}
}
rmRaw <- TRUE
} else rmRaw <- FALSE
if(sum(is.na(chFi)) >0) warning(fxNa,"UNABLE to match all filenames from sdrf and ",basename(.corPathW(suplAnnotFile)),
" ! \n ++ BEWARE : Grouping of replicates may be incorrect !! \n") else {
if(!silent && rmRaw) message(fxNa,"Note : Some filenames contain '.raw', others do NOT; solved inconsistency ..")
# sdrfDat[chFi, c(10,22:24)]
sdrfDat <- sdrfDat[chFi,]
if(!silent) message(fxNa,"Successfully adjusted order of sdrf to content of ",basename(.corPathW(suplAnnotFile)))
}
syncColumns["sdrfDat"] <- TRUE
} else if(!silent) message(fxNa, if(debug) "rSM6a "," summaryD exists, but unable to find file-names")
if(debug) { message(fxNa,"rSM6a1 dim sdrfDat ",nrow(sdrfDat)," ",ncol(sdrfDat)); rSM6a1 <- list() }
      } else {     # no summaryD, try colnames of abund
if(length(abund) >0 && length(dim(abund)) >1) {
if(debug) { message(fxNa,"rSM6b dim sdrfDat ",nrow(sdrfDat)," ",ncol(sdrfDat)); rSM6b <- list(sdrf=sdrf,sdrfDat=sdrfDat,abund=abund,uplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns) }
## requires utils::packageVersion("wrMisc") > "1.11.1"
sdrfDaIni <- sdrfDat
sdrfDat <- wrMisc::matchMatrixLinesToRef(mat=sdrfDat, ref=colnames(abund), addRef=TRUE, silent=silent, debug=debug, callFrom=fxNa) # 2way-grep
## check matching ?
if(length(sdrfDat) <1) { ## failed to align - further trim names
if(debug) { message(fxNa,"Failed to align - further trim names rSM6a3 ")}
## now look for bad separator '.' before text and remove
colNaAbund <- colnames(abund)
ch1 <- grep("[[:digit:]]\\.[[:alpha:]]", colNaAbund)
if(any(ch1)) {
selLoc <- sapply(gregexpr("[[:digit:]]\\.[[:alpha:]]", colNaAbund[ch1]), function(x) x[[1]])
colNaAbund[ch1] <- paste0(substr(colNaAbund[ch1],1,selLoc), substring(colNaAbund[ch1], selLoc+2)) }
sdrfDat <- wrMisc::matchMatrixLinesToRef(mat=sdrfDaIni, ref=colNaAbund, addRef=TRUE, silent=silent, debug=debug, callFrom=fxNa) # 2way-grep
if(length(sdrfDat) <1) {
colNaEnum <- all(grepl("_[[:digit:]]+$", colNaAbund))
if(colNaEnum) { tm1 <- sub("_[[:digit:]]+$","", colNaAbund)
colNaAbund2 <- sub("\\..+","", substr(colNaAbund, 1, nchar(tm1)))
colNaAbund3 <- paste0(colNaAbund2,substring(colNaAbund, nchar(colNaAbund) -1),"$") # without repeated text after 1st '.'
sdrfDat <- wrMisc::matchMatrixLinesToRef(mat=sdrfDaIni, ref=colNaAbund3, addRef=TRUE, silent=silent, debug=debug, callFrom=fxNa) # 2way-grep
}
}
if(length(sdrfDat) <1 && !silent) message(fxNa,"PROBLEM : FAILED to align sdrf to actual colnames of data !!!")
}
rm(sdrfDaIni)
syncColumns["sdrfDat"] <- TRUE # really sure that synchronization successful ?
} else message(fxNa, if(debug) "Note : NO Additional information on filenames-order found, can't correct/adjust sdrf (ie sdrfDat) !! rSM6b")
}
}
if(debug) { message(fxNa,"rSM6c dim sdrfDat ",nrow(sdrfDat)," ",ncol(sdrfDat)); rSM6c <- list(sdrf=sdrf,sdrfDat=sdrfDat,abund=abund,uplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,parametersD=parametersD,syncColumns=syncColumns) }
## 2.3 ready to make setupSd
if(length(sdrfDat) >0) {
if(TRUE) {
if(length(sdrf) >1 ) { # sdrf as data.frame (or list) # && length(dim(sdrf)) >1
replStrOpt <- c("highest","lowest","min","max","median","combAll","combNonOrth")
## is it risky to search in 2nd value of sdrf ?
chCol <- grep(paste0(sdrf[2],"$"), sub("\\.$","", colnames(sdrfDat)))
if(length(chCol) >0) {
if(debug) message(fxNa,"Using column '",colnames(sdrfDat)[chCol[1]],"' for setupSd")
sdrf[2] <- chCol[1]
} else if(!any(replStrOpt %in% sdrf[2])) {
if(!silent) message(fxNa,"Invalid entry for 'sdrf[2]', resetting to default ('median')")
sdrf[2] <- "median"
}
setupSd <- try(wrMisc::replicateStructure(sdrfDat, method=if(length(sdrf) >1) sdrf[2], silent=silent, callFrom=fxNa, debug=debug), silent=TRUE)
if(inherits(setupSd, "try-error")) {message(fxNa,"Unable to figure out replicate Structure"); setupSd <- NULL; syncColumns["sdrfDat"] <- FALSE}
} else { # sdrf given as file-name (eg git)
combMeth <- if(length(groupPref$combMeth) ==1) groupPref$combMeth else "combNonOrth"
setupSd <- list(combNonOrth=try(wrMisc::replicateStructure(sdrfDat, method=combMeth, silent=silent, callFrom=fxNa, debug=debug)),
lowest=try(wrMisc::replicateStructure(sdrfDat, method="lowest", silent=silent, callFrom=fxNa, debug=debug)))
ch1 <- sapply(setupSd, inherits, "try-error")
if(all(ch1)) {message(fxNa,"UNABLE to understand replicate-structure from sdrf !!"); setupSd <- NULL; syncColumns["sdrfDat"] <- FALSE
} else if(any(ch1)) {setupSd <- setupSd[which(!ch1)]; message(fxNa,"REMOVING one attempt of understanding replicate-structure")}
if(debug) {message(fxNa,if(debug) "rSM6d ","length setupSd ", length(setupSd)); rSM6d <- list() }
## choose among multiple options for grouping (number of groups)
ch1 <- sapply(setupSd, function(x) length(x$lev[which(!duplicated(x$lev))]))
lowNumberOfGroups <- if(length(groupPref) >0 && is.list(groupPref)) isTRUE(groupPref$lowNumberOfGroups) && length(groupPref$combMeth) !=1 else TRUE
useSe <- if(any(ch1 ==1, na.rm=TRUE)) which(ch1 !=1) else if(isTRUE(lowNumberOfGroups)) which.min(ch1) else which.max(ch1)
if(!silent) message(fxNa,"Choosing model '",names(useSe),"' for evaluating replicate-structure (ie ",ch1[useSe[1]]," groups of samples)" )
setupSd <- setupSd[[useSe[1]]] # select appropriate model
if(debug) {message(fxNa,if(debug)"rSM6e ","length setupSd ", length(setupSd)); rSM6e <- list() }
}
if("setupSd" %in% names(setupSd)) {setupSd <- wrMisc::partUnlist(setupSd, callFrom=fxNa,debug=debug);
if(debug) message(fxNa,"rSM6e - not expecting list of list for setupSd ! .. correcting"); rSM6e <- list(setupSd=setupSd,sdrf=sdrf,sdrfDat=sdrfDat,abund=abund,uplAnnotFile=suplAnnotFile,summaryD=summaryD,parametersD=parametersD,setupSdSoft=setupSdSoft,quantMeth=quantMeth)}
## try to find usable names for setupSd$lev
## still need usable names for factor-levels
newLe <- as.character(match(setupSd$lev, unique(setupSd$lev)))
names(newLe) <- gsub("[[:space:]]|[[:punct:]]", "_", sdrfDat[,setupSd$col[1]]) # recuperate names out of sdrfDat
if(debug) message(fxNa, if(debug)"rSM6f ","Using as names for groups of replicates/levels : ",wrMisc::pasteC(utils::head(unique(names(newLe)), 5)))
setupSd[["lev"]] <- newLe
if("setupSd" %in% names(setupSd)) { setupSd <- wrMisc::partUnlist(setupSd, callFrom=fxNa,debug=debug); if(debug) message(fxNa," rSM6g - not expecting list of list for setupSd ! .. correcting")}
## NOTE : names of levels may not be very meaningful/optimal
## option (future) : search in file-names for similar pattern
if(debug) {message(fxNa,"rSM6h names setupSd : ", wrMisc::pasteC(names(setupSd))); rSM6h <- list() }
}
if(debug) {message(fxNa,"rSM6i names setupSd : ", wrMisc::pasteC(names(setupSd))); rSM6i <- list() }
if(!is.list(setupSd)) {setupSd <- as.list(setupSd); if(debug) message(fxNa,"rSM6f 'setupSd' should be list, but was NOT !!")}
if(!"sdrfDat" %in% names(setupSd)) setupSd$sdrfDat <- sdrfDat
## re-adjust numbers of levels
iniNa <- names(setupSd$lev)
newLev <- match(setupSd$lev, unique(setupSd$lev))
chNa1 <- paste0("Sample_", sort(unique(sub("^Sample_","", iniNa))))
chNa2 <- paste0("Group_", sort(unique(sub("^Group_","", iniNa))))
if(all(chNa1 %in% iniNa) || all(chNa2 %in% iniNa)) { names(newLev) <- paste0("Group_", newLev)
} else names(newLev) <- iniNa
setupSd$lev <- newLev
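    ## illustration : initial levels like c('a','b','a') were just renumbered to c(1,2,1), possibly named 'Group_1','Group_2','Group_1'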
if(length(abund) >0) if(length(groupPref$groups) == ncol(abund)) { setupSd$lev <- setupSd$groups <- groupPref$groups
if(debug) message(fxNa,"Using custom groups (for extracted from full $sampleSetup) for setupSd$lev ")
}
setupSd$annotBySoft <- as.data.frame(summaryD)
setupSd$syncColumns <- syncColumns
} else { ## sdrf was given - but NOT conform : (no soft-generated sample annot available) try to match colnames of abund
if(debug) message(fxNa, if(debug) "rSM6j ","NO valid sdrf found")
## ie single source of info
if(length(summaryD) <1) { ## ie no summaryD
if(debug) message(fxNa, if(debug) "rSM6k ","NO valid sdrf and NO valid information (summaryD) from quant-software found")
} else { # ie summaryD is available
setupSd <- setupSdSoft
#message(fxNa,"Reading of sdrf was NOT successful and no summaryD => nothing can be done to mine experimental setup...")
}
}
if(debug) { message(fxNa,"rSM7 head of setupSd$lev : ",wrMisc::pasteC(utils::head(setupSd$lev))); rSM7 <- list(setupSd=setupSd,sdrf=sdrf,sdrfDat=sdrfDat,suplAnnotFile=suplAnnotFile,quantMeth=quantMeth,abund=abund,summaryD=summaryD,nSamp0=nSamp0)}
if(length(setupSd) >0) if(length(setupSd$lev) != nSamp0 && length(abund) >0) { ## keep this ? - redundant !
if(!silent) warning(fxNa, if(debug) "rSM7 ","Invalid information from sample meta-data or wrong experiment ! Number of samples from sdrf ",
" (",length(setupSd$lev),") and from experimental data (",ncol(abund),") don't match !")
setupSd <- NULL } else {
      if(length(abund) <1 && !silent) message(fxNa,"Note: Order of lines in sdrf not adjusted since no valid 'abund' given...")
setupSd$level <- setupSd$lev
}
  } else { setupSd <- setupSdSoft; setupSd$annotBySoft <- summaryD }
## allow export of sdrf-draft
if(length(parametersSd) >0 && length(setupSd$sdrfDat) <1) {
setupSd$sdrfExport <- parametersSd
setupSd$summaryD <- summaryD
}
if(debug) { message(fxNa,"rSM8 head of setupSd$lev : ",wrMisc::pasteC(utils::head(setupSd$lev))); rSM8 <- list(setupSd=setupSd)}
}
## finished readSampleMetaData
setupSd }
#' Read proteomics meta-data as sdrf file
#'
#' @description This function allows reading proteomics meta-data from an sdrf file, as provided on https://github.com/bigbio/proteomics-sample-metadata.
#' A data.frame containing all annotation data will be returned. To stay conform with the (non-obligatory) recommendations, column names are converted to lower case.
#'
#' @details The packages utils and wrMisc must be installed.
#' Please note that reading sdrf files (if not provided as local copy) will take a few seconds, depending on the responsiveness of github.
#'
#' @param fi (character) main input; may be full path or url to the file with meta-annotation. If a short project-name is given,
#'  it will be searched for at the location given by \code{urlPrefix}
#' @param chCol (character, length=1) optional checking of column-names
#' @param urlPrefix (character, length=1) prefix to add to search when no complete path or url is given on \code{fi}, defaults to proteomics-metadata-standard on github
#' @param silent (logical) suppress messages
#' @param callFrom (character) allows easier tracking of messages produced
#' @param debug (logical) display additional messages for debugging
#' @return This function returns the content of sdrf-file as data.frame (or \code{NULL} if the corresponding file was not found)
#' @seealso \code{\link[utils]{read.table}}
#' @examples
#' ## This may take a few seconds...
#' sdrf001819 <- readSdrf("PXD001819")
#' str(sdrf001819)
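#'
#' ## A local copy may be given via its full path, too (file-name shown is purely illustrative) :
#' # sdrfLocal <- readSdrf("/myPath/PXD001819.sdrf.tsv")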
#'
#'
#' @export
readSdrf <- function(fi, chCol="auto", urlPrefix="github", silent=FALSE, callFrom=NULL, debug=FALSE) {
## read proteomics meta-data as sdrf file
## see https://github.com/bigbio/proteomics-sample-metadata
## return data.frame, testing for
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readSdrf")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
datOK <- if(length(fi) <1) FALSE else TRUE
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) { datOK <- FALSE; if(!silent) message(fxNa,"package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")}
if(datOK) { if(length(fi) >1) fi <- fi[1] # make length=1
if(is.na(fi)) { datOK <- FALSE
if(!silent) message(fxNa," argument 'fi' is NA - nothing to do")} }
if(datOK) {
flexFileNa <- TRUE
chFi <- file.exists(fi)
if(!chFi && length(urlPrefix)==1 && !grepl("^https?://",fi)) {
if(debug) {message(fxNa," rs1"); rs1 <- list(fi=fi,chCol=chCol,urlPrefix=urlPrefix,datOK=datOK,chFi=chFi)}
if(identical(urlPrefix,"github")) urlPrefix <- "https://github.com/bigbio/proteomics-sample-metadata/blob/master/annotated-projects/"
if(grepl("/",fi)) { # if abs or relative path- do not adjust lower/upper case
if(flexFileNa) fi <- c(fi, file.path(dirname(fi),"sdrf.tsv"))
} else { # simple name, need to add folder of pxd project
projN <- toupper(sub("\\.sdrf\\.tsv$","",tolower(fi)))
fi <- paste0(projN,".sdrf.tsv") # set 'pxd'-part as upper case
if(isTRUE(flexFileNa)) fi <- c(fi,"sdrf.tsv")
fi <- paste0(urlPrefix,"/",projN,"/", fi )
}
if(debug) message(fxNa,"Could not find as file, content of 'fi' expanded to url ",wrMisc::pasteC(fi)," ")
} }
if(debug) {message(fxNa," rs1b"); rs1b <- list(fi=fi,chCol=chCol,urlPrefix=urlPrefix,datOK=datOK,chFi=chFi)}
## Main reading
if(datOK) {
if(debug && !grepl("\\.sdrf",fi[1])) message(fxNa,"Trouble ahead ? '",fi,"' does not contain '.sdrf' ...")
out <- suppressWarnings(try(utils::read.delim(wrMisc::gitDataUrl(fi[1]), sep='\t', header=TRUE, fill=TRUE), silent=!isTRUE(debug)))
if(inherits(out, "try-error")) {
if(any(grepl("pxd",fi))) {
if(debug) message(fxNa,"First try not successful; trying rather as '", gsub("pxd","PXD", fi[1])," (instead of '",fi[1],"')")
fi[1] <- gsub("pxd","PXD", fi[1])
out <- suppressWarnings(try(utils::read.delim(wrMisc::gitDataUrl(fi[1]), sep='\t', header=TRUE, fill=TRUE), silent=!isTRUE(debug)))}
if(inherits(out, "try-error") && length(fi) >1) {
if(debug) message(fxNa,"So far not successful; trying rather as '",fi[2],"')")
out <- suppressWarnings(try(utils::read.delim(wrMisc::gitDataUrl(fi[2]), sep='\t', header=TRUE, fill=TRUE), silent=!isTRUE(debug)))
if(!inherits(out, "try-error")) fi <- fi[2] }
}
if(debug) {message(fxNa," rs2"); rs2 <- list(fi=fi,out=out,chCol=chCol,urlPrefix=urlPrefix,datOK=datOK,chFi=chFi)}
if(inherits(out, "try-error")) { message(fxNa," FAILED reading '",fi[1],"' (possibly bad url/path ?)"); return(NULL)
} else {
fi2 <- sub("[[:print:]]+/","", sub("\\.sdrf\\.tsv","",fi)) # pxd part
if(debug) {message(fxNa," rs3"); rs3 <- list(fi=fi,out=out,fi2=fi2,chCol=chCol,urlPrefix=urlPrefix,datOK=datOK,chFi=chFi)}
if(any(length(dim(out)) !=2, dim(out) < 1, na.rm=TRUE)) { message(fxNa," data in bad format") # min 1 line, 1 col
} else {
## diagnostic for expected column-names
colnames(out) <- tolower(colnames(out))
if(any(sapply(c("auto","def","default"), identical, chCol), na.rm=TRUE)) chCol <- c("source.name", "assay.name",
"characteristics.biological.replicate.","characteristics.organism.", "comment.data.file.","comment.file.uri." )
if(length(chCol) >0) locCol <- match(chCol, colnames(out))
if(any(is.na(locCol)) && !silent) message(fxNa,"Data-Annotation ",fi2," Can't find column(s) ",wrMisc::pasteC(chCol[which(is.na(locCol))], quoteC="'"))
if(!silent) message(fxNa,"Successfully read ",ncol(out)," annotation columns for ",nrow(out)," samples")
return(out) } }
}
}
#' Read annotation files from UCSC
#'
#' This function allows reading and importing genomic \href{https://genome.ucsc.edu/cgi-bin/hgTables}{UCSC-annotation} data.
#' Files can be read as default UCSC export or in GTF-format.
#' In the context of proteomics we noticed that sometimes UniProt tables from UCSC are hard to match to identifiers from UniProt Fasta-files, ie many protein-identifiers won't match.
#' For this reason additional support is given to reading 'Genes and Gene Predictions': Since this table does not include protein-identifiers, a non-redundant list of ENSxxx transcript identifiers
#' can be exported as a file for an additional step of conversion, eg using the batch conversion tool at the site of \href{https://www.uniprot.org/id-mapping/}{UniProt}.
#' The initial genomic annotation can then be complemented using \code{\link{readUniProtExport}}.
#' Using this more elaborate route, we found higher coverage when adding genomic annotation to proteomics results initially annotated based on a Fasta-file.
#'
#' @param fiName (character) name (and path) of file to read
#' @param exportFileNa (character) optional file-name to be exported, if \code{NULL} no file will be written
#' @param gtf (logical) specify if file \code{fiName} in gtf-format (see \href{https://genome.ucsc.edu/cgi-bin/hgTables}{UCSC})
#' @param simplifyCols (character) optional list of column-names to be used for simplification (if 6 column-headers are given) : the 1st value will be used to identify the column
#'  used as reference to summarize all lines with this ID; for the 2nd (typically chromosome names) a representative value will be taken,
#'  for the 3rd (typically gene start site) the minimum will be taken,
#'  for the 4th (typically gene end site) the maximum will be taken, for the 5th and 6th representative values will be reported;
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a matrix, optionally the file 'exportFileNa' may be written
#' @seealso \code{\link{readUniProtExport}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' gtfFi <- file.path(path1, "UCSC_hg38_chr11extr.gtf.gz")
#' # here we'll write the file for UniProt conversion to tempdir() to keep things tidy
#' expFi <- file.path(tempdir(), "deUcscForUniProt2.txt")
#' UcscAnnot1 <- readUCSCtable(gtfFi, exportFileNa=expFi)
#'
#' ## results can be further combined with readUniProtExport()
#' deUniProtFi <- file.path(path1, "deUniProt_hg38chr11extr.tab")
#' deUniPr1 <- readUniProtExport(deUniProtFi, deUcsc=UcscAnnot1,
#' targRegion="chr11:1-135,086,622")
#' deUniPr1[1:5,-5]
#' @export
readUCSCtable <- function(fiName, exportFileNa=NULL, gtf=NA, simplifyCols=c("gene_id","chr","start","end","strand","frame"), silent=FALSE, debug=FALSE, callFrom=NULL) {
## read & parse ensGene.gtf type file from UCSC, (optional) export to file for batch conversion on UniProt, return annotation (matrix)
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readUCSCtable")
if(length(fiName) >1) fiName <- fiName[1] else {if(length(fiName) < 1) stop(" argument 'fiName' seems empty")}
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
chFi <- file.exists(fiName)
if(!chFi) stop(" File '",fiName,"' not found ! (maybe you are not pointing to the correct direcory ?)")
chPa1 <- try(find.package("utils"), silent=TRUE)
chPa2 <- try(find.package("R.utils"), silent=TRUE)
if(inherits(chPa1, "try-error")) stop(fxNa,"Package 'utils' not found ! Please install first ..")
## check/find out if file is gtf
if(is.na(gtf)) { # try to guess/check if gtf=TRUE
gtfColNa <- c("#bin name","chrom","strand","txStart","txEnd","cdsStart","cdsEnd","exonCount","exonStarts","exonEnds","score","name2",
"cdsStartStat","cdsEndStat","exonFrame","#chrom","chromStart","chromEnd","score","thickStart","thickEnd","match","misMatch","repMatch",
"nCount","acc","status","ensGene","ensTrans")
tmp <- try(utils::read.table(fiName, header=FALSE, stringsAsFactors=FALSE, nrows=7))
if(inherits(tmp, "try-error")) {
gtf <- length(grep("\\.gtf$|\\.gtf\\.gz$", tolower(basename(fiName)))) >0
if(!silent) message(fxNa," Quick guess by file-name if compressed file '",basename(fiName),"' is gtf : ",gtf)
} else {
gtf <- sum(gtfColNa %in% tmp[1,], na.rm=TRUE) <7
if(!silent) message(fxNa," File '",basename(fiName),"' is gtf : ",gtf)
}
}
if(debug) message(fxNa,"rUC1")
if(inherits(chPa2, "try-error")) { # R.utils not available for reliable test if file is gz zipped
if(!silent) message(fxNa," Package 'R.utils' not available, assuming by file-extension if file is gz-compressed")
chGz <- length(grep("\\.gz$", tolower(fiName))) >0
} else chGz <- R.utils::isGzipped(fiName)
## main reading
ensG1 <- try(utils::read.table(fiName, header=!gtf, stringsAsFactors=FALSE))
if(inherits(chFi, "try-error")) {
## try other format ? ... use wrMisc::readVarColumns()
errMsg <- "' - please check format or see if file is readable"
if(chGz) { # need to decompress first : copy to tempdir, uncompress, read
if(inherits(chPa2, "try-error")) stop(fxNa," Package 'R.utils' for decompressing .gz not found ! Please install first or decompress file ",fiName," manually ..")
file.copy(fiName, file.path(tempdir(),basename(fiName)))
## decompress
tmp <- try(R.utils::gunzip(file.path(tempdir(),basename(fiName))), silent=TRUE)
if(inherits(tmp, "try-error")) stop(" Failed to decompress file ", fiName)
## read
if(!silent) message(fxNa," try reading decompressed file using readVarColumns() ...")
      ensG1 <- try(wrMisc::readVarColumns(file.path(tempdir(),basename(fiName)), header=!gtf, silent=silent, callFrom=fxNa), silent=TRUE)
rmFi <- file.path(tempdir(), c(basename(fiName), sub("\\.gz$","",basename(fiName))))
sapply(rmFi,function(x) if(file.exists(x)) file.remove(x)) # clean up files
} else ensG1 <- try(wrMisc::readVarColumns(fiName, header=!gtf, silent=silent, callFrom=fxNa), silent=TRUE)
if(inherits(ensG1, "try-error")) stop(" Could not succed to read file '",basename(fiName),errMsg)
}
## correct colnames, export ENSG if available
if(gtf) {
## gtf-format, add standard colnames
colnames(ensG1) <- c("chr","source","type","start","end","score","strand","frame","features")[1:ncol(ensG1)]
## now split last column to gene_id and transcript_id
chTId <- base::grep("; transcript_id [[:alpha:]]+[[:digit:]]+" , as.character(ensG1[1:30,9]), fixed=FALSE, perl=FALSE)
chGId <- base::grep("^gene_id [[:alpha:]]+[[:digit:]]+", as.character(ensG1[1:30,9]), fixed=FALSE, perl=FALSE)
chId <- base::grep("transcript_id [[:punct:]]{0,1}ENS[[:upper:]]+[[:digit:]][[:print:]]+", as.character(ensG1[1:30,9]), fixed=FALSE, perl=FALSE)
if(length(chTId) >0 & length(chGId) >0) {
## need to split column
newC <- matrix(unlist( strsplit( sub("^gene_id ","",as.character(ensG1[,9]), fixed=FALSE, perl=FALSE),"; transcript_id ",
fixed=FALSE, perl=FALSE)), byrow=TRUE, ncol=2, dimnames=list(NULL,c("gene_id","transcript_id")))
newC <- sub("; $","", newC)
ensG1 <- cbind(ensG1[,-9], newC)
}
}
## check for column 'gene_id' (first of simplifyCols)
chG <- colnames(ensG1) %in% simplifyCols[1]
if(!any(chG)) {
## try to use the columns wo colnames as 'gene_id'
NAcol <- colnames(ensG1) %in% "NA" | is.na(colnames(ensG1))
if(any(NAcol)) { chGeId <- which.max(apply(utils::head(ensG1), 2, function(x) sum(x==simplifyCols[1])))
if(length(chGeId) >0 && chGeId[1] +1 %in% which(NAcol)) colnames(ensG1)[chGeId[1] +1] <- simplifyCols[1] else {
if(!silent) message(fxNa," failed to locate column to use as '",simplifyCols[1],"'")}
} }
## option to summarize by first column of 'simplifyCols'
if(length(simplifyCols) >5) { simplifyCols <- simplifyCols[which(simplifyCols %in% colnames(ensG1))]
if(length(simplifyCols) <6 && !silent) message(fxNa," Cannot find sufficient column-names given in argument 'simplifyCols', ignoring ..") }
if(length(simplifyCols) >5) {
iniDim <- dim(ensG1)
ensG1 <- matrix(unlist(by(ensG1[,simplifyCols[1:6]], ensG1[,simplifyCols[1]],
function(x) { if(length(dim(x)) <2) x <- matrix(x, ncol=6, dimnames=list(NULL,simplifyCols[1:6]))
c(x[1,1], x[1,2], min(x[,3],na.rm=TRUE), max(x[,4],na.rm=TRUE), x[1,5], x[1,6])} )),
byrow=TRUE, ncol=6, dimnames=list(NULL,simplifyCols))
if(!silent) message(fxNa," simplifed from ",iniDim[1]," to ",nrow(ensG1)," non-redundant ",simplifyCols[1])
    ## export ENSRNOT for conversion at UniProt site
if(length(exportFileNa) >0) { # also need to remove ENST-version-tags (since UniProt won't recognize Ensemble gene IDs with version tags)
exportFileNa <- gsub("\\\\", "/", exportFileNa) #"
forFile <- unique(sub("\\.[[:digit:]]+$", "", ensG1[,"gene_id"], fixed=FALSE, perl=FALSE))
msg <- c("' for conversion on https://www.uniprot.org/id-mapping/") # was "https://www.uniprot.org/id-mapping/"
if(!silent) message(fxNa," Write to file : ",paste(utils::head(forFile,4),collapse=", ")," ...")
if(!silent) if(file.exists(exportFileNa[1])) message(fxNa," Beware, file '",exportFileNa[1],"' will be overwritten !") else message(fxNa,
" Exporting file '",exportFileNa,msg[1]) # export to file for batch conversion on UniProt
tmp <- try(cat(forFile, file=exportFileNa[1], sep="\n"), silent=TRUE)
if("try-error" %in% class(tmp)) warning(fxNa," Beware: Did not succed to write results to file")}
}
##
ensG1 }
#' Read protein annotation as exported from UniProt batch-conversion
#'
#' This function allows reading and importing protein-ID conversion results from \href{https://www.uniprot.org/id-mapping/}{UniProt}.
#' To do so, first copy/paste your query IDs into \href{https://www.uniprot.org/id-mapping/}{UniProt} 'Retrieve/ID mapping' field called '1. Provide your identifiers' (or upload as file), verify '2. Select options'.
#' In a typical case of 'enst000xxx' IDs you may leave default settings, ie 'Ensemble Transcript' as input and 'UniProt KB' as output. Then, 'Submit' your search and retrieve results via
#' 'Download', where you need to specify a 'Tab-separated' format ! If you download as 'Compressed' you need to decompress the .gz file before running this function.
#' In addition, a file with UCSC annotation (Ensrnot accessions and chromosomic locations, obtained using \code{\link{readUCSCtable}}) can be integrated.
#' @details
#' In a typical use case, chromosomic location annotation is first extracted from UCSC for the species of interest and imported to R using \code{\link{readUCSCtable}} .
#' However, the tables provided by UCSC don't contain UniProt IDs. Thus, an additional (batch-)conversion step is needed.
#' For this reason \code{\link{readUCSCtable}} allows writing a file with Ensemble transcript IDs which can be converted to UniProt IDs at the site of \href{https://www.uniprot.org/id-mapping/}{UniProt}.
#' Then, UniProt annotation (downloaded as tab-separated) can be imported and combined with the genomic annotation using this function.
#' @param UniProtFileNa (character) name (and path) of file exported from Uniprot (tabulated text file including headers)
#' @param deUcsc (data.frame) object produced by \code{readUCSCtable} to be combined with data from \code{UniProtFileNa}
#' @param targRegion (character or list) optional marking of chromosomal locations to be part of a given chromosomal target region,
#' may be given as character like \code{chr11:1-135,086,622} or as \code{list} with a first component characterizing the chromosome and an integer-vector with start- and end- sites
#' @param useUniPrCol (character) optional declaration which columns from the UniProt exported file should be used/imported (default 'EnsID','Entry','Entry.name','Status','Protein.names','Gene.names','Length').
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a data.frame (with columns $EnsID, $Entry, $Entry.name, $Status, $Protein.names, $Gene.names, $Length; if \code{deUcsc} is integrated plus: $chr, $type, $start, $end, $score, $strand, $Ensrnot, $avPos)
#' @seealso \code{\link{readUCSCtable}}
#' @examples
#' path1 <- system.file("extdata",package="wrProteo")
#' deUniProtFi <- file.path(path1,"deUniProt_hg38chr11extr.tab")
#' deUniPr1a <- readUniProtExport(deUniProtFi)
#' str(deUniPr1a)
#'
#' ## Workflow starting with UCSC annotation (gtf) files :
#' gtfFi <- file.path(path1,"UCSC_hg38_chr11extr.gtf.gz")
#' UcscAnnot1 <- readUCSCtable(gtfFi)
#' ## Results of conversion at UniProt are already available (file "deUniProt_hg38chr11extr.tab")
#' myTargRegion <- list("chr1", pos=c(198110001,198570000))
#' myTargRegion2 <-"chr11:1-135,086,622" # works equally well
#' deUniPr1 <- readUniProtExport(deUniProtFi,deUcsc=UcscAnnot1,
#' targRegion=myTargRegion)
#' ## Now UniProt IDs and genomic locations are both available :
#' str(deUniPr1)
#' @export
readUniProtExport <- function(UniProtFileNa, deUcsc=NULL, targRegion=NULL, useUniPrCol=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read annotation exported from https://www.uniprot.org/id-mapping/ (was https://www.uniprot.org/uploadlists/) upload Ensemble Transcript => UniprotKB => export
## targRegion : list('chr1',pos=c(198110001,198570000)) or 'chr11:1-135,086,622'
fxNa <- wrMisc::.composeCallName(callFrom,newNa="readUniProtExport")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(length(UniProtFileNa) >1) UniProtFileNa <- UniProtFileNa[1] else {if(length(UniProtFileNa) < 1) stop(" argument 'UniProtFileNa' seems empty")}
chFi <- file.exists(UniProtFileNa)
if(!chFi) stop(" file '",UniProtFileNa,"' not found !")
chExt <- length(grep("\\.gz$", UniProtFileNa, fixed=FALSE, perl=FALSE)) >0
chPa <- try(find.package("utils"),silent=TRUE)
if(inherits(chPa, "try-error")) stop("Package 'utils' not found ! Please install first")
## main
deUniProt <- try(utils::read.delim(UniProtFileNa,stringsAsFactors=FALSE), silent=TRUE)
errMsg1 <- " seems not to be in UniProt 'tab-separated' format (does not contain sufficent number of columns) !"
if(inherits(deUniProt, "try-error")) {
    deUniProt <- try(wrMisc::readVarColumns(if(chExt) gzfile(UniProtFileNa) else UniProtFileNa, callFrom=fxNa), silent=TRUE)
if(inherits(deUniProt, "try-error")) stop("Can't read file '",UniProtFileNa,"' - please check format !") else {
if(!silent) message(fxNa,"Managed to read file using readVarColumns()") }
if(ncol(deUniProt) <9) stop("file ",UniProtFileNa,errMsg1)
colnames(deUniProt)[1:9] <- c("EnsTraID","xx","UniprotID",colnames(deUniProt)[c(2:7)]) # initial colnames by readVarColumns are shifted
}
if(ncol(deUniProt) <7) stop("file ",UniProtFileNa,errMsg1) # check if (in)sufficient numer of columns
if(nrow(deUniProt) <2 && !silent) message(fxNa," CAUTION, file '",UniProtFileNa,"' contains only ",nrow(deUniProt)," lines !")
## correct colnames
chCol <- c(grep("yourlist.",colnames(deUniProt)[1]) >0, grep("isomap.",colnames(deUniProt)[2]) >0, "Entry" %in% colnames(deUniProt))
if(chCol[1]) colnames(deUniProt)[1] <- "EnsTraID"
if(chCol[2]) colnames(deUniProt)[2] <- "xx" # this column contains almost no information
colnames(deUniProt)[3] <- "UniProtID"
## combine with data initially/previously read from Ucsc
multID <- NULL
colnames(deUniProt) <- sub(" ",".",colnames(deUniProt))
if(length(useUniPrCol) <1) useUniPrCol <- c("EnsTraID","UniProtID","Entry.name","Status","Protein.names","Gene.names","Length")
useUniPrCo <- wrMisc::extrColsDeX(deUniProt, useUniPrCol, doExtractCols=FALSE, callFrom=fxNa, silent=silent)
## treat multi-Ensemble entries : need to replicate lines of table for multiple concatenated (eg ENSRNOT00000031808,ENSRNOT00000093745)
splitExtendConcat <- function(mat,useCol=1,sep=",",sep2="[[:digit:]],[[:alpha:]]+"){
## extend matrix or data.frame by additional lines if column 'useCol' contains multiple concatenated terms (content of other columns will be duplicated)
## 'sep' used with strsplit() and grep() to identify lines and split, also used to construct (generic) term for keeping just first
## 'sep2' optional custom pattern used with grep() to identify lines; will be used instead of 'generic' sep to identify entries to split lateron
## main
chMult <- grep(if(length(sep2) >0) sep2 else sep, mat[,useCol], fixed=FALSE, perl=FALSE)
if(length(chMult) >0) {
##
spl1 <- strsplit(mat[chMult,useCol],sep, fixed=FALSE, perl=FALSE)
spl2 <- unlist(lapply(spl1, function(x) x[-1]), use.names=FALSE)
toLine <- rep(chMult, sapply(spl1,length) -1)
mat[,useCol] <- sub(paste(sep,"[[:print:]]*$",sep=""),"",mat[,useCol], fixed=FALSE, perl=FALSE)
mat2 <- cbind(spl2,mat[c(toLine),-1])
colnames(mat2)[1] <- colnames(mat)[1]
mat <- rbind(mat,mat2)
}
mat }
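  ## illustration (hypothetical line) : an entry 'ENSRNOT00000031808,ENSRNOT00000093745' gets trimmed to its 1st ID,
  ## while an additional line carrying 'ENSRNOT00000093745' (other columns duplicated) is appended at the end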
deUniProt <- splitExtendConcat(deUniProt, sep=",", sep2="[[:digit:]],[[:upper:]]+")
if(length(deUcsc) >0) {
chGeneId <- which(colnames(deUcsc) =="gene_id")
if(length(chGeneId) <1) stop("Invalid file-content: The file '",UniProtFileNa,"' does not conatain a column 'gene_id' ! Please check the input file")
deUcsc[,"gene_id"] <- sub("\\.[[:digit:]]+$","",deUcsc[,"gene_id"])
useUcCol <- wrMisc::naOmit(match(c("gene_id","chr","start","end","strand","frame"),colnames(deUcsc)))
deUcsc <- wrMisc::convMatr2df(deUcsc[,useUcCol], addIniNa=FALSE, callFrom=fxNa,silent=silent)
matchUniprInUcsc <- match(deUniProt[,1], deUcsc[,"gene_id"])
if(sum(!is.na(matchUniprInUcsc)) <4) {
if(!silent) message(fxNa," low yield matching ",wrMisc::pasteC(deUniProt[1:3,1],quoteC="'")," and ",
wrMisc::pasteC(deUcsc[1:3,"gene_id"],quoteC="'"), " convert all to lower case and remove version numbers ('xxx.2') for better matching")
matchUniprInUcsc <- match(sub("\\.[[:digit:]]+$","", tolower(deUniProt[,1])), sub("\\.[[:digit:]]+$","", tolower(deUcsc[,"gene_id"])))
if(sum(!is.na(matchUniprInUcsc)) <4) warning(fxNa," Matching failed : Very few or no matches between UniProtFile and deUcsc !")}
if(!silent) message(fxNa," intergrating genomic information for ",length(matchUniprInUcsc)," entries (",sum(is.na(matchUniprInUcsc))," not found)")
## add chrom Loc to deUniProt => combined DB
combAllChrDB <- cbind(deUniProt[,useUniPrCo], deUcsc[matchUniprInUcsc,]) ## add Ensrnot c(1,3:5,7,10)
if(!silent) message(fxNa," ",nrow(combAllChrDB)," IDs in output")
combAllChrDB <- cbind(combAllChrDB,avPos=if(all(c("start","end") %in% colnames(combAllChrDB))) {
round(rowMeans(combAllChrDB[,c("start","end")])) } else NA) # add mean gene-position for easier sorting
    ## mark if genomic positions are in target region
if(!all(c("chr","start") %in% colnames(combAllChrDB))) targRegion <- NULL
if(length(targRegion) >0) if(is.character(targRegion) && length(targRegion) ==1) {
targRegion <- unlist(strsplit(targRegion,":"))
targRegion <- list(targRegion[1],gsub(",","",unlist(strsplit(targRegion[2],"-")))) }
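    ## illustration : targRegion='chr11:1-135,086,622' gets parsed to list('chr11', c('1','135086622'))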
combAllChrDB <- cbind(combAllChrDB,inTarg=if(length(targRegion) >0) {
combAllChrDB[,"chr"]==targRegion[[1]] & as.integer(combAllChrDB[,"start"]) >targRegion[[2]][1] & as.integer(combAllChrDB[,"end"]) <targRegion[[2]][2]} else NA)
} else combAllChrDB <- deUniProt[,useUniPrCo]
## convert factor-columns to character
chFa <- rep(NA,ncol(combAllChrDB))
for(i in 1:ncol(combAllChrDB)) chFa[i] <- is.factor(combAllChrDB[,i])
if(any(chFa)) for(i in which(chFa)) combAllChrDB[,i] <- as.character(combAllChrDB[,i])
chEnsID <- "gene_id" %in% colnames(combAllChrDB)
if(chEnsID) combAllChrDB <- combAllChrDB[,-1*which(colnames(combAllChrDB)=="gene_id")]
combAllChrDB }
#' Read (Normalized) Quantitation Data Files Produced By Wombat At Protein Level
#'
#' Protein quantification results from \href{https://github.com/wombat-p}{Wombat-P} using the Bioconductor package Normalizer can be read using this function and relevant information extracted.
#' Input files compressed as .gz can be read as well.
#' The protein abundance values (XIC) and peptide counts get extracted. Since protein annotation is not very extensive with this format of data, the function allows reading the
#' initial fasta files (from the directory above the quantitation-results) allowing to extract more protein-annotation (like species).
#' Sample-annotation (if available) can be extracted from sdrf files, which are typically part of the Wombat output, too.
#' The protein abundance values may be normalized using multiple methods (median normalization as default), the determination of normalization factors can be restricted to specific proteins
#' (normalization to bait protein(s), or to invariable matrix of spike-in experiments).
#' The protein annotation data gets parsed to extract specific fields (ID, name, description, species ...).
#' Besides, a graphical display of the distribution of protein abundance values may be generated before and after normalization.
#'
#' @details
#' The standard workflow of Wombat-P writes the results of each analysis-method/quantification-algorithm as .csv files.
#' Meta-data describing the proteins may be available from two sources :
#' a) The 1st column of the Wombat/normalizer output.
#' b) From the .fasta file in the directory above the analysis/quantification results of the Wombat-workflow
#'
#'
#' Meta-data describing the samples and experimental setup may be available from a sdrf-file (from the directory above the analysis/quantification results)
#' If available, the meta-data will be examined for determining groups of replicates and
#' the results thereof can be found in $sampleSetup$levels.
#' Alternatively, a dataframe formatted like sdrf-files (ie for each sample a separate line, see also function \code{readSdrf}) may be given, too.
#'
#' This import-function has been developed using Wombat-P version 1.x.
#' The final output is a list containing these elements: \code{$raw}, \code{$quant}, \code{$annot}, \code{$counts}, \code{$sampleSetup}, \code{$quantNotes}, \code{$notes}, or (if \code{separateAnnot=FALSE}) data.frame
#' with annotation- and main quantification-content. If \code{sdrf} information has been found, an additional list-element \code{setup}
#' will be added containing the entire meta-data as \code{setup$meta} and the suggested organization as \code{setup$lev}.
#'
#'
#' @param fileName (character) name of file to be read (default 'proteinGroups.txt' as typically generated by Compomics in txt folder). Gz-compressed files can be read, too.
#' @param path (character) path of file to be read
#' @param quantSoft (character) quantification-software used inside Wombat-P
#' @param fasta (logical or character) if \code{TRUE} the (first) fasta from one direcory higher than \code{fileName} will be read as fasta-file to extract further protein annotation;
#' if \code{character} a fasta-file at this location will be read/used
#' @param isLog2 (logical) typically data read from Wombat are expected to be \code{isLog2=TRUE}
#' @param normalizeMeth (character) normalization method, defaults to \code{median}, for more details see \code{\link[wrMisc]{normalizeThis}})
#' @param quantCol (character or integer) exact col-names, or if length=1 content of \code{quantCol} will be used as pattern to search among column-names for $quant using \code{grep}
#' @param contamCol (character or integer, length=1) which columns should be used for contaminants
#' @param pepCountCol (character) pattern to search among column-names for count data (1st entry for 'Razor + unique peptides', 2nd for 'Unique peptides', 3rd for 'MS.MS.count' (PSM))
#' @param read0asNA (logical) decide if initial quantifications at 0 should be transformed to NA (thus avoiding -Inf in log2 results)
#' @param sampleNames (character) custom column-names for quantification data; this argument has priority over \code{suplAnnotFile}
#' @param extrColNames (character) column names to be read (1st position: prefix for LFQ quantitation, default 'LFQ.intensity'; 2nd: column name for protein-IDs, default 'Majority.protein.IDs'; 3rd: column names of fasta-headers, default 'Fasta.headers', 4th: column name for number of protein IDs matching, default 'Number.of.proteins')
#' @param specPref (character) prefix to identifiers allowing to separate i) recognize contamination database, ii) species of main identifications and iii) spike-in species
#' @param refLi (character or integer) custom specify which line of data should be used for normalization, ie which line is main species; if character (eg 'mainSpe'), the column 'SpecType' in $annot will be searched for exact match of the (single) term given
#' @param remRev (logical) option to remove all protein-identifications based on reverse-peptides
#' @param remConta (logical) option to remove all proteins identified as contaminants
#' @param separateAnnot (logical) if \code{TRUE} output will be organized as list with \code{$annot}, \code{$abund} for initial/raw abundance values and \code{$quant} with final normalized quantitations
#' @param gr (character or factor) custom defined pattern of replicate association, will override final grouping of replicates from \code{sdrf} and/or \code{suplAnnotFile} (if provided)
#' @param sdrf (logical, character, list or data.frame) optional extraction and adding of experimenal meta-data:
#' if \code{sdrf=TRUE} the 1st sdrf in the directory above \code{fileName} will be used
#' if character, this may be the ID at ProteomeExchange,
#' the second element may give further indications for automatic organization of groups of replicates.
#' Besides, the output from \code{readSdrf} or a list from \code{defineSamples} may be provided; if \code{gr} is provided, \code{gr} gets priority for grouping of replicates
#' @param suplAnnotFile (logical or character) optional reading of supplemental files produced by Compomics; if \code{gr} is provided, it gets priority for grouping of replicates
#' if \code{TRUE} default to files 'summary.txt' (needed to match information of \code{sdrf}) and 'parameters.txt' which can be found in the same folder as the main quantitation results;
#' if \code{character} the respective file-names (relative or absolute path), 1st is expected to correspond to 'summary.txt' (tabulated text, the samples as given to Compomics) and 2nd to 'parameters.txt' (tabulated text, all parameters given to Compomics)
#' @param groupPref (list) additional parameters for interpreting meta-data to identify structure of groups (replicates), will be passed to \code{readSampleMetaData}.
#' May contain \code{lowNumberOfGroups=FALSE} for automatically choosing a rather elevated number of groups if possible (defaults to low number of groups, ie higher number of samples per group)
#' @param plotGraph (logical) optional plot vioplot of initial and normalized data (using \code{normalizeMeth}); alternatively the argument may contain numeric details that will be passed to \code{layout} when plotting
#' @param titGraph (character) custom title to plot of distribution of quantitation values
#' @param wex (numeric) relative expansion factor of the violin in plot
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a list with \code{$raw} (initial/raw abundance values), \code{$quant} with final normalized quantitations, \code{$annot} (columns ), \code{$counts} an array with 'PSM' and 'NoOfRazorPeptides',
#' \code{$quantNotes}, \code{$notes} and optional \code{setup} for meta-data from \code{sdrf}; or a data.frame with quantitation and annotation if \code{separateAnnot=FALSE}
#' @seealso \code{\link[utils]{read.table}}, \code{\link[wrMisc]{normalizeThis}}) , \code{\link{readProteomeDiscovererFile}}; \code{\link{readProlineFile}} (and other import-functions), \code{\link{matrixNAinspect}}
#' @examples
#' path1 <- system.file("extdata", package="wrProteo")
#' # Here we'll load a short/trimmed example file (originating from Compomics)
#' fiNa <- "tinyWombCompo1.csv.gz"
#' dataWB <- readWombatNormFile(file=fiNa, path=path1, tit="tiny Wombat/Compomics, Normalized ")
#' summary(dataWB$quant)
#' @export
readWombatNormFile <- function(fileName, path=NULL, quantSoft="(quant software not specified)", fasta=NULL, isLog2=TRUE, normalizeMeth="none", quantCol="abundance_", contamCol=NULL,
pepCountCol=c("number_of_peptides"), read0asNA=TRUE, refLi=NULL, sampleNames=NULL,
extrColNames=c("protein_group"), specPref=NULL,
remRev=TRUE, remConta=FALSE, separateAnnot=TRUE, gr=NULL, sdrf=NULL, suplAnnotFile=NULL, groupPref=list(lowNumberOfGroups=TRUE),
titGraph=NULL, wex=1.6, plotGraph=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## prepare
fxNa <- wrMisc::.composeCallName(callFrom, newNa="readWombatNormFile")
oparMar <- if(plotGraph) graphics::par("mar") else NULL # only if figure might be drawn
remStrainNo <- TRUE # if TRUE extract Species in very stringent pattern
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
fixSpeciesNames <- TRUE
oparMar <- graphics::par("mar")
## functions
.cleanMQann <- function(x, sep="\\|", silent=FALSE, debug=FALSE, callFrom=NULL) {
## split multiple protein entries as with 1st column of MaxQuant data
    ## returns a matrix with cleaned columns 'db','ID','name','conta', followed by the initially parsed columns and 'iniSoftAnn'
## example ann1 <- read.delim(file.path(system.file("extdata", package="wrProteo"), "tinyWombCompo1.csv.gz"), sep=",", stringsAsFactors=FALSE)[,1]
## .cleanMQann(ann1)
    xIni <- x     # keep backup for recovering bizarre non-parsed entries
isCont <- grepl("CON__", x)
mult <- nchar(x) - nchar(gsub(";", "", x))
chMult <- mult >0
if(any(chMult)) {
spl1 <- strsplit(x[which(chMult)], ";")
## use entry with most separators (when multiple entries, eg 'sp|P00761|CON__TRYP_PIG;CON__P00761')
      spl1 <- sapply(spl1, function(y) { nSep <- nchar(y) - nchar(gsub("\\|","",y)); y[which.max(nSep)] })
x[which(chMult)] <- spl1 }
## split separators
chSpl <- function(y) {chID <- grepl("^[[:upper:]]{1,3}[[:digit:]]{2,}|^[[:upper:]]{1,3}[[:digit:]]+[[:upper:]]+[[:digit:]]*", y); chName <- grepl("[A-Z0-9]_[[:upper:]]",y); # extract db, ID & prot-name
c(dbIni= if((length(y) >1 && grepl("^[[:lower:]]{1,8}$", y[1])) || length(y) >2 && grepl("^[[:lower:]]{2}|[[:lower:]]{2}$",
y[1])) y[1] else NA, IDini=if(any(chID)) y[which(chID)[1]] else NA, nameIni=if(any(chName)) y[which(chName)[1]] else NA) }
x <- t(sapply(strsplit(x, sep), chSpl))
nColIni <- ncol(x)
cleanID <- function(y, useCol=c(db=1, ID=2, name=3)) {
ext <- grepl("[[:lower:]]+$", y[,useCol[2]]) # look for extension like 'P08758ups'
extNoDb <- which(ext & is.na(y[,useCol[1]]))
if(any(ext)) { cleanID <- sub("[[:lower:]]+$","", y[which(ext), useCol[2]])
if(length(extNoDb) >0) y[which(ext), useCol[1]] <- substring(y[which(ext), useCol[2]], nchar(cleanID) +1 )
y[which(ext), useCol[2]] <- cleanID }
prefi <- grepl("^[[:upper:]]+__[[:upper:]]", y[,useCol[3]]) # look for prefix like 'CON__FA5_BOVIN'
if(any(prefi)) { ch2 <- grepl("[A-Z0-9]_[[:upper:]]", y[which(prefi), useCol[3]]); if(any(ch2)) {
y[which(prefi)[which(ch2)], useCol[1]] <- tolower(sub("__[[:upper:]].+","", y[which(prefi)[which(ch2)], useCol[3]]))
y[which(prefi)[which(ch2)], useCol[3]] <- sub("^[[:upper:]]+__","", y[which(prefi)[which(ch2)], useCol[3]])}}
colnames(y) <- c("db","ID","name")
y }
x <- cbind(x, cleanID(x, useCol=c(db=1, ID=2, name=3)))
x <- cbind(x, conta=grepl("^con|^REV_", x[,"db"]) | grepl("__CON__",xIni))
## recuperate all (bizarre) non-parsed into ID
isNa <- rowSums(is.na(x)) > nColIni -2
if(any(isNa)) x[which(isNa),c(2+nColIni)] <- xIni[which(isNa)]
cbind(x[,c((nColIni+1):ncol(x), 1:nColIni)], iniSoftAnn=xIni) }
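  ## illustration for .cleanMQann (hypothetical entry) : 'sp|P00761|CON__TRYP_PIG;CON__P00761'
  ## gets parsed to db='con', ID='P00761', name='TRYP_PIG' and conta=TRUE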
.cleanPLann <- function(anno, inclLowerCaseExt=TRUE) {
## parse annotation from Proline quantification sheet (2nd col 'protein_group')
## entries like c("sp|P06169|PDC1_YEAST","sp|P02994|EF1A_YEAST","P00549|KPYK1_YEAST","sp|P00925|ENO2_YEAST")
ann2 <- matrix(ncol=3, nrow=length(anno), dimnames=list(NULL,c("database","uniqueIdentifier","proteinName")))
## use longest of multiple ID-entries (eg "Q14CN4-2|CON__K2C72_HUMAN; sp|Q14CN4-3|CON__K2C72_HUMAN; sp|Q14CN4|CON__K2C72_HUMAN")
## split concatenated multiple IDs, use longest
chMult <- nchar(sub(":","", anno)) < nchar(anno)
if(any(chMult)) { spl1 <- strsplit(as.character(anno[chMult]),";")
anno[which(chMult)] <- sapply(spl1, function(x) x[which.max(nchar(x))]) }
## database identifier
chDB <- grep("^[[:lower:]]+\\|.", anno)
if(length(chDB) >0) { ann2[chDB,1] <- sub("\\|.+", "", anno[chDB])
anno[chDB] <- sub("^[[:lower:]]+\\|", "", anno[chDB])}
## protein/uniProtID
chPat0 <- "^[[:upper:]]+[[:digit:]]+[0-9A-Z]*\\|."
chPat <- "^[[:upper:]]+[[:digit:]]+[0-9A-Z]*(\\-[[:digit:]]{1,3}){0,1}\\|." # like 'P10636|', 'P10636-8|'
chPat2 <- "^[[:upper:]]+[[:digit:]]+[0-9A-Z]*(\\-[[:digit:]]{1,3}){0,1}[[:lower:]]{0,12}\\|." # like 'P10636|', 'P10636-8|', 'P10636-8ups|'
chID <- grep(if(isTRUE(inclLowerCaseExt)) chPat2 else chPat, anno)
if(length(chID) >0) { ann2[chID,2] <- sub("\\|.+", "", anno[chID]) # retreive entry (until next separator)
anno[chID] <- sub(".+\\|", "", anno[chID])}
## last entry
nch <- which(nchar(anno) >0)
if(length(nch) >0) ann2[nch,3] <- anno[nch]
    ann2 <- cbind(database=ann2[,1], protein_group=sub("[[:lower:]]+$","", ann2[,2]), ann2[,-1], iniSoftAnn=anno)
ann2 }
.cleanSingleAnn <- function(anno, inclLowerCaseExt=TRUE) {
    ## parse annotation from single column (ie character vector), eg Compomics quantification sheet (2nd col 'protein_group')
## return 3 columns ; ID, IDext and iniSoftAnn
if(length(dim(anno)) >1) {warning(".cleanSingleann : expecting text vector and NOT matrix"); anno <- try(as.character(as.matrix(anno)))}
iniSoftAnn <- WOext <- anno
chPat3 <- "[[:upper:]][0-9A-Z]+(\\-[[:digit:]]{1,3}){0,1}[[:lower:]]*$" # recognize IDs eg 'P00915ups', 'P00915-3ups',
## still need to split multiple and use longest (eg from 'P02768,P02768ups')
chMult <- grep(",", anno)
if(length(chMult) >0) {
spl <- strsplit(as.character(anno[chMult]), ",")
anno[chMult] <- sapply(spl, function(x) x[which.max(nchar(x))])
}
    ## split lower-case extensions from IDs
if(isTRUE(inclLowerCaseExt)) { chExt <- grep(chPat3, anno)
if(length(chExt) >0) {
WOext <- anno
WOext[chExt] <- sub("[[:lower:]]+$", "", anno[chExt])
ext <- rep(NA, length(anno))
ext[chExt] <- substring(anno[chExt], nchar(WOext[chExt]) +1)
anno <- cbind(iniSoftAnn=iniSoftAnn, ID=WOext, IDext=ext)
} }
anno }
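  ## illustration for .cleanSingleAnn (hypothetical entry) : 'P02768,P02768ups' retains its longest term
  ## and gets split to ID='P02768' and IDext='ups' (while iniSoftAnn keeps the initial entry)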
## end functions
## init check
reqPa <- c("utils","wrMisc")
chPa <- sapply(reqPa, requireNamespace, quietly=TRUE)
if(any(!chPa)) stop("Package(s) '",paste(reqPa[which(!chPa)], collapse="','"),"' not found ! Please install first from CRAN")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
excluCol <- "^Abundances.Count" # exclude this from quantifications columns
cleanDescription <- TRUE # clean 'Description' for artifacts of truncated text (tailing ';' etc)
infoDat <- infoFi <- setupSd <- parametersD <- annot <- annotMQ <- annotPL <- NULL # initialize
## check if path & file exist
paFi <- wrMisc::checkFilePath(fileName, path, expectExt="csv", compressedOption=TRUE, stopIfNothing=TRUE, callFrom=fxNa, silent=silent,debug=debug)
## read (main) file
## future: look for fast reading of files
# read.delim("C:\\E\\projects\\MassSpec\\smallProj\\ElixirBenchmark\\deWombat\\deGit24may23\\PXD009815dev\\dev\\stand_prot_quant_mergedcompomics.csv", sep=",")
tmp <- try(utils::read.delim(paFi, sep=",", stringsAsFactors=FALSE), silent=TRUE)
if(length(tmp) <1 || inherits(tmp, "try-error") || length(dim(tmp)) <2) {
if(inherits(tmp, "try-error")) warning("Unable to read input file ('",paFi,"')! (check format or if rights to read)") else {
if(!silent) message(fxNa,"Content of file '",paFi,"' seeps empty or non-conform ! Returning NULL; check if this is really a Compomics-file") }
tmp <- NULL
return(NULL)
} else {
## start checking format of initial read of Normalizer-output
if(debug) { message(fxNa,"rWB1 .. dims of initial data : ", nrow(tmp)," li and ",ncol(tmp)," col "); rWB1 <- list(fileName=fileName,path=path,paFi=paFi,tmp=tmp,normalizeMeth=normalizeMeth,read0asNA=read0asNA,quantCol=quantCol,
refLi=refLi,separateAnnot=separateAnnot )} # annotCol=annotCol,FDRCol=FDRCol
## check which columns can be extracted (for annotation)
if(length(extrColNames) <1) { extrColNames <- colnames(tmp)[1] # default : use only 1st col for annotation
} else {
if(is.integer(contamCol) && length(contamCol) >0) contamCol <- colnames(tmp)[contamCol]
extrColNames <- union(extrColNames, contamCol) # add contamCol if not included in extrColNames
chCol <- extrColNames %in% colnames(tmp)
if(!any(chCol, na.rm=TRUE)) { extrColNames <- gsub("\\."," ",extrColNames)
chCol <- extrColNames %in% colnames(tmp) }
if(all(!chCol, na.rm=TRUE)) stop("Problem locating annotation columns (",wrMisc::pasteC(extrColNames, quoteC="''"),")")
if(any(!chCol, na.rm=TRUE)) {
if(!silent) message(fxNa,"Note: Can't find columns ",wrMisc::pasteC(extrColNames[!chCol], quoteC="'")," !")}
}
if(debug) {message(fxNa,"rWB1c"); rWB1c <- list(tmp=tmp,paFi=paFi,fasta=fasta,quantCol=quantCol,extrColNames==extrColNames)}
}
if(length(tmp) >0) {
## check for lines with absent IDs => eliminate
chNa <- which(if("protein_group" %in% colnames(tmp)) is.na(tmp[,"protein_group"]) else rowSums(is.na(tmp))==ncol(tmp))
if(length(chNa) >0) {
if(!silent) message(fxNa,"Removing ",length(chNa)," lines since absent ID or all NA (won't be able to do anything lateron withour ID ..)")
tmp <- tmp[-chNa,]
}
if(debug) {message(fxNa,"rWB1d"); rWB1d <- list(tmp=tmp,paFi=paFi,fasta=fasta,quantCol=quantCol,extrColNames==extrColNames)}
## further extracting : quantitation
useDCol <- grep(paste0("^",quantCol), colnames(tmp))
if(length(useDCol) <1) stop("NO columns matching term ",wrMisc::pasteC(quantCol, quoteC="'")," from argument 'quantCol' found !")
abund <- as.matrix(tmp[,useDCol]) # normalized log2 abundances
if(debug) {message(fxNa,"rWB1e"); rWB1e <- list(tmp=tmp,paFi=paFi,fasta=fasta,abund=abund)}
#iniAbundColNa <- colnames(abund)
chNum <- try(is.numeric(abund), silent=TRUE)
    if(!chNum) {abund <- try(apply(tmp[,useDCol], 2, wrMisc::convToNum, convert="allChar", silent=silent, callFrom=fxNa), silent=TRUE)
if(inherits(abund, "try-error")) {datOK <- FALSE; warning(fxNa,"CANNOT transform 'abund' to numeric data !'")} }
if(length(dim(abund)) <2 && !is.numeric(abund)) abund <- matrix(as.numeric(abund), ncol=ncol(abund), dimnames=dimnames(abund))
ch1 <- grepl("^abundance_CT\\.mixture\\.QY\\.", colnames(abund))
if(all(ch1)) colnames(abund) <- sub("abundance_CT\\.mixture\\.QY\\.", "", colnames(abund))
ch1 <- grepl("^abundance_", colnames(abund))
if(all(ch1)) colnames(abund) <- sub("abundance_", "", colnames(abund))
ch1 <- grepl("\\.CV\\.Standards\\.Research\\.Group", colnames(abund))
if(all(ch1)) colnames(abund) <- sub("\\.CV\\.Standards\\.Research\\.Group", "", colnames(abund))
trimColNames <- FALSE ## further trim quantitation colnames
if(trimColNames) { ## further trim
colnames(abund) <- wrMisc::.trimFromStart(wrMisc::.trimFromEnd( sub(paste0("^",quantCol),"", colnames(abund))))
## no trim needed for Wombat ?
}
if(debug) {message(fxNa,"rWB3"); rWB3 <- list(abund=abund,paFi=paFi,path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,remConta=remConta,pepCountCol=pepCountCol,fasta=fasta)}
## convert 0 to NA
    ## note : TPP abundance values are all negative !!
chNa <- !all(is.na(abund))
if(chNa) {
chRa <- range(abund, na.rm=TRUE)
if(all(chRa <0) & isTRUE(read0asNA)) {read0asNA <- FALSE
if(debug) message(fxNa,"All abundance values are <0 ! (omit transforming neg values to NA)") } }
if(!isFALSE(read0asNA)) {
ch1 <- abund <= 0
if(any(ch1, na.rm=TRUE)) { abund[which(ch1)] <- NA
if(!silent) message(fxNa,"Transform ",sum(ch1),"(",100*round(sum(ch1)/length(ch1),3),"%) initial '0' values to 'NA'")}}
    ## further extracting : prepare for counting data
ch1 <- grep(pepCountCol[1], colnames(tmp))
if(length(ch1) ==ncol(abund)) {
      counts <- array(dim=c(nrow(tmp), ncol(abund), length(pepCountCol)), dimnames=list(NULL, colnames(abund), pepCountCol))   # 3rd dim sized to hold all count-types
counts[,,1] <- suppressWarnings(as.numeric(as.matrix(tmp[,ch1])))
} else {
counts <- NULL
if(!silent) message(fxNa,"Could not find column(s) with peptide per protein counts (argument 'pepCountCol') matching to '",pepCountCol,"'") }
if(debug) {message(fxNa,"rWB4"); rWB4 <- list(abund=abund,counts=counts,annot=annot,paFi=paFi,path=path,chPa=chPa,tmp=tmp,pepCountCol=pepCountCol,extrColNames=extrColNames,chCol=chCol,remConta=remConta,quantSoft=quantSoft,fasta=fasta,annotMQ=annotMQ)}
## add more measure-types if avail
if(length(pepCountCol) >1) {
for(i in 2:length(pepCountCol)) {
ch1 <- grep(pepCountCol[i], colnames(tmp))
if(length(ch1) ==ncol(abund)) {
counts[,,i] <- suppressWarnings(as.numeric(as.matrix(tmp[,ch1]))) }
}
}
if(debug) {message(fxNa,"rWB4a"); rWB4a <- list(abund=abund,counts=counts,annot=annot,paFi=paFi,path=path,chPa=chPa,tmp=tmp,pepCountCol=pepCountCol,extrColNames=extrColNames,chCol=chCol,remConta=remConta,quantSoft=quantSoft,fasta=fasta,annotMQ=annotMQ)}
## Annotation
if(any(c("CP","Compomics","compomics") %in% quantSoft) && extrColNames[1] %in% colnames(tmp)) { # special case PL: parse annot from column 'protein_group'
annotPL <- .cleanSingleAnn(tmp[,extrColNames[1]]) #"protein_group"
colnames(annotPL)[2] <- "protein_group"
}
if(any(c("MQ","MaxQuant","maxquant") %in% quantSoft) && extrColNames[1] %in% colnames(tmp)) { # special case MQ: parse annot from column 'protein_group'
annotMQ <- .cleanMQann(tmp[,extrColNames[1]]) #"protein_group"
}
if(any(c("PL","Proline","proline") %in% quantSoft) && extrColNames[1] %in% colnames(tmp)) { # special case PL: parse annot from column 'protein_group'
annotPL <- .cleanPLann(tmp[,extrColNames[1]]) #"protein_group"
}
if(any(c("TPP","tpp") %in% quantSoft) && extrColNames[1] %in% colnames(tmp)) { # special case CP: remove lower-case extensions from 'uniqueIdentifier'
annotPL <- .cleanPLann(tmp[,extrColNames[1]]) #"protein_group"
}
if(debug) {message(fxNa,"rWB4aa"); rWB4aa <- list(annotMQ=annotMQ,annotPL=annotPL,abund=abund,counts=counts,annot=annot,paFi=paFi,path=path,chPa=chPa,tmp=tmp,pepCountCol=pepCountCol,extrColNames=extrColNames,chCol=chCol,remConta=remConta,quantSoft=quantSoft,fasta=fasta,annotMQ=annotMQ)}
## read fasta from higher dir (specific to Wombat)
if(length(fasta) >0) {fasta <- fasta[1]; if(isFALSE(fasta) || is.na(fasta)) fasta <- NULL}
if(isTRUE(fasta)) {
hiDir <- dir(file.path(dirname(paFi),".."))
chFa <- grep("\\.fasta$", hiDir)
faFi <- if(length(chFa) >0) file.path(dirname(paFi),"..",hiDir[chFa[1]]) else NULL
} else faFi <- fasta
if(length(faFi) >0) { # has fasta for recuperating annotation
fasta <- try(readFasta2(filename=faFi, tableOut=TRUE, silent=silent,debug=debug,callFrom=fxNa), silent=TRUE)
## Potential problem with inconsistent format of fasta
if(inherits(fasta, "try-error")) { fasta <- NULL
if(!silent) message(fxNa,"Unable to read/open fasta file '",faFi,"' (check rights to read ?)")
} else {
tmpAnn <- if(length(annotMQ) >0) annotMQ[,2] else {if(length(annotPL) >0) annotPL[,2] else tmp[,extrColNames[1]]} # 'P02768' still missing
tm2 <- wrMisc::concatMatch(tmpAnn, fasta[,2], sepPattern=NULL, globalPat="digitExtension", silent=silent, debug=debug, callFrom=fxNa) # clean protein-names (eg digit extensions, concateneated IDs) & match to data
iniAnn <- if(length(annotMQ) >0) annotMQ else {if(length(annotPL) >0) annotPL else cbind(iniSoftAnn=tmp[,extrColNames[1]])}
colnames(iniAnn) <- c("iniSoftAnn", if(ncol(iniAnn) >1) paste0(colnames(iniAnn)[-1],".",quantSoft))
useFaCol <- match(c("uniqueIdentifier","entryName","proteinName","OS","OX","GN","database"), colnames(fasta)) # do not export full 'sequence'
annot <- cbind(trimIdentifier=names(tm2), fasta[tm2, useFaCol], iniAnn=tmpAnn)
if(debug) {message(fxNa,"rWB4ab"); rWB4ab <- list(abund=abund,counts=counts,annot=annot,fasta=fasta,tmp=tmp,chNa=chNa,annotPL=annotPL,annotMQ=annotMQ,extrColNames=extrColNames,extrColNames=extrColNames) }
foundFastaCol <- !is.na(useFaCol)
if(any(foundFastaCol)) colnames(annot)[1:sum(foundFastaCol)] <- c("Accession","AccessionFull","Description","EntryName","Species","OX","GeneName","Database")[which(foundFastaCol)]
## strip species details
if("Species" %in% annot) annot[,"Species"] <- sub(" \\(.+", "", annot[,"Species"])
}
} else {
if(debug) message(fxNa,"NO fasta available !")
annot <- if(length(annotMQ) >0) annotMQ else {if(length(annotPL) >0) annotPL else tmp[,extrColNames[1]]}
if(length(annotMQ) <1 && length(annotPL) <1) {
sep <- ","
if(debug) message(fxNa,"rWB4ab")
## if no other annot : pick 1st of multiple, clean from lowerCase extension, clean from -digit extension
## need to test further ?
chMult <- nchar(tmp[,extrColNames[1]]) - nchar(sub(sep, "", annot))
        if(any(chMult >0)) annot <- sapply(strsplit(annot, sep), function(x) {nCha <- nchar(x); if(length(x) >1) x[which.max(nCha)] else x })
annot <- cbind(Accession=sub("\\-[[:digit:]]+$","", sub("[[:lower:]]+$","", annot)), IniAccession=tmp[,extrColNames[1]])
} else {
## need to arrange columns to proper order & names
warning(fxNa,"NOTE : Supplemental arranging of columns to proper order & names not yet implemented")
}
if(debug) message(fxNa,"NOTE : No fasta-file found in main directory ...")
}
if(debug) {message(fxNa,"dim annot ",nrow(annot)," ",ncol(annot)," rWB4b"); rWB4b <- list(annot=annot,faFi=faFi,abund=abund,tmp=tmp,fasta=fasta,annotMQ=annotMQ)}
## check ID col of annot
chID <- match(c("Accession","protein_group","uniqueIdentifier"), colnames(annot))
if(all(is.na(chID))) { chID <- wrMisc::naOmit(match(c("protein_group","ID"), colnames(annot)))
if(length(chID) < 1) warning("PROBLEM : UNEXPECTED colnames in annot") #else colnames(annot)[chID][1] <- "Accession"
}
if(!identical(wrMisc::naOmit(chID), 1) || length(wrMisc::naOmit(chID)) >0) {
annot <- annot[,c(wrMisc::naOmit(chID)[1], (1:ncol(annot))[-wrMisc::naOmit(chID)[1]] )] # adjust order to have ID in 1st column
colnames(annot)[1] <- "Accession"
if(!silent) message(fxNa,"Adjusting order of annot to have ID in 1st column") }
## remove lines wo IDs
chNa <- is.na(annot[,1])
if(any(chNa)) {
if(!silent) message(fxNa,"Removing ",sum(chNa)," out of ",nrow(abund)," lines wo ID")
rmLi <- which(chNa)
tmp <- tmp[-rmLi,]
annot <- annot[-rmLi,]
if(length(dim(annot)) <2) annot <- matrix(annot, ncol=1, dimnames=list(NULL,colnames(tmp)[1]))
abund <- abund[-rmLi,]
if(length(counts) >0) counts <- if(length(dim(counts))==3) counts[-rmLi,,] else counts[-rmLi,]
}
if(debug) {message(fxNa,"dim annot",nrow(annot)," ",ncol(annot)," rWB4d"); rWB4d <- list(annot=annot,faFi=faFi,abund=abund,tmp=tmp)}
## unique ID
chD <- duplicated(annot[,1])
uniqueID <- if(any(chD, na.rm=TRUE)) wrMisc::correctToUnique(annot[,1], silent=silent, callFrom=fxNa) else annot[,1] # extrColNames[1]
rownames(annot) <- rownames(abund) <- uniqueID
if(length(counts) >0) rownames(counts) <- uniqueID
if(debug) {message(fxNa,"rWB4e"); rWB4e <- list(paFi=paFi,path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,specPref=specPref)}
## remove Wombat contaminants
#useColumn <- wrMisc::naOmit(match(c("Accession","protein_group"), colnames(annot)))
conLi <- grep("CON__[[:alnum:]]", annot[, if(ncol(annot) >1) wrMisc::naOmit(match(c("Accession","protein_group"), colnames(annot)))[1] else 1])
if(remConta) {
if(length(conLi) >0) {
iniLi <- nrow(annot)
annot <- annot[-conLi,]
abund <- abund[-conLi,]
counts <- if(length(dim(counts))==3) counts[-conLi,,] else counts[-conLi,]
if(debug) message(fxNa,"Removing ",length(conLi)," instances of contaminants to final ",nrow(annot)," lines/IDs")}
}
## split Annotation
if(debug) {message(fxNa,"rWB4f"); rWB4f <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,specPref=specPref)}
## finalize annotation
chCols <- c("EntryName","GeneName","Species","Contam","Description")
chCol2 <- chCols %in% colnames(annot)
if(any(!chCol2)) annot <- cbind(annot, matrix(NA, nrow=nrow(annot), ncol=sum(!chCol2), dimnames=list(NULL, chCols[which(!chCol2)]))) # add columns so far not present
if(!remConta && length(conLi) >0) annot[conLi, "Contam"] <- "TRUE"
if(debug) {message(fxNa,"rWB5"); rWB5 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,remStrainNo=remStrainNo, specPref=specPref)}
## extract species according to custom search parameters 'specPref'
if(remStrainNo && any(!is.na(annot[,"Species"]))) {
annot[,"Species"] <- sub(" \\(strain [[:alnum:]].+","", annot[,"Species"])
}
## complete species annot by info extracted from fasta : ' OS='
.completeSpeciesAnnot <- function(spe=c("Homo sapiens", "_HUMAN"), anno=annot, exCoNa=c("Species", "EntryName","name","proteinName")) { # re-written 12jun23
## complete species if missing in anno[,exCoNa[2]] but found in anno[,exCoNa[1]]; return corrected anno
chNa <- is.na(anno[,exCoNa[1]]) | nchar(anno[,exCoNa[1]]) <1 # missing (species) annotation
if(any(chNa, na.rm=TRUE)) { # suppose that all 'exCoNa' are present as colnames in 'annot'
useColumn <- if(all(is.na(anno[,exCoNa[2]]))) wrMisc::naOmit(match(exCoNa[3:length(exCoNa)], colnames(anno))) else exCoNa[2]
if(length(useColumn) >1) useColumn <- useColumn[1]
chS <- grep(spe[1], anno[,useColumn])
if(length(chS) >0) anno[chS, exCoNa[1]] <- spe[2]
}
anno }
if(isTRUE(fixSpeciesNames)) { # try to recuperate/fix non-given/bad formatted species
chNa <- is.na(annot[,"Species"])
if(any(chNa)) {
commonSpec <- .commonSpecies()
for(i in 1:nrow(commonSpec)) annot[which(chNa),] <- .completeSpeciesAnnot(commonSpec[i,], annot[which(chNa),], exCoNa=c("Species","EntryName","name","proteinName")) }
if(debug) {message(fxNa,"rWB6"); rWB6 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,counts=counts,
quantCol=quantCol,abund=abund,chNum=chNum,annot=annot,remConta=remConta,remStrainNo=remStrainNo, specPref=specPref)}
## check/complete for truncated species names (ie names found inside other ones)
chSpe <- which(!is.na(annot[,"Species"]) & nchar(annot[,"Species"]) >0)
if(length(chSpe) >0) {
OS <- gsub(";{1,5}$", "", annot[chSpe,"Species"]) # remove tailing separators
OSna <- unique(OS)
ch1 <- nchar(OSna) <1
if(debug) {message(fxNa,"rWB6b")}
if(any(ch1, na.rm=TRUE)) OSna <- OSna[which(nchar(OSna) >0)] # (just in case) remove empty tags
ch2 <- lapply(OSna, grep, OSna)
chTr <- sapply(ch2, length) >1
if(any(chTr, na.rm=TRUE)) { if(!silent) message(fxNa,"Found ",sum(chTr)," species name(s) appearing inside other ones, assume as truncated (eg ",OSna[which(chTr)[1]],")")
for(i in which(chTr)) OS[which(OS==OSna[i])] <- OSna[ch2[[i]][1]]
}
annot[chSpe,"Species"] <- OS }
}
## in case "Accession" is avail not "EntryName" is not
if(debug) {message(fxNa,"rWB7"); rWB7 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,quantCol=quantCol,remStrainNo=remStrainNo,
abund=abund,chNum=chNum,specPref=specPref, annot=annot,remConta=remConta,counts=counts)}
## look for tags from specPref
if(length(specPref) >0) {
## set annot[,"specPref"] according to specPref
annot <- .extrSpecPref(specPref, annot, useColumn=c("Description","Species","EntryName","GeneName"), silent=silent, debug=debug, callFrom=fxNa)
} else if(debug) message(fxNa,"Note: Argument 'specPref' not specifed (empty)")
if(debug) {message(fxNa,"rWB7b") }
if(!silent) { chSp <- sum(is.na(annot[,"Species"]))
if(chSp >0) message(fxNa,"Note: ",chSp," proteins with unknown species")
tab <- table(annot[,"Species"])
if(length(tab) >0) {
tab <- rbind(names(tab), paste0(": ",tab,", "))
if(!silent) message(" data by species : ", apply(tab, 2, paste)) } } # all lines assigned
if(debug) {message(fxNa,"rWB8"); rWB8 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,quantCol=quantCol,remStrainNo=remStrainNo,
abund=abund,chNum=chNum, annot=annot,remConta=remConta,counts=counts) }
## look for unique col from $annot to use as rownames
if(nrow(annot) <1) warning("annot is empty (NO lines)")
## maybe annot is empty ?
chAn <- colSums(apply(annot[,c(1:min(ncol(annot),7))], 2, duplicated), na.rm=TRUE)  # look at (max) first 7 cols : how many elements per column are duplicated
if(!silent) message(fxNa,"Use column '",colnames(annot)[which.min(chAn)],"' as identifier (has fewest, ie ",chAn[which.min(chAn)]," duplicated entries) as rownames")
rownames(abund) <- rownames(annot) <- if(any(chAn==0)) annot[,which(chAn==0)[1]] else wrMisc::correctToUnique(annot[,which.min(chAn)], callFrom=fxNa)
if(length(counts) >0) rownames(counts) <- rownames(annot)
if(debug) {message(fxNa,"rWB9"); rWB9 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,quantCol=quantCol,abund=abund,chNum=chNum,
annot=annot,refLi=refLi,remConta=remConta)}
## check for reference for normalization
refLiIni <- refLi
if(is.character(refLi) && length(refLi)==1) {
refLi <- which(annot[,"SpecType"]==refLi)
if(length(refLi) <1 ) { refLi <- 1:nrow(abund)
if(!silent) message(fxNa,"Could not find any proteins matching argument 'refLi=",refLiIni,"', ignoring ...")
} else {
if(!silent) message(fxNa,"Normalize using (custom) subset of ",length(refLi)," lines specified as '",refLiIni,"'")}} # may be "mainSpe"
## take log2 & normalize
quant <- try(wrMisc::normalizeThis(if(isLog2) abund else log2(abund), method=normalizeMeth, mode="additive", refLines=refLi, silent=silent, debug=debug, callFrom=fxNa), silent=TRUE)
if(inherits(quant, "try-error")) { warning(fxNa,"PROBLEMS ahead : Unable to normalize as log2-data !!") }
if(debug) {message(fxNa,"rWB10"); rWB10 <- list(path=path,chPa=chPa,tmp=tmp,extrColNames=extrColNames,chCol=chCol,quantCol=quantCol,abund=abund,chNum=chNum,
quant=quant,annot=annot,remConta=remConta,groupPref=groupPref,quantSoft=quantSoft,suplAnnotFile=suplAnnotFile, sdrf=sdrf,paFi=paFi )}
### GROUPING OF REPLICATES AND SAMPLE META-DATA
## prepare for sdrf (search in directory above)
if(isTRUE(sdrf)) {
hiDir <- dir(file.path(dirname(paFi),".."))
chFa <- grep("^sdrf.+\\.tsv$", hiDir)
if(length(chFa) >0) sdrf <- file.path(dirname(paFi),"..",hiDir[chFa[1]]) else {sdrf <- NULL
if(!silent) message(fxNa,"NO sdrf file found in directory above main data !")}
}
if(length(suplAnnotFile) >0 || length(sdrf) >0) {
headAbund <- utils::head(quant)
chX <- grepl("^X[[:digit:]]",colnames(quant)) #check for heading X in all colnames
if(any(chX)) colnames(headAbund)[which(chX)] <- sub("^X", "", colnames(headAbund)[which(chX)])
## check for matching : (as done within readSampleMetaData) - can't , sdrf not read yet ...
setupSd <- readSampleMetaData(sdrf=sdrf, suplAnnotFile=suplAnnotFile, quantMeth=paste0("WB",quantSoft), path=NULL, abund=headAbund, groupPref=groupPref, silent=silent, debug=debug, callFrom=fxNa)
}
if(debug) {message(fxNa,"rWB13 .."); rWB13 <- list(sdrf=sdrf,gr=gr,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,sampleNames=sampleNames)}
## finish groups of replicates & annotation setupSd
setupSd <- .checkSetupGroups(abund=abund, setupSd=setupSd, gr=gr, sampleNames=sampleNames, quantMeth="WB", silent=silent, debug=debug, callFrom=fxNa)
colNa <- if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames else setupSd$groups
chGr <- grepl("^X[[:digit:]]", colNa) # check & remove heading 'X' from initial column-names starting with digits
if(any(chGr)) colNa[which(chGr)] <- sub("^X","", colNa[which(chGr)]) #
colnames(quant) <- colnames(abund) <- colNa
if(length(setupSd$sampleNames)==ncol(abund)) setupSd$sampleNames <- colNa else setupSd$groups <- colNa
if(length(dim(counts)) >1 && length(counts) >0) colnames(counts) <- colNa
if(debug) {message(fxNa,"Read sample-meta data, rWB14"); rWB14 <- list(sdrf=sdrf,suplAnnotFile=suplAnnotFile,abund=abund, quant=quant,refLi=refLi,annot=annot,setupSd=setupSd,plotGraph=plotGraph,normalizeMeth=normalizeMeth,isLog2=isLog2)}
## main plotting of distribution of intensities
custLay <- NULL
if(is.numeric(plotGraph) && length(plotGraph) >0) {custLay <- as.integer(plotGraph); plotGraph <- TRUE} else {
if(!isTRUE(plotGraph)) plotGraph <- FALSE}
if(plotGraph) .plotQuantDistr(abund=if(isFALSE(isLog2) || "none" %in% normalizeMeth) NULL else abund, quant=quant, custLay=custLay, normalizeMeth=normalizeMeth, softNa=paste("Wombat-P",quantSoft),
refLi=refLi, refLiIni=refLiIni, tit=titGraph, silent=silent, callFrom=fxNa, debug=debug)
## meta-data
notes <- c(inpFile=paFi, qmethod=paste("Wombat-P",quantSoft), qMethVersion=if(length(infoDat) >0) unique(infoDat$Software.Revision) else NA,
rawFilePath= if(length(infoDat) >0) infoDat$File.Name[1] else NA, normalizeMeth=normalizeMeth, call=deparse(match.call()),
created=as.character(Sys.time()), wrProteo.version=utils::packageVersion("wrProteo"), machine=Sys.info()["nodename"])
## final output
if(isTRUE(separateAnnot)) list(raw=abund, quant=quant, annot=annot, counts=counts, sampleSetup=setupSd, quantNotes=parametersD, notes=notes) else data.frame(quant,annot) }
}
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/readWombatNormFile.R
#' Remove Samples/Columns From list of matrixes
#'
#' @description
#' Remove samples (ie columns) from every instance of list of matrixes.
#' Note: This function assumes same order of columns in list-elements 'listElem' !
#'
#' @param dat (list) main input to be filtered
#' @param remSamp (integer) column number to exclude
#' @param listElem (character) names of list-elements where columns indicated with 'remSamp' should be removed
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns the input list with the columns indicated via 'remSamp' removed from each matrix-like list-element (and from $sampleSetup, if present)
#' @seealso \code{\link{testRobustToNAimputation}}
#' @examples
#' set.seed(2019)
#' datT6 <- matrix(round(rnorm(300)+3,1), ncol=6, dimnames=list(paste("li",1:50,sep=""),
#' letters[19:24]))
#' datL <- list(raw=datT6, quant=datT6, annot=matrix(nrow=nrow(datT6), ncol=2))
#' datDelta2 <- removeSampleInList(datL, remSamp=2)
#' @export
removeSampleInList <- function(dat, remSamp, listElem=c("raw","quant","counts","sampleSetup"), silent=FALSE, debug=FALSE, callFrom=NULL) {
##
fxNa <- wrMisc::.composeCallName(callFrom, newNa="removeSampleInList")
msg <- c("'dat' should be list or S3-object with $raw, $quant, $annot","; invalid entry - can't do anything ...","'remSamp' should be index of columns to remove")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
out <- transpEl <- NULL
datOK <- TRUE
if(length(dat) <1 || !is.list(dat)) { datOK <- FALSE
if(!silent) message(fxNa, msg[1:2])}
if(length(remSamp) <1) {datOK <- FALSE
if(!silent) message(fxNa, msg[3:2])}
## main
if(datOK) {
chLst <- listElem %in% names(dat)
if(sum(chLst) <1) {
warning("Can't find any of the list-elements defined via 'listElem' - nothing to do")
datOK <- chRm <- FALSE
} else { # (some) list-elements fit dat
listElemI <- wrMisc::naOmit(match(listElem, names(dat))) # remove non-existing list-elements
if(debug) { message(fxNa," rSIL1"); rSIL1 <- list(dat=dat,remSamp=remSamp,listElem=listElem,listElemI=listElemI)}
if(length(listElemI) >1) {
liDim <- lapply(dat[listElemI], dim)
chLiDim <- sapply(liDim, length)
if(any(chLiDim <1)) { # remove linear (or lists like $sampleSetup)
if(debug) message(fxNa,"Don't treat list-elements ",wrMisc::pasteC(names(dat)[which(chLiDim <2)], quoteC="'")," (not matrix, data.frame or array)")
listElemI <- listElemI[-which(chLiDim <2)]
liDim <- liDim[-which(chLiDim <2)]
} }
if(debug) { message(fxNa," rSIL2"); rSIL2 <- list()}
if(length(listElemI) >1) {
nCol <- sapply(liDim, function(x) x[2])
chDupNC <- duplicated(nCol, fromLast=FALSE)
if(any(!chDupNC[-1])) {
useNCol <- names(which.max(table(nCol))) # assume most frequent is good one !
if(debug) message(fxNa,"Variable number of columns, using most frequent : ",useNCol," columns in ",wrMisc::pasteC(names(dat)[listElemI[which(nCol==useNCol)]], quoteC="'"))
nRow <- sapply(liDim, function(x) x[1])
chTra <- nCol == nRow & nCol != useNCol
if(any(chTra)) { # transpose case
transpEl <- which(chTra)
listElemI <- listElemI[-which(chTra)]
if(debug) message(fxNa,"Matrix(es) ",wrMisc::pasteC(names(dat)[which(chTra)], quoteC="'"," to be trated as transposed ") )
} else listElemI <- listElemI[which(nCol==useNCol)]
}
}
if(debug) { message(fxNa," rSIL3"); rSIL3 <- list(dat=dat,remSamp=remSamp,listElem=listElem,listElemI=listElemI,transpEl=transpEl)}
if(length(listElemI) >1) { ## remove columns
## convert text-entries to index ?
if(!is.integer(remSamp)) remSamp <- try(as.integer(remSamp), silent=TRUE)
if(inherits(remSamp, "try-error")) stop("Invalid argument 'remSamp' (must be integer to design column(s) to be removed) !")
chRm <- remSamp %in% 1:ncol(dat[[listElemI[1]]])
if(all(!chRm)) stop("Invalid columns selected !")
if(any(!chRm)) {
if(!silent) message(fxNa,"Removing column(s) ",wrMisc::pasteC(remSamp[which(!chRm)], quoteC="'"," from 'remSamp' (not existing in data !)"))
remSamp <- remSamp[which(chRm)]
}
if(debug) { message(fxNa," rSIL4"); rSIL4 <- list()}
## main removing of columns
for(i in listElemI) { dat[[i]] <- if(length(dim(dat[[i]])) ==2) {if(length(remSamp) < ncol(dat[[i]]) -1) dat[[i]][,-remSamp] else {
matrix(dat[[i]][,-remSamp], ncol=1, dimnames=list(rownames(dat[[i]]), colnames(dat[[i]])[-remSamp])) }
} else if(length(remSamp) < ncol(dat[[i]]) -1) dat[[i]][,-remSamp,] else array(dat[[i]][,-remSamp,], dim=c(nrow(dat[[i]]),1, dim(dat[[i]])[3]))   # not fully tested ?
}
if(debug) message(fxNa,"Removed column(s) number ",wrMisc::pasteC(remSamp)," from matrix content")
if(length(transpEl) >0) for(i in transpEl) dat[[i]] <- dat[[i]][-remSamp,]
if(debug) { message(fxNa," rSIL5"); rSIL5 <- list(dat=dat,remSamp=remSamp,listElem=listElem,listElemI=listElemI,transpEl=transpEl,chRm=chRm)}
if("sampleSetup" %in% listElem) { # special case...
if(debug) message(fxNa,"list-element $sampleSetup found, treat ..")
for(i in c("lev","level","groups","sampleNames")) if(i %in% names(dat$sampleSetup)) dat$sampleSetup[[i]] <- dat$sampleSetup[[i]][-remSamp]
if(length(dat$sampleSetup$sdrfDat) >0) dat$sampleSetup$sdrfDat <- if(length(remSamp) < nrow(dat$sampleSetup$sdrfDat) -1) dat$sampleSetup$sdrfDat[-remSamp,] else {
matrix(dat$sampleSetup$sdrfDat[-remSamp,], nrow=1, dimnames=list(rownames(dat$sampleSetup$sdrfDat)[-remSamp], colnames(dat$sampleSetup$sdrfDat))) }
if(length(dat$sampleSetup$annotBySoft) >0) dat$sampleSetup$annotBySoft <- if(length(remSamp) < nrow(dat$sampleSetup$annotBySoft) -1) dat$sampleSetup$annotBySoft[-remSamp,] else {
matrix(dat$sampleSetup$annotBySoft[-remSamp,], nrow=1, dimnames=list(rownames(dat$sampleSetup$annotBySoft)[-remSamp], colnames(dat$sampleSetup$annotBySoft))) }
}
}
}
}
if(datOK) dat else NULL }
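## Minimal usage sketch (hypothetical toy data, not from the package tests) : remove two samples
## from a list that also carries a 3-dim counts-array and a $sampleSetup, the two structures
## receiving special treatment above.
datT6b <- matrix(round(rnorm(300) +3, 1), ncol=6, dimnames=list(paste0("li",1:50), letters[19:24]))
datL2 <- list(raw=datT6b, quant=datT6b, counts=array(1, dim=c(50,6,2), dimnames=list(NULL, letters[19:24], NULL)),
  sampleSetup=list(groups=rep(c("A","B"), each=3), sampleNames=letters[19:24]))
datL2red <- removeSampleInList(datL2, remSamp=c(2,5))
sapply(datL2red[c("raw","quant")], ncol)        # both reduced to 4 columns
dim(datL2red$counts)                            # 50 4 2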
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/removeSampleInList.R
#' Complement Missing EntryNames In Annotation
#'
#' This function helps replacing missing EntryNames (in $annot) after reading quantification results.
#' To do so, the column-names given in \code{annCol} will be used :
#' The content of 2nd element (and optional 3rd element) will be used to replace missing content in column defined by 1st element.
#'
#' @param x (list) output of \code{readMaxQuantFile}, \code{readProtDiscovFile} or \code{readProlineFile}.
#' This list must contain \code{$annot} (matrix or data.frame) with the columns designated in \code{annCol}.
#' @param annCol (character) the column-names from \code{x$annot} which will be used : The first element designates the
#'   column where empty fields are searched and the 2nd and (optional) 3rd will be used to fill the empty spots in the 1st column
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a list (as the input), but with missing elements of $annot completed (if available in other columns)
#' @seealso \code{\link{readMaxQuantFile}}, \code{\link{readProtDiscovFile}}, \code{\link{readProlineFile}}
#' @examples
#' dat <- list(quant=matrix(sample(11:99,9,replace=TRUE), ncol=3), annot=cbind(EntryName=c(
#' "YP010_YEAST","",""),Accession=c("A5Z2X5","P01966","P35900"), SpecType=c("Yeast",NA,NA)))
#' replMissingProtNames(dat)
#' @export
replMissingProtNames <- function(x, annCol=c("EntryName","Accession","SpecType"), silent=FALSE, debug=FALSE, callFrom=NULL) {
## replace in $annot missing EntryNames by concatenating Accession + SpecType (ie 2nd & 3rd of annCol)
## move to wrProteo ?
fxNa <- wrMisc::.composeCallName(callFrom, newNa="replreplMissingProtNames")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
msg <- "Argument 'x' should be list containg list-element called 'annot' (matrix), as produced by readMaxQuantFile(), readProtDiscovFile() etc"
if(!is.list(x) | length(x) <1) stop(msg)
if(!"annot" %in% names(x)) stop(msg)
if(any(length(dim(x$annot)) !=2, dim(x$annot) < 2:3)) stop("x$annot must be matrix or data.frame with min 2 lines and 3 cols")
## main
chCol <- annCol %in% colnames(x$annot)
if(all(chCol[1:2])) {
if(length(chCol) >2) {if(!chCol[3]) chCol <- chCol[1:2] } # omit 3rd column-name if not present in x$annot
chNA1 <- is.na(x$annot[,annCol[1]]) | x$annot[,annCol[1]]==""
chNA2 <- is.na(x$annot[,annCol[2]]) | x$annot[,annCol[2]]==""
if(length(chCol) >2) {chNA3 <- is.na(x$annot[,annCol[3]]) | x$annot[,annCol[3]]==""
chNA <- chNA1 & (!chNA2 | !chNA3)
} else chNA <- chNA1 & !chNA2
if(any(chNA)) {
if(!silent) message(fxNa," ..trying to replace ",sum(chNA)," '",annCol[1],"'")
if(length(chCol) >2) {
x$annot[which(chNA),annCol[1]] <- paste(x$annot[which(chNA),annCol[2]], x$annot[which(chNA),annCol[3]], sep="_")
x$annot[which(chNA),annCol[1]] <- sub("_","", sub("_NA","", sub("NA_","", sub("NA NA","NA", x$annot[which(chNA),annCol[1]]))))
} else x$annot[which(chNA),annCol[1]] <- x$annot[which(chNA),annCol[2]] }
} else message(fxNa," Nothing to do. Column-names ",wrMisc::pasteC(annCol[which(!chCol)],quoteC="'")," not found in x$annot !")
x }
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/replMissingProtNames.R
#' Get Short Names of Proteomics Quantitation Software
#'
#' Get/convert short names of various proteomics quantitation software names.
#' A 2-letter abbreviation will be returned
#'
#' @param x (character) name(s) of quantitation software to be converted to 2-letter abbreviations
#' @param tryAsLower (logical) if no direct match is found, retry matching after removing '-' and converting to lower case
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allow easier tracking of messages produced
#' @return This function returns a character vector with 2-letter abbreviations of the software-names given in 'x' (\code{NA} for names not recognized)
#' @seealso \code{\link{massDeFormula}}, \code{\link[wrMisc]{convToNum}}
#' @examples
#' shortSoftwName(c("maxquant","DIANN"))
#' @export
shortSoftwName <- function(x, tryAsLower=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## convert software/algorithm names to 2-letter abbreviations
fxNa <- wrMisc::.composeCallName(callFrom, newNa="shortSoftwName")
if(isTRUE(debug)) silent <- FALSE
if(!isTRUE(silent)) silent <- FALSE
y <- cbind(softna=c("DIA-NN","ProteomeDiscoverer","Compomics","MaxQuant","Proline","TPP","FragPipe","MassChroQ","OpenMS"),
shortna= c("DN","PD","CP","MQ","PL","TP","FP","MC","OM") )
out <- y[match(x, y[,1]), 2]
chNa <- is.na(out)
if(any(chNa) && tryAsLower) out[which(chNa)] <- y[match(tolower(sub("\\-","",x[which(chNa)])), tolower(y[,1])), 2]
out }
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/shortSoftwName.R
#' Summarize statistical test result for plotting ROC-curves
#'
#' This function takes statistical testing results (obtained using \code{\link{testRobustToNAimputation}} or \code{\link[wrMisc]{moderTest2grp}},
#' based on \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}) and calculates specificity and sensitivity values for plotting ROC-curves along a panel of thresholds.
#' Based on annotation (from test$annot) with the user-defined column for species (argument 'spec') the counts of TP (true positives), FP (false positives), FN (false negatives) and TN are determined.
#' In addition, an optional plot may be produced.
#'
#' @details
#' Determining TP and FP counts requires 'ground truth' experiments, where it is known in advance which proteins are expected to change abundance between two groups of samples.
#' Typically this is done by mixing proteins of different species origin; the first species given in argument 'spec' designates the species to be considered constant (ie expected not to change in statistical tests).
#' Then, one or multiple additional spike-in species can be defined. As the spike-in concentration should have been altered between different groups of samples, they are expected as TP.
#'
#' The main aim of this function consists in providing specificity and sensitivity values, plus counts of TP (true positives), FP (false positives), FN (false negatives) and TN (true negatives),
#' along various thresholds (specified in column 'alph') for statistical tests performed prior to calling this function.
#'
#' Note that the choice of species-annotation plays a crucial role in how the counting results are obtained.
#' In case of multiple spike-in species the user should pay attention to whether they are all expected to change abundance at the same ratio.
#' If not, it is advised to run this function multiple times separately, each time with only the subset of those species expected to change at the same ratio.
#'
#' The dot on the plotted curve shows the results at the level of the single threshold alpha=0.05.
#' For plotting multiple ROC curves as overlay and additional graphical parameters/options you may use \code{\link{plotROC}}.
#'
#' See also \href{https://en.wikipedia.org/wiki/Receiver_operating_characteristic}{ROC on Wikipedia} for explanations of TP,FP,FN and TN as well as examples.
#' Note that numerous other packages also provide support for building and plotting ROC-curves : Eg \href{https://CRAN.R-project.org/package=dlstats}{rocPkgShort},
#' \href{https://CRAN.R-project.org/package=ROCR}{ROCR}, \href{https://CRAN.R-project.org/package=pROC}{pROC} or \href{https://CRAN.R-project.org/package=ROCit}{ROCit}
#'
#'
#' @param test (list or class \code{MArrayLM}, S3-object from limma) from testing (eg \code{\link{testRobustToNAimputation}} or \code{\link{test2grp}}
#' @param useComp (character or integer) in case of multiple comparisons (ie multiple columns of 'test$tyThr'); which pairwise comparison to use
#' @param tyThr (character,length=1) type of statistical test-result to be used for sensitivity and specificity calculations (eg 'BH','lfdr' or 'p.value'), must be list-element of 'test'
#' @param thr (numeric) stat test (FDR/p-value) threshold, if \code{NULL} a panel of 108 p-value threshold-levels will be used for calculating specificity and sensitivity
#' @param columnTest deprecated, please use 'useComp' instead
#' @param FCthrs (numeric) Fold-Change threshold (display as line) give as Fold-change and NOT as log2(FC), default at 1.5, set to \code{NA} for omitting
#' @param spec (character) labels for those species which should be matched to column \code{annotCol} ('spec') of test$annot and used for sensitivity and specificity calculations. Important : 1st entry for the species designated as constant (ie matrix) and subsequent labels for spike-ins (expected variable)
#' @param annotCol (character, length=1) column name of \code{test$annot} to use to separate species
#' @param filterMat (character) name (or index) of element of \code{test} containing matrix or vector of logical filtering results
#' @param batchMode (logical) if \code{batchMode=TRUE} the function will return an empty matrix if no proteins qualify for computing ROC (eg all spike-proteins not passing filters), and \code{plotROC} will be set to \code{FALSE}
#' @param tit (character) optional custom title in graph
#' @param color (character or integer) color in graph
#' @param plotROC (logical) toggle plot on or off
#' @param pch (integer) type of symbol to be used (see \code{\link[graphics]{par}})
#' @param bg (character) background in plot (see \code{\link[graphics]{par}})
#' @param overlPlot (logical) overlay to existing plot if \code{TRUE}
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) allows easier tracking of messages produced
#' @return This function returns a numeric matrix containing the columns 'alph', 'spec', 'sens', 'prec', 'accur', 'FDR' plus two columns with absolute numbers of lines (genes/proteins) passing the current threshold level alpha (1st species, all other species)
#' @seealso replot the figure using \code{\link{plotROC}}, calculate AUC using \code{\link{AucROC}}, robust test for preparing tables \code{\link{testRobustToNAimputation}}, \code{\link[wrMisc]{moderTest2grp}}, \code{\link{test2grp}}, \code{eBayes} in package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}, \code{\link[stats]{t.test}}
#' @examples
#' set.seed(2019); test1 <- list(annot=cbind(Species=c(rep("b",35), letters[sample.int(n=3,
#' size=150, replace=TRUE)])), BH=matrix(c(runif(35,0,0.01), runif(150)), ncol=1))
#' tail(roc1 <- summarizeForROC(test1, spec=c("a","b","c"), annotCol="Species"))
#'
#' @export
summarizeForROC <- function(test, useComp=1, tyThr="BH", thr=NULL, columnTest=NULL, FCthrs=NULL, spec=c("H","E","S"), annotCol="Species", filterMat="filter",
batchMode=FALSE, tit=NULL, color=1, plotROC=TRUE, pch=1, bg=NULL, overlPlot=FALSE, silent=FALSE, debug=FALSE, callFrom=NULL) {
## summarize testing results by species (1st entry of 'spec' is supposed to be the constant/reference species)
argN <- deparse(substitute(test))
fxNa <- wrMisc::.composeCallName(callFrom, newNa="summarizeForROC")
inclFilter <- TRUE # use $filtFin
badFCtoNA <- FALSE # how to disqualify FC not passing filter
## checking
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!isFALSE(plotROC)) plotROC <- TRUE
if(!isTRUE(overlPlot)) overlPlot <- FALSE
if(isTRUE(batchMode)) plotROC <- FALSE
if(!"annot" %in% names(test)) stop("test$annot is needed to map content of 'spec'")
chLst <- tyThr %in% names(test)
if(any(!chLst, na.rm=TRUE)) stop("Don't know what kind of test-results to use. Can't find element '",tyThr[!chLst],"' in elements of 'test' !!")
if(length(dim(test[[tyThr]])) !=2) stop("Can't find data.frame or matrix as list-element '",tyThr,"' in 'test'")
if(length(annotCol) <1) stop("Argument 'annotCol' must be specified (should be name of column of 'test$annot' to check for species from argument 'spec')")
if(length(spec) <1) stop("Argument 'spec' must be specified (should contain species names present in tst$annot$",annotCol,")")
msg <- "Argument 'useComp' points to comparison not existing, resetting to 1"
if(is.numeric(useComp)) {
  if(useComp > ncol(test[[tyThr]])) { warning(msg); useComp <- 1 }
} else if(!(useComp %in% colnames(test[[tyThr]]))) { warning(msg); useComp <- 1 }
if(debug) {message(fxNa,"sROC0"); sROC0 <- list(test=test, chLst=chLst,tyThr=tyThr,thr=thr,useComp=useComp,annotCol=annotCol,spec=spec)}
if(length(annotCol) >1) { annotCol <- annotCol[1]
if(!silent) message(fxNa,"NOTE : Argument 'annotCol' may not be longer than 1, using only 1st")}
if(!(annotCol %in% colnames(test$annot))) stop("Can't find column '",annotCol,"' from argument 'annotCol' in test$annot")
chSpec <- spec %in% test$annot[,annotCol]
if(!any(chSpec, na.rm=TRUE)) stop("None of the elements of argument 'spec' found in column '",annotCol,"' !!")
if(any(!chSpec, na.rm=TRUE)) message(fxNa,"Note : Species-types ",wrMisc::pasteC(unique(spec[which(!chSpec)]),quoteC="'")," will be ignored") # check for species tags not specified, ie ignored
if(debug) {message(fxNa,"sROC1"); sROC1 <- list(test=test,useComp=useComp,chLst=chLst,tyThr=tyThr,thr=thr,useComp=useComp,annotCol=annotCol,spec=spec)}
if(is.null(thr)) thr <- signif(c(as.numeric(sapply((1:4)*2, function(x) x*c(1e-4,1e-5,1e-6,1e-7))),
seq(0,1,length.out=50)^5, seq(0,1,length.out=50)^2, seq(0,1,length.out=61), 4^(-2:-10),1.01), 2)
thr <- sort(unique(abs(wrMisc::naOmit(thr)))) # 151 -> 108 values for default
pp <- matrix(nrow=length(thr), ncol=4, dimnames=list(NULL,c("TP","FP","FN","TN")))
oriKeep <- matrix(rep(TRUE,nrow(test[[tyThr]])), nrow=nrow(test[[tyThr]]), ncol=2, dimnames=list(NULL,c("filtFin","passFC"))) # the lines passing various filtering (filtFin, FCthrs)
if(debug) {message(fxNa,"sROC1b")}
## look for (global) filtering (from test[[filterMat]])
if(length(filterMat) ==1) if(filterMat %in% names(test) && inclFilter) {
chLe <- (if(length(dim(test[[filterMat]])) >1) nrow(test[[filterMat]]) else length(test[[filterMat]])) ==nrow(test[[tyThr]]) # check if filtFin seems to fit to testing results
if(chLe) {
if(length(dim(test[[filterMat]])) >1) test[[filterMat]] <- test[[filterMat]][,useComp]
if(sum(test[[filterMat]]) <1) {test[[filterMat]] <- rep(TRUE,length(test[[filterMat]])); message(fxNa,"Nothing passing filtering, ignoring filter !")
} else if(!silent) message(fxNa,"Filtering: ",sum(test[[filterMat]])," out of ",length(test[[filterMat]])," pass filtering")
## apply filtFin
if(any(!test[[filterMat]], na.rm=TRUE)) { oriKeep[which(!test[[filterMat]]),] <- FALSE
test[[tyThr]][which(!test[[filterMat]]),useComp] <- NA }
if(debug) {
message(fxNa,"Filter away ",sum(is.na(test[[tyThr]]))," instances (",round(100*sum(is.na(test[[tyThr]]))/prod(dim(test[[tyThr]])),1),"%); sROC1b")}
} else if(!silent) message(fxNa," ",argN,"[[",filterMat,"]] does not have same length as ",argN,"$",tyThr," , ignoring filter")
}
if(debug) {message(fxNa,"sROC2"); sROC2 <- list(test=test, chLst=chLst,tyThr=tyThr,thr=thr,useComp=useComp,chSpec=chSpec,pp=pp,oriKeep=oriKeep,filterMat=filterMat,inclFilter=inclFilter)}
## FC-filtering
if(length(FCthrs) ==1) if(is.numeric(FCthrs) && !is.na(FCthrs)) {
## FC-threshold, need to locate means to construct FC
chM <- "means" %in% names(test)
if("means" %in% names(test)) {
if(nrow(test$means) ==nrow(test[[tyThr]])) {
## identify sample-groups to comparison(s) - needed lateron
pairwCol <- wrMisc::sampNoDeMArrayLM(test, useComp, lstMeans="means", lstP=which(names(test)==tyThr),callFrom=fxNa,silent=silent)
grpMeans <- cbind(mean1=test$means[,pairwCol[1]], mean2=test$means[,pairwCol[2]])
FCval <- grpMeans[,2] - grpMeans[,1]
## FC-filter
chFC <- abs(FCval) >= log2(FCthrs)
if(any(!chFC, na.rm=TRUE)) {
oriKeep[which(!chFC),2] <- FALSE # update oriKeep
## disqualify FDR for those not passing FCthrs as FDR=1.0 so they won't get counted (appear only at end)
if(is.logical(badFCtoNA)) test[[tyThr]][which(!chFC ),useComp] <- if(badFCtoNA) NA else 1
## need also to explore other ways of dynamic combining FCthrs to FDRthrs
}
if(debug) {message(fxNa,"sROC2b"); sROC2b <- list(test=test, chM=chM,pairwCol=pairwCol,grpMeans=grpMeans,FCval=FCval, chLst=chLst,tyThr=tyThr,thr=thr,useComp=useComp,chSpec=chSpec,pp=pp,oriKeep=oriKeep,filterMat=filterMat,inclFilter=inclFilter)}
if(!silent) message(fxNa,"FC-filter: ",sum(chFC)," out of ",length(FCval)," pass 'FCthrs'=",FCthrs )
} else warning(argN,"$means has not correct number of rows, ignoring")
} else warning("Could not find suitable field '$means' in '",argN,"'")
} else { FCval <- NULL } # needed ?
if(sum(oriKeep[,2]) <2) warning("TROUBLE AHEAD: only ",sum(oriKeep[,2])," out of ",nrow(oriKeep)," lines pass filtering")
spiSpec <- if(ncol(test$annot) >1) {test$annot[,annotCol] %in% spec[-1]} else {test$annot[,1] %in% spec[-1]}    # locate lines NOT from 1st species (ie spike-in, NOT matrix)
matrSpec <- if(ncol(test$annot) >1) {test$annot[,annotCol] %in% spec[1]} else {test$annot[,1] %in% spec[1]}     # locate lines from 1st/reference species (ie matrix)
if(debug) {message(fxNa,"sROC4"); sROC4 <- list(test=test, spiSpec=spiSpec,matrSpec=matrSpec,spec=spec,annotCol=annotCol, chLst=chLst,tyThr=tyThr,thr=thr,useComp=useComp,chSpec=chSpec,pp=pp,oriKeep=oriKeep,filterMat=filterMat,inclFilter=inclFilter)}
if(any(!oriKeep[,2], na.rm=TRUE)) spiSpec[which(!oriKeep[,2])] <- matrSpec[which(!oriKeep[,2])] <- NA
msg2 <- " Trouble ahead ? Could not find any element annotated as "
if(!any(wrMisc::naOmit(matrSpec), na.rm=TRUE)) message(fxNa, msg2, "matrix species to search for (ie, TN will always remain 0)")
if(!any(wrMisc::naOmit(spiSpec), na.rm=TRUE)) message(fxNa, msg2, "spike-species to search for (ie, TP will always remain 0)")
if(length(dim(test[[tyThr]])) ==2) if(ncol(test[[tyThr]])==2 & identical(colnames(test[[tyThr]])[1],"(Intercept)") & useComp !=2) {
## single comparison, thus 2 cols of p-val => use 2nd
useComp <- 2 }
teSpi <- if(length(dim(test[[tyThr]])) ==2) test[[tyThr]][which(spiSpec & oriKeep[,2]), useComp] else test[[tyThr]][which(spiSpec & oriKeep[,2])] # spike,ie non-ref species
teMat <- if(length(dim(test[[tyThr]])) ==2) test[[tyThr]][which(matrSpec & oriKeep[,2]), useComp] else test[[tyThr]][which(matrSpec & oriKeep[,2])] # ref-species
if(debug) {message(fxNa,"sROC5"); sROC5 <- list(test=test, spiSpec=spiSpec,matrSpec=matrSpec,teSpi=teSpi,teMat=teMat, chLst=chLst,tyThr=tyThr,thr=thr,useComp=useComp,chSpec=chSpec,pp=pp,oriKeep=oriKeep,filterMat=filterMat,inclFilter=inclFilter)}
if(all(is.na(teSpi)) || length(teSpi) <1) {
message(fxNa," PROBLEM :\n *** None of the spike-elments is passing filters ! Unable to construct TP ! ***")
keyVal <- if(isTRUE(batchMode)) matrix(nrow=0, ncol=8, dimnames=list(NULL, c("alph","spec","sens","prec","accur","FDR","n.pos.Yeast","n.pos.spike"))) else NULL
} else {
pp <- cbind(
TP= sapply(thr, function(x) sum(teSpi <=x, na.rm=TRUE)),
FP= sapply(thr, function(x) sum(teMat <=x, na.rm=TRUE)),
FN= sapply(thr, function(x) sum(teSpi >x, na.rm=TRUE)),
TN= sapply(thr, function(x) sum(teMat >x, na.rm=TRUE)) )
rownames(pp) <- thr
keyVal <- cbind(alph=thr,
spec=as.numeric(pp[,"TN"]/(pp[,"TN"] +pp[,"FP"])), sens=as.numeric(c(pp[,"TP"]/(pp[,"TP"] +pp[,"FN"]))),
prec=as.numeric(pp[,"TP"]/(pp[,"FP"] +pp[,"TP"])), accur=as.numeric((pp[,"TP"] +pp[,"TN"])/rowSums(pp)),
FDR=as.numeric(pp[,"FP"]/(pp[,"TP"] +pp[,"FP"])))
chNaN <- colSums(is.nan(keyVal)) ==nrow(keyVal)
if(any(chNaN, na.rm=TRUE)) keyVal[,which(chNaN)] <- 0
## add no of lines/prot retained for each species
tmp3 <- test$annot[,annotCol] %in% spec[2:length(spec)]
tmp3 <- if(length(dim(test[[tyThr]])) >1) test[[tyThr]][which(tmp3),useComp] else test[[tyThr]][which(tmp3)]   # p-values for all spike-in species combined
if(debug) {message(fxNa,"sROC5b")}
tmp3 <- cbind(Sp1Pos=pp[,"FP"], Sp2Pos=sapply(thr, function(x) sum(tmp3 <=x,na.rm=TRUE)))
colnames(tmp3) <- paste("n.pos",c(spec[1], if(length(spec) >2) paste(spec[-1],collapse="+") else spec[-1]), sep=".")
keyVal <- cbind(keyVal, tmp3)
if(plotROC) {
if(is.null(tit)) tit <- paste("ROC of ",argN)
xLab <- "1 - Specificity"
yLab <- "Sensitivity"
if(overlPlot) graphics::points(1-keyVal[,"spec"], keyVal[,"sens"], col=color, pch=pch, bg=NULL, type="s") else {
graphics::plot(1-keyVal[,"spec"], keyVal[,"sens"], col=color, pch=pch, bg=bg, type="s",main=tit,xlab=xLab,ylab=yLab,xlim=c(0,1), ylim=c(0,1), las=1)}
cutP <- keyVal[which(keyVal[,1]==0.05),-1]
newPch <- cbind(c(1,16,2,17, 7,15,5,6), new=c(21,21,24,24,22,22,23,25)) # transform open or plain filled points to color-filled
if(pch %in% newPch[,1]){ pch2 <- newPch[which(newPch[,1]==pch),2]; bg <- color; col2 <- grDevices::grey(0.2)} else {pch2 <- pch; col2 <- color}
col2 <- wrMisc::convColorToTransp(col2, alph=0.9)
graphics::points(1-cutP["spec"], cutP["sens"], col=col2, pch=pch2, bg=bg, cex=1.4)
graphics::mtext(paste("AUC = ",signif(AucROC(keyVal),3)), side=3, cex=0.8,adj=0)
} }
keyVal }
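## Worked toy example (hypothetical counts, independent of the function above) of the metrics
## assembled in 'keyVal' : at one given threshold, suppose 40 spike-ins pass (TP), 5 matrix
## proteins pass (FP), 10 spike-ins fail (FN) and 95 matrix proteins fail (TN) :
tp <- 40; fp <- 5; fn <- 10; tn <- 95
c(spec= tn/(tn +fp),                  # specificity  = 0.95
  sens= tp/(tp +fn),                  # sensitivity  = 0.80
  prec= tp/(tp +fp),                  # precision   ~= 0.889
  accur=(tp +tn)/(tp +fp +fn +tn),    # accuracy     = 0.90
  FDR=  fp/(tp +fp))                  # false discovery rate ~= 0.111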
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/summarizeForROC.R
#' t-test each line of 2 groups of data
#'
#' \code{test2grp} performs t-test on two groups of data using \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma},
#' this is a custom implementation of \code{\link[wrMisc]{moderTest2grp}} for proteomics.
#' The final object also includes the results without moderation by \code{limma} (eg BH-FDR in \code{$nonMod.BH}).
#' Furthermore, there is an option to make use of package ROTS (note, this will increase the time of computation considerably).
#'
#' @param dat (matrix or data.frame) main data (may contain NAs)
#' @param questNo (integer) specify here which question, ie comparison, should be addressed
#' @param useCol (integer or character)
#' @param grp (character or factor)
#' @param annot (matrix or data.frame)
#' @param ROTSn (integer) number of iterations ROTS runs (stabilization of results may be seen with >300)
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a limma-type S3 object of class 'MArrayLM' (which can be accessed like a list); multiple testing correction types or modified testing by ROTS may get included ('p.value','FDR','BY','lfdr' or 'ROTS.BH')
#' @seealso \code{\link[wrMisc]{moderTest2grp}}, \code{\link[wrMisc]{pVal2lfdr}}, \code{\link[stats]{t.test}}, \code{ROTS} from the Bioconductor package \href{https://www.bioconductor.org/packages/release/bioc/html/ROTS.html}{ROTS}
#' @examples
#' set.seed(2018); datT8 <- matrix(round(rnorm(800)+3,1), nc=8, dimnames=list(paste(
#' "li",1:100,sep=""), paste(rep(LETTERS[1:3],c(3,3,2)),letters[18:25],sep="")))
#' datT8[3:6,1:2] <- datT8[3:6,1:2] +3 # augment lines 3:6 (c-f)
#' datT8[5:8,5:6] <- datT8[5:8,5:6] +3 # augment lines 5:8 (e-h)
#' grp8 <- gl(3,3,labels=LETTERS[1:3],length=8)
#' datL <- list(data=datT8, filt= wrMisc::presenceFilt(datT8,grp=grp8,maxGrpM=1,ratMa=0.8))
#' testAvB0 <- wrMisc::moderTest2grp(datT8[,1:6], gl(2,3))
#' testAvB <- test2grp(datL, questNo=1)
#' @export
test2grp <- function(dat, questNo, useCol=NULL, grp=NULL, annot=NULL, ROTSn=0, silent=FALSE, debug=FALSE, callFrom=NULL) {
## custom extracting data from list with $data and $filt
## return MA-type list with test results
fxNa <- wrMisc::.composeCallName(callFrom,newNa="test2grp")
msg <- " 'dat' must be list containing $data and $filt, with same number of rows !!"
if(!all(c("data","filt") %in% names(dat))) stop(msg)
if(nrow(dat$filt) != nrow(dat$data)) stop(msg)
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
questNa <- colnames(dat$filt)[questNo]
questNa <- unlist(strsplit(questNa, "-"))
if(is.null(useCol)) useCol <- lapply(questNa, grep, colnames(dat$data))
if(is.null(grp)) {grp <- rep(questNa, sapply(useCol, length))
if(!silent) message("Groups set automatic to ",wrMisc::pasteC(grp,quoteC="'")) }
out <- wrMisc::moderTest2grp(dat$data[which(dat$filt[,questNo]), unlist(useCol)], gr=grp, limmaOutput=TRUE, addResults=c("lfdr","FDR","Mval","means","nonMod"), callFrom=fxNa)
out$BH <- apply(out$p.value, 2, stats::p.adjust, method="BH")
out$nonMod.BH <- stats::p.adjust(out$nonMod.p, method="BH")
chLfdr <- try(find.package("fdrtools"), silent=TRUE)
if(inherits(chLfdr, "try-error")) {
message(fxNa,"Package 'fdrtool' not found ! Please install for calculating lfdr-values ..")
} else out$nonMod.lfdr <- wrMisc::pVal2lfdr(out$nonMod.p)
## need to add : (non-moderated test and) ROTS
if(length(ROTSn)==1) if(ROTSn >0 && !is.na(ROTSn)) {
chPa <- requireNamespace("ROTS", quietly=TRUE)
if(!chPa) { ROTSn <- NULL
message(fxNa,"Package 'ROTS' not found ! Please install first .. setting ROTSn=NULL") }
} else ROTSn <- NULL
if(length(ROTSn)==1) if(ROTSn >0) {
## this part requires ROTS
tmp <- ROTS::ROTS(dat$data[which(dat$filt[,questNo]), unlist(useCol)], groups=as.numeric(as.factor(grp)) -1, B=ROTSn) # ,K=500
out$ROTS.p <- tmp$pvalue
out$ROTS.BH <- stats::p.adjust(tmp$pvalue, method="BH")
if(! inherits(chLfdr, "try-error")) out$ROTS.lfdr <- wrMisc::pVal2lfdr(tmp$pvalue)
}
if(!is.null(annot)) out$annot <- as.matrix(annot[which(dat$filt[,questNo]),])
out }
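## Quick orientation on typical result-fields (continuing the roxygen example above) :
## testAvB$p.value holds the moderated p-values, testAvB$BH their BH-FDR, and
## testAvB$nonMod.BH the BH-FDR of the corresponding non-moderated t-test.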
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/test2grp.R
#' Pair-wise testing robust to NA-imputation
#'
#' This function replaces \code{NA} values based on group neighbours (based on grouping of columns in argument \code{gr}), following overall assumption of close to Gaussian distribution.
#' Furthermore, it is assumed that \code{NA}-values originate from experimental settings where measurements at or below detection limit are recorded as \code{NA}.
#' In such cases (eg in proteomics) it is current practice to replace \code{NA}-values by very low (random) values in order to be able to perform t-tests.
#' However, random normal values used for replacing may in rare cases deviate from the average (the 'assumed' value) and in particular, if multiple \code{NA} replacements are above the average,
#' may look like induced biological data and be misinterpreted as such.
#' The statistical testing uses \code{eBayes} from Bioconductor package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma} for robust testing in the context of small numbers of replicates.
#' By repeating the process of replacing \code{NA}-values and subsequent testing multiple times, the results can be summarized afterwards by median over all repeated runs to remove the stochastic effect of individual NA-imputation.
#' Thus, one may gain stability towards the random character of \code{NA} imputations by repeating imputation & testing 'nLoop' times and summarizing p-values by median (results stabilize at 50-100 rounds).
#' It is necessary to define all groups of replicates in \code{gr} to obtain all possible pair-wise testing (multiple columns in $BH, $lfdr etc).
#' The modified testing-procedure of Bioconductor package \href{https://bioconductor.org/packages/release/bioc/html/ROTS.html}{ROTS} may optionally be included, if desired.
#' This function returns a \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}-like S3 list-object further enriched by additional fields/elements.
#'
#' @details
#' The argument \code{multCorMeth} allows to choose which multiple-testing correction algorithms will be used and included in the final results.
#' Possible options are 'lfdr','BH','BY','tValTab', ROTSn='100' (the name of the element is necessary) or 'noLimma' (to add initial p.values and BH to limma-results). By default 'lfdr' (local false discovery rate from package 'fdrtool') and 'BH' (Benjamini-Hochberg FDR) are chosen.
#' The option 'BY' refers to Benjamini-Yekutieli FDR, 'tValTab' allows exporting all individual t-values from the repeated NA-substitution and subsequent testing.
#'
#' This function is compatible with automatic extraction of experimental setup based on sdrf or other quantitation-specific sample annotation.
#' In this case, the results of automated importing and mining of sample annotation should be stored as \code{$sampleSetup$groups} or \code{$sampleSetup$lev}
#'
#' For details on the choice of NA-imputation procedures via arguments 'imputMethod' and 'avSd' please see \code{\link{matrixNAneighbourImpute}}.
#'
#' @param dat (matrix or data.frame) main data (may contain \code{NA}); if \code{dat} is list containing $quant and $annot as matrix, the element $quant will be used
#' @param gr (character or factor) replicate association; if \code{dat} contains a list-element \code{$sampleSetup$groups} or \code{$sampleSetup$lev} this may be used in case \code{gr=NULL}
#' @param annot (matrix or data.frame) annotation (lines must match lines of data !), if \code{annot} is \code{NULL} and argument \code{dat} is a list containing both $quant and $annot, the element $annot will be used
#' @param retnNA (logical) retain and report number of \code{NA}
#' @param avSd (numerical,length=2) population characteristics (mean and sd) for >1 \code{NA}-neighbours (per line)
#' @param avSdH deprecated, please use \code{avSd} instead; (numerical,length=2) population characteristics 'high' (mean and sd) for >1 \code{NA}-neighbours (per line)
#' @param plotHist (logical) additional histogram of original, imputed and resultant distribution (made using \code{\link{matrixNAneighbourImpute}} )
#' @param xLab (character) custom x-axis label
#' @param tit (character) custom title
#' @param imputMethod (character) choose the imputation method (may be 'mode2'(default), 'mode1', 'datQuant', 'modeAdopt', 'informed' or 'none', for details see \code{\link{matrixNAneighbourImpute}} )
#' @param seedNo (integer) seed-value for normal random values
#' @param multCorMeth (character) define which method(s) for correction of multipl testing should be run (for choice : 'BH','lfdr','BY','tValTab', choosing several is possible)
#' @param nLoop (integer) number of runs of independent \code{NA}-imputation
#' @param lfdrInclude (logical) deprecated, please use \code{multCorMeth} instead (include lfdr estimations; may cause warning message(s) concerning convergence if too few lines/proteins are in the dataset tested).
#' @param ROTSn (integer) deprecated, please use \code{multCorMeth} instead (number of repeats by \code{ROTS}, if \code{NULL} \code{ROTS} will not be called)
#' @param silent (logical) suppress messages
#' @param debug (logical) additional messages for debugging
#' @param callFrom (character) This function allows easier tracking of messages produced
#' @return This function returns a limma-type S3 object of class 'MArrayLM' (which can be accessed like a list); multiple results of testing or multiple testing correction types may get included ('p.value','FDR','BY','lfdr' or 'ROTS.BH')
#' @seealso NA-imputation via \code{\link{matrixNAneighbourImpute}}, modereated t-test without NA-imputation \code{\link[wrMisc]{moderTest2grp}}, calculating lfdr \code{\link[wrMisc]{pVal2lfdr}}, \code{eBayes} in Bioconductor package \href{https://bioconductor.org/packages/release/bioc/html/limma.html}{limma}, \code{\link[stats]{t.test}},\code{ROTS} of Bioconductor package \href{https://bioconductor.org/packages/release/bioc/html/ROTS.html}{ROTS}
#' @examples
#' set.seed(2015); rand1 <- round(runif(600) +rnorm(600,1,2),3)
#' dat1 <- matrix(rand1,ncol=6) + matrix(rep((1:100)/20,6),ncol=6)
#' dat1[13:16,1:3] <- dat1[13:16,1:3] +2 # augment lines 13:16
#' dat1[19:20,1:3] <- dat1[19:20,1:3] +3 # augment lines 19:20
#' dat1[15:18,4:6] <- dat1[15:18,4:6] +1.4 # augment lines 15:18
#' dat1[dat1 <1] <- NA # mimick some NAs for low abundance
#' ## normalize data
#' boxplot(dat1, main="data before normalization")
#' dat1 <- wrMisc::normalizeThis(as.matrix(dat1), meth="median")
#' ## designate replicate relationships in samples ...
#' grp1 <- gl(2, 3, labels=LETTERS[1:2])
#' ## moderated t-test with repeated imputations (may take >10 sec, >60 sec if ROTSn >0 !)
#' PLtestR1 <- testRobustToNAimputation(dat=dat1, gr=grp1, retnNA=TRUE, nLoop=70)
#' names(PLtestR1)
#' @export
testRobustToNAimputation <- function(dat, gr=NULL, annot=NULL, retnNA=TRUE, avSd=c(0.15,0.5), avSdH=NULL, plotHist=FALSE, xLab=NULL, tit=NULL, imputMethod="mode2",
seedNo=NULL, multCorMeth=NULL, nLoop=100, lfdrInclude=NULL, ROTSn=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
fxNa <- wrMisc::.composeCallName(callFrom, newNa="testRobustToNAimputation")
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
if(!isTRUE(plotHist)) plotHist <- FALSE
datOK <- TRUE
msg <- grIni <- NULL
## start testing input
if(is.list(dat)) { if(all(c("quant","annot") %in% names(dat))) {
if(length(dim(dat$annot)) ==2 && length(annot) <1) annot <- dat$annot else if(!silent) message(fxNa,"Invalid '$annot'") # recover$annot if not given separately
if("sampleSetup" %in% names(dat) && length(gr) <1) {
gr <- if("groups" %in% names(dat$sampleSetup)) dat$sampleSetup$groups else dat$sampleSetup$lev
if(all(match(gr, unique(gr)) ==match(names(gr), unique(names(gr))))) gr <- names(gr) # rather use name instead of index
if(length(gr) >1 && length(dat$sampleSetup$col) <2 && length(names(gr)) <1) names(gr) <- dat$sampleSetup$sdrf[,dat$sampleSetup$col] # in case not names provided
} else grIni <- gr
dat <- dat$quant } else { datOK <- FALSE; msg <- "Invalid 'dat' : does NOT contain both '$quant' and '$annot !"} }
if(datOK) { if(length(unique(gr))==length(gr)) { datOK <- FALSE
  msg <- "Argument 'gr' is empty or does NOT designate any replicates ! (nothing to do)"} }
if(datOK) if(any(length(dim(dat)) !=2, dim(dat) < 1:2, na.rm=TRUE)) { datOK <- FALSE
msg <- "'dat' must be matrix or data.frame with >1 columns"}
if(datOK) {
if(is.data.frame(dat)) dat <- as.matrix(dat)
if(length(gr) != ncol(dat)) { datOK <- FALSE
msg <- "Number of columns in 'dat' and number of (group-)elements in 'gr' do NOT match !"} }
if(datOK) {
if(!is.factor(gr)) gr <- as.factor(gr)
if(is.null(xLab)) xLab <- "values"
if(length(annot) <1) annot <- matrix(NA, nrow=nrow(dat), ncol=1, dimnames=list(rownames(dat),"rowNa"))
if(length(ROTSn) >0) message(fxNa,"Argument 'ROTSn' is depreciated, please used argument 'multCorMeth' instead (like multCorMeth=c(ROTSn='10'))")
if(length(lfdrInclude) >0) message(fxNa,"Argument 'lfdrInclude' is depreciated, please used argument 'multCorMeth' instead (like multCorMeth='lfdrInclude')")
## get compatible to old arguments lfdrInclude & ROTSn
ROTSn <- NULL
multCorMeth <- if(length(multCorMeth) <1) c("lfdr","FDR","means") else unique(c(multCorMeth, "means"))
if(length(multCorMeth) ==1 && is.numeric(multCorMeth)) {
multCorMeth <- if(multCorMeth >1) c("lfdr", ROTSn=as.integer(multCorMeth), "means") else "lfdr"}
if("ROTSn" %in% names(multCorMeth)) { ROTSn <- try(as.integer(multCorMeth["ROTSn"]), silent=TRUE)
if(inherits(ROTSn, "try-error")) {ROTSn <- NULL; comp<- NULL }} else {ROTSn <- NULL; comp<- NULL }
if("lfdr" %in% multCorMeth) { lfdrInclude <- TRUE
} else if("lfdr" %in% names(multCorMeth)) { lfdrInclude <- try(as.logical(multCorMeth["lfdr"]), silent=TRUE)
if(inherits(lfdrInclude, "try-error")) { lfdrInclude <- FALSE; multCorMeth <- multCorMeth[-which(names(multCorMeth)== "lfdr")] } }
if(length(lfdrInclude) <1) lfdrInclude <- FALSE # if for some reason whatsoever ...
## main
isNA <- is.na(dat)
chNA <- any(isNA)
nNAmat <- matrix(0, nrow=nrow(dat), ncol=length(levels(gr)), dimnames=list(NULL,levels(gr)))
seedNo <- as.integer(seedNo)[1]
gr <- try(as.factor(gr))
if(inherits(gr, "try-error")) message("+++++\n",fxNa," MAJOR PROBLEM with argument 'gr' !! (possibly not sufficient level-names ?) \n+++++")
if(length(avSdH) >1 && length(avSd) <1) { avSd <- avSdH
if(!silent) message(fxNa,"Using depreciated 'avSdH' as substitute of 'avSd', please adopt your code to use 'avSd' !!")
} else if(length(avSdH) >1 && !silent) message(fxNa,"Argument 'avSdH' has been depreciated, 'avSd' is used instead")
## 1st pass
if(debug) message(fxNa," imputMethod=",imputMethod," tRN1")
if("none" %in% tolower(imputMethod)) {
if(debug) {message(fxNa,"Starting 1st pass, no of NA: ",sum(isNA)); tt1 <- list(dat=dat,gr=gr,imputMethod=imputMethod,annot=annot,seedNo=seedNo, retnNA=retnNA, avSd=avSd,ROTSn=ROTSn,lfdrInclude=lfdrInclude,multCorMeth=multCorMeth)}
nLoop <- 1
chFin <- is.finite(dat)
if(any(!chFin, na.rm=TRUE)) dat[which(!chFin)] <- NA # need to replace Inf & -Inf by NA to avoid problems at limma::lmFit()
datI <- list(data=dat)
datFi <- combineMultFilterNAimput(dat=dat, imputed=datI, grp=gr, annDat=annot, abundThr=stats::quantile(if(is.list(dat)) dat$quant else dat, 0.02,na.rm=TRUE), silent=silent, callFrom=fxNa) # number of unique peptides unknown !
} else {
datI <- matrixNAneighbourImpute(dat, gr, imputMethod=imputMethod, retnNA=retnNA ,avSd=avSd, plotHist=plotHist, xLab=xLab, tit=tit, seedNo=seedNo, silent=silent, callFrom=fxNa,debug=debug)
if(debug) {message(fxNa,"Start combineMultFilterNAimput tt2a")}
datFi <- combineMultFilterNAimput(dat=dat, imputed=datI, grp=gr, annDat=annot, abundThr=stats::quantile(if(is.list(dat)) dat$quant else dat, 0.02,na.rm=TRUE), silent=silent, callFrom=fxNa) # number of unique peptides unknown !
if(debug) {message(fxNa,"Done combineMultFilterNAimput tt2b")} # done combineMultFilterNAimput
}
## prepare for testing
if(lfdrInclude) {
chLfdr <- try(find.package("fdrtool"), silent=TRUE)
if(inherits(chLfdr, "try-error")) {
message(fxNa,"Package 'fdrtool' NOT found ! Please install first from CRAN for calculating lfdr-values. Omitting (defaut) 'lfdr' option from argument 'multCorMeth' ..")
lfdrInclude <- FALSE } }
if(debug) {message(fxNa," tRN2"); tRN2 <- list(dat=dat,gr=gr,datFi=datFi,multCorMeth=multCorMeth,datI=datI,datFi=datFi)}
pwComb <- wrMisc::triCoord(length(levels(gr)))
if(debug) message(fxNa,"Start 1st moderTestXgrp()")
out <- wrMisc::moderTestXgrp(datFi$data, grp=gr, limmaOutput=TRUE, addResults=multCorMeth, silent=silent, callFrom=fxNa) # can't do question specific filtering w/o explicit loop
if(debug) {message(fxNa," tRN3"); tRN3 <- list(dat=dat,gr=gr,datFi=datFi,multCorMeth=multCorMeth,datI=datI,datFi=datFi,out=out,pwComb=pwComb)}
chFDR <- names(out) =="FDR"
if(any(chFDR, na.rm=TRUE)) names(out)[which(chFDR)] <- "BH" # rename $FDR to $BH
rownames(pwComb) <- colnames(out$t)
## need to add $ROTS.p
if(length(ROTSn)==1) if(ROTSn >0 && !is.na(ROTSn)) {
chPa <- requireNamespace("ROTS", quietly=TRUE)
if(!chPa) { message(fxNa,"Package 'RORS' not found/installed (please install from Bioconductor), omitting argument 'ROTSn'")
ROTSn <- 0 }
} else ROTSn <- NULL
if(length(ROTSn)==1) if(ROTSn >0) {
## this requires package ROTS
if(debug) message(fxNa,"Start ROTS n=",ROTSn)
comp <- wrMisc::triCoord(length(levels(gr)))
rownames(comp) <- paste(levels(gr)[comp[,1]], levels(gr)[comp[,2]],sep="-")
tmRO <- matrix(nrow=nrow(datFi$data), ncol=nrow(comp))
comPair <- matrix(unlist(strsplit(rownames(comp),"-")), ncol=nrow(comp))
useCol <- apply(comPair, 2, function(x) gr %in% x)
for(i in 1:nrow(comp)) tmRO[which(datFi$filt[,i]),i] <- ROTS::ROTS(datFi$data[which(datFi$filt[,i]),
which(useCol[,i])], groups=as.numeric(as.factor(gr[which(useCol[,i])]))-1, B=ROTSn)$pvalue # K=500
out$ROTS.p <- tmRO
out$ROTS.BH <- apply(tmRO, 2, stats::p.adjust, method="BH")
if(lfdrInclude) out$ROTS.lfdr <- apply(tmRO, 2, wrMisc::pVal2lfdr)
}
if(debug) message(fxNa,"tRN4")
## subsequent rounds of NA-imputation
if(chNA && nLoop >1) {
if(debug) message(fxNa,"Subsequent rounds of NA-imputation nLoop=",nLoop)
pValTab <- tValTab <- array(NA, dim=c(nrow(dat), nrow(pwComb), nLoop))
datIm <- array(NA, dim=c(nrow(dat), ncol(dat), nLoop))
datIm[,,1] <- datFi$data
pValTab[,,1] <- out$p.value
tValTab[,,1] <- out$t
if(length(ROTSn)==1) if(ROTSn >0) {
pVaRotsTab <- array(NA, dim=c(nrow(dat), nrow(pwComb), min(10,nLoop)))
pVaRotsTab[,,1] <- out$ROTS.p }
for(i in 2:nLoop) {
## the repeated NA-imputation & testing
if(length(seedNo)==1) seedNo <- seedNo +i
if(debug) {message("tRN5"); tRN5 <- list(dat=dat,gr=gr,seedNo=seedNo, retnNA=retnNA, avSd=avSd,datI=datI, pValTab=pValTab,datIm=datIm,ROTSn=ROTSn)}
datX <- matrixNAneighbourImpute(dat, gr, imputMethod=imputMethod, seedNo=seedNo, retnNA=retnNA, avSd=avSd, NAneigLst=datI$NAneigLst, plotHist=FALSE, silent=TRUE, callFrom=fxNa)$data
if(debug) message(fxNa,"Passed matrixNAneighbourImpute() in loop no ",i," tRN5b")
#1st round# datI <- matrixNAneighbourImpute(dat, gr, seedNo=seedNo, retnNA=retnNA ,avSd=avSd, plotHist=plotHist, xLab=xLab, tit=tit, silent=silent, callFrom=fxNa)
#1st round# datFi <- combineMultFilterNAimput(dat=dat, imputed=datI, grp=gr, annDat=annot, abundThr=stats::quantile(dat,0.02,na.rm=TRUE), silent=silent, callFrom=fxNa)
#1st round# out <- wrMisc::moderTestXgrp(datFi$data, grp=gr, limmaOutput=TRUE, addResults="", silent=silent, callFrom=fxNa)
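## re-fit the limma-model on the freshly imputed matrix, re-using design & contrasts estimated during the 1st pass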
fitX <- limma::eBayes(limma::contrasts.fit(limma::lmFit(datX[,], out$design), contrasts=out$contrasts))
datIm[,,i] <- datX
pValTab[,,i] <- fitX$p.value
tValTab[,,i] <- fitX$t
if(length(ROTSn)==1) if(ROTSn >0 && i < min(10, nLoop)) {        # test using ROTS (TAKES MUCH TIME !!)
  for(j in 1:nrow(comp)) tmRO[which(datFi$filt[,j]),j] <- ROTS::ROTS(datFi$data[which(datFi$filt[,j]), which(useCol[,j])], groups=as.numeric(as.factor(gr[which(useCol[,j])])), B=ROTSn)$pvalue # ,K=500
  pVaRotsTab[,,i] <- tmRO } }
if(debug) message("tRN6")
## propagate filtering results to p-values (disqualify to NA)
if(any(!datFi$filt, na.rm=TRUE)) {
fiAr <- rep(datFi$filt, nLoop)
pValTab[which(!fiAr)] <- NA }
    ## summarize individual rounds of imputation, optional return of details
out$datImp <- as.matrix(apply(datIm, 1:2, mean, na.rm=TRUE))
rownames(out$datImp) <- if(is.null(rownames(dat))) rownames(annot) else rownames(dat)
if("tValTab" %in% multCorMeth) { out$tValArr <- tValTab
out$pValArr <- pValTab }
if("noLimma" %in% multCorMeth) out$simple.p.value <- out$p.value
out$p.value <- as.matrix(apply(pValTab, 1:2, stats::median, na.rm=FALSE))
out$t <- as.matrix(apply(tValTab, 1:2, stats::median, na.rm=FALSE))
colnames(out$p.value) <- colnames(out$t) <- rownames(pwComb)
## when converting t-value to p how to consider n due to nLoop ??
} else out$datImp <- datFi$data
out$annot <- annot
out$filter <- datFi$filt
if(debug) message("tRN7")
## update dimnames of out$datImp
dimnames(out$datImp) <- list(if(is.null(rownames(out$lods))) rownames(out$annot) else rownames(out$lods), colnames(dat))
rownames(out$t) <- rownames(out$p.value) <- rownames(out$datImp)
## integrate column specific filtering
if(any(!datFi$filt, na.rm=TRUE)) out$p.value[which(!datFi$filt)] <- NA
if(lfdrInclude) {out$lfdr <- as.matrix(apply(out$p.value, 2, wrMisc::pVal2lfdr, callFrom=fxNa))
dimnames(out$lfdr) <- list(rownames(out$lods), colnames(out$contrasts))
if("noLimma" %in% multCorMeth) out$simple.lfdr <- if(ncol(out$simple.p.value) >1) apply(out$simple.p.value, 2, wrMisc::pVal2lfdr) else wrMisc::pVal2lfdr(out$simple.p.value)
}
if(any(c("FDR","BH") %in% multCorMeth, na.rm=TRUE)) { out$BH <- as.matrix(apply(out$p.value, 2, stats::p.adjust, method="BH"))
dimnames(out$BH) <- list(rownames(out$lods), colnames(out$contrasts))
if("noLimma" %in% multCorMeth) out$simple.BH <- if(ncol(out$simple.p.value) >1) apply(out$simple.p.value, 2, stats::p.adjust, method="BH") else stats::p.adjust(out$simple.p.value, method="BH")
}
if("BY" %in% multCorMeth) {out$BY <- as.matrix(apply(out$p.value, 2, stats::p.adjust, method="BY"))
dimnames(out$BY) <- list(rownames(out$lods), colnames(out$contrasts))}
if(length(ROTSn)==1) if(ROTSn >0 && chNA && nLoop >1) {
out$ROTS.p <- apply(pVaRotsTab, 1:2, stats::median, na.rm=TRUE)
if(any(!datFi$filt, na.rm=TRUE)) out$ROTS.p[which(!datFi$filt)] <- NA
out$ROTS.BH <- as.matrix(as.matrix(apply(out$ROTS.p, 2, stats::p.adjust, method="BH")))
dimnames(out$ROTS.BH) <- list(rownames(out$lods), colnames(out$contrasts) )
if(lfdrInclude) {out$ROTS.lfdr <- as.matrix(as.matrix(apply(out$ROTS.p, 2, wrMisc::pVal2lfdr)))
dimnames(out$ROTS.lfdr) <- list(rownames(out$lods), colnames(out$contrasts))} }
out
} else { warning(fxNa, msg)
return(NULL) }
}
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/testRobustToNAimputation.R
|
#' Write sequences in fasta format to file
#'
#' This function writes sequences from character vector as fasta formatted file (from \href{https://www.uniprot.org}{UniProt})
#' Line-headers are based on names of elements of input vector \code{prot}.
#' This function also allows comparing the main vector of sequences with a reference vector \code{ref} to check if any of the sequences therein are truncated.
#'
#' @details
#' Sequences without any names will be given generic headers like protein01 ... etc.
#'
#'
#' @param prot (character) vector of sequences, names will be used for fasta-header
#' @param fileNa (character) name (and path) for file to be written
#' @param ref (character) optional/additional set of (reference-) sequences (only for comparison to \code{prot}), length of proteins from \code{prot} will be checked to mark truncated proteins by '_tru'
#' @param lineLength (integer, length=1) number of sequence characters per line (default 60, should be >1 and <10000)
#' @param eol (character) the character(s) to print at the end of each line (row); for example, eol = "\\r\\n" will produce Windows' line endings on a Unix-alike OS
#' @param truSuf (character) suffix to be added for sequences found truncated when comparing with \code{ref}
#' @param silent (logical) suppress messages
#' @param debug (logical) supplemental messages for debugging
#' @param callFrom (character) allows easier tracking of messages produced
#' @return This function writes the sequences from \code{prot} as fasta formatted-file
#' @seealso \code{\link{readFasta2}} for reading fasta, \code{write.fasta} from the package \href{https://CRAN.R-project.org/package=seqinr}{seqinr}
#' @examples
#' prots <- c(SEQU1="ABCDEFGHIJKL", SEQU2="CDEFGHIJKLMNOP")
#' writeFasta2(prots, fileNa=file.path(tempdir(),"testWrite.fasta"), lineLength=6)
#' @export
writeFasta2 <- function(prot, fileNa=NULL, ref=NULL, lineLength=60, eol="\n", truSuf="_tru", silent=FALSE, debug=FALSE, callFrom=NULL) {
##
fxNa <- wrMisc::.composeCallName(callFrom, newNa="writeFasta2")
if(isTRUE(debug)) silent <- FALSE
if(!isTRUE(silent)) silent <- FALSE
namesInp <- c(deparse(substitute(prot)), deparse(substitute(fileNa)), deparse(substitute(ref)))
if(length(prot) <1) stop(" '",namesInp,"' seems to be empty ! expecting named character vector with (protein) sequence")
chN <- length(names(prot)) <1 # check names of 'prot'
## compare to 'ref' -if provided
if(length(ref)==length(prot) && length(prot) >1) {
    if(length(names(ref)) <1) { if(!silent) message(fxNa," NOTE: '",namesInp[3],"' has no names ! Assuming same order as '",namesInp[1],"' !")
} else {
## check order based on names
heP <- sub(" [[:print:]]+", "", names(ref)) # get 1st word/identifyer per line
heR <- sub(" [[:print:]]+", "", names(prot))
if(!all(heP==heR)) {
message(fxNa,"...adjusting order realtif to '",namesInp[2],"'")
newOrd <- match(heR, heP)
if(sum(is.na(newOrd)) >0 && !silent) message(fxNa," NOTE : ",sum(is.na(newOrd))," entries of 'ref' not found in 'prot'" )
prot <- prot[newOrd] }
}
## compare proteins : count number of characters (AAs)
chLe <- sapply(prot, nchar)
chLeRe <- sapply(ref, nchar)
if(!silent) message(fxNa," (out of ",length(prot)," proteins) ",sum(chLe < chLeRe)," proteins shorter, ",sum(chLe > chLeRe)," proteins longer as in '",namesInp[2],"'")
## propagate names from ref, adj if truncated
if(debug) {message("wF1\n"); wF1 <- list(prot=prot,ref=ref,heP=heP,heR=heR,chLe=chLe,chLeRe=chLeRe,chN=chN)}
## adjust names
if(chN && length(names(ref))==length(ref)) names(prot) <- names(ref)
## look for truncated proteins
if(any(chLe < chLeRe)) {
redLe <- which(chLe < chLeRe)
if(length(truSuf) <1 | any(is.na(truSuf))) {truSuf <- ""; if(!silent) message(fxNa," Suffix 'truSuf' set as empty")}
ch1 <- grepl(" $",truSuf[1])
if(!ch1) truSuf <- paste0(truSuf[1]," ")
names(prot)[redLe] <- sub(" ", truSuf, names(prot)[redLe]) } # append suffix to truncated protein sequences
} else {
if(length(ref) >0 && !silent) message(fxNa,"NOTE : '",namesInp[2],"' does NOT match '",namesInp[1],"', ignoring ...")
if(chN) { if(!silent) message(fxNa," Note : '",namesInp[1],"' has NO NAMES, renaming to protein01 ... proteinN for fasta headers")
names(prot) <- paste0("protein",sprintf(paste("%0",nchar(length(prot)),"d",sep=""),1:length(prot)))} }
## check names of lines/sequences for fasta-format (heading '>')
ch1 <- !grepl("^>", names(prot))
if(any(ch1)) {if(all(ch1)) names(prot) <- paste0(">",names(prot)) else names(prot)[which(ch1)] <- paste0(">",names(prot)[which(ch1)])}
if(debug) {message("wF4"); wF4 <- list(prot=prot, ch1=ch1,ref=ref,fileNa=fileNa,lineLength=lineLength)}
## prepare for adding header, split sequence in fixed length blocks
out <- rep(">", length(prot)*2)
out[2*(1:length(prot)) -1] <- names(prot)
if(all(length(lineLength)==1, is.numeric(lineLength), !is.na(lineLength))) {
if(lineLength <2 || lineLength >= 1e4) { lineLength <- 60
if(!silent) message(fxNa,"Invalid entry for 'lineLength' (setting to default=60)")}
}
if(all(length(lineLength)==1, is.numeric(lineLength), !is.na(lineLength))) {
    out[2*(1:length(prot))] <- strsplit(gsub(paste0("([[:alpha:]]{",lineLength,"})"), "\\1 ", as.character(prot)), " ")   # first insert a space after every 'lineLength' characters (pattern back-reference), then split on these spaces
out <- unlist(out, use.names=FALSE)}
if(debug) {message("wF5"); wF5 <- list(prot=prot, ch1=ch1,ref=ref,fileNa=fileNa,lineLength=lineLength, out=out)}
## check fileName
if(length(fileNa) <1) fileNa <- namesInp[2]
if(!grepl("\\.fasta$",fileNa)) fileNa <- paste0(fileNa,".fasta")
chFi <- file.exists(fileNa)
if(!silent) {if(chFi) message(fxNa," NOTE : file '",fileNa,"' will be overwritten !") else if(debug) message(fxNa," Ready to write file ",fileNa)}
## setup file connection (see https://stackoverflow.com/questions/36933590/how-to-write-files-with-unix-end-of-lines-on-r-for-windows)
con <- try(file(fileNa), silent=TRUE)
if(inherits(con, "try-error")) warning("Failed to call 'file()' for '",fileNa,"' (check authorization/access)") else {
if(debug) message(" file '",fileNa,"' : conection error : ", "try-error" %in% class(con)) }
## open connection
if(!isOpen(con=con, rw="wb")) { tmp <- try(open(con, open="wb"), silent=TRUE) } else tmp <- NULL # 'wb' means : open for writing, binary mode
if(debug) {message("wF7"); wF7 <- list(prot=prot, ch1=ch1,ref=ref,fileNa=fileNa,lineLength=lineLength,con=con,out=out)}
if(inherits(tmp, "try-error")) warning("Failed to open connection to file '",fileNa,"' (check authorization/access)") else {
if(debug) message(fxNa,"Open connection error ", inherits(tmp, "try-error"))
on.exit(try(close(con), silent=TRUE), add=TRUE)
## write to file
tmp <- try(writeLines(as.character(out), con=con, sep=eol), silent=TRUE) #
if(debug) message(fxNa," writeLines() error ", "try-error" %in% class(tmp))
## close connection
close(con)
}
}
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/R/writeFasta2.R
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
## ----install, echo=TRUE, eval=FALSE-------------------------------------------
# ## if you need to install the packages 'wrMisc','wrProteo' and 'wrGraph' from CRAN :
# install.packages("wrMisc")
# install.packages("wrProteo")
# ## The package 'wrGraph' is not obligatory, but it allows making better graphs
# install.packages("wrGraph")
#
# ## Installation of limma from Bioconductor
# if(!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
# BiocManager::install("limma")
## ----setup, echo=FALSE, messages=FALSE, warnings=FALSE------------------------
suppressPackageStartupMessages({
library(wrMisc)
library(wrProteo)
library(wrGraph)
library(knitr)
library(rmarkdown)
})
## ----setup2-------------------------------------------------------------------
## Let's assume this is a fresh R-session
## Get started by loading the packages
library("knitr")
library("wrMisc")
library("wrProteo")
library("wrGraph")
# This is wrProteo version no :
packageVersion("wrProteo")
## ----Vigenttes1, echo=TRUE, eval=FALSE----------------------------------------
# browseVignettes("wrProteo")
## ----ChemFormMolMass1, echo=TRUE----------------------------------------------
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN", " ", "e"))
# Ignore empty/invalid entries
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN"), rmEmpty=TRUE)
## ----ChemFormMolMass2, echo=TRUE----------------------------------------------
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN"), massTy="aver")
## ----AAseqMolMass, echo=TRUE--------------------------------------------------
AAmass()
## ----AAseqMolMass2, echo=TRUE-------------------------------------------------
## mass of peptide (or protein)
pep1 <- c(aa="AAAA",de="DEFDEF")
convAASeq2mass(pep1, seqN=FALSE)
## ----readFasta, echo=TRUE-----------------------------------------------------
path1 <- system.file('extdata', package='wrProteo')
fiNa <- "conta1.fasta.gz"
## basic reading of Fasta
fasta1 <- readFasta2(file.path(path1, fiNa))
str(fasta1)
## now let's read and further separate details in annotation-fields
fasta1b <- readFasta2(file.path(path1, fiNa), tableOut=TRUE)
str(fasta1b)
## ----treatFasta2, echo=TRUE---------------------------------------------------
dupEntry <- duplicated(fasta1)
table(dupEntry)
## ----treatFasta3, echo=TRUE---------------------------------------------------
fasta3 <- fasta1[which(!dupEntry)]
length(fasta3)
## ----writeFasta1, echo=TRUE, eval=FALSE---------------------------------------
# writeFasta2(fasta3, fileNa="testWrite.fasta")
## ----readMaxQuant1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
path1 <- system.file("extdata", package="wrProteo")
dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
## number of lines and columns of quantitation data
dim(dataMQ$quant)
## ----readMaxQuant2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
## The grouping of replicates
grp9 <- rep(1:9,each=3)
head(grp9)
## special group of proteins (we want to differentiate/highlight later on)
UPS1ac <- c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165", "P00709", "P06732",
"P12081", "P61626", "Q15843", "P02753", "P16083", "P63279", "P01008", "P61769", "P55957", "O76070",
"P08263", "P01344", "P01127", "P10599", "P99999", "P06396", "P09211", "P01112", "P01579", "P02787",
"O00762", "P51965", "P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375")
specPrefMQ <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike=UPS1ac)
dataMQ <- readMaxQuantFile(path1, specPref=specPrefMQ, suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=FALSE), gr=grp9, plotGraph=FALSE)
## the quantification data is the same as before
dim(dataMQ$quant)
## ----readMaxQuant3, echo=TRUE------------------------------------------------
## count of tags based on argument specPref
table(dataMQ$annot[,"SpecType"])
## ----readMaxQuant4, echo=TRUE------------------------------------------------
dataMQ <- readMaxQuantFile(path1, specPref=specPrefMQ, sdrf="PXD001819", suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=FALSE), plotGraph=FALSE)
## ----exportSdrfDraftMaxQuant5, echo=TRUE-------------------------------------
path1 <- system.file("extdata", package="wrProteo")
fiNaMQ <- "proteinGroups.txt.gz"
dataMQ2 <- readMaxQuantFile(path1, file=fiNaMQ, refLi="mainSpe", sdrf=FALSE, suplAnnotFile=TRUE)
## Here we'll write simply in the current temporary directory of this R-session
exportSdrfDraft(dataMQ2, file.path(tempdir(),"testSdrf.tsv"))
## ----readMaxQuantPeptides, echo=TRUE-----------------------------------------
MQpepFi1 <- "peptides_tinyMQ.txt.gz"
path1 <- system.file("extdata", package="wrProteo")
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST", spec2="HUMAN")
dataMQpep <- readMaxQuantPeptides(path1, file=MQpepFi1, specPref=specPref1, tit="Tiny MaxQuant Peptides")
summary(dataMQpep$quant)
## ----readProteomeDiscovererProt1, echo=TRUE----------------------------------
fiNa <- "tinyPD_allProteins.txt.gz"
dataPD <- readProteomeDiscovererFile(file=fiNa, path=path1, suplAnnotFile=FALSE, plotGraph=FALSE)
summary(dataPD$quant)
## ----readDiaNN1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
diaNNFi1 <- "tinyDiaNN1.tsv.gz"
## This file contains far fewer identifications than one may usually obtain
path1 <- system.file("extdata", package="wrProteo")
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN")
dataNN <- readDiaNNFile(path1, file=diaNNFi1, specPref=specPref1, tit="Tiny DIA-NN Data", plotGraph=FALSE)
summary(dataNN$quant)
## ----readProlineProt1, echo=TRUE---------------------------------------------
path1 <- system.file("extdata", package="wrProteo")
fiNa <- "exampleProlineABC.csv.gz" # gz compressed data can be read, too
dataPL <- readProlineFile(file=fiNa, path=path1, plotGraph=FALSE)
summary(dataPL$quant[,1:8])
## ----readFragpipe1, echo=TRUE------------------------------------------------
FPproFi1 <- "tinyFragpipe1.tsv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="MOUSE")
dataFP <- readFragpipeFile(path1, file=FPproFi1, specPref=specPref1, tit="Tiny Fragpipe Example", plotGraph=FALSE)
summary(dataFP$quant)
## ----readMassChroq1, echo=TRUE-----------------------------------------------
MCproFi1 <- "tinyMC.RData"
dataMC <- readMassChroQFile(path1, file=MCproFi1, tit="Tiny MassChroq Example", plotGraph=FALSE)
summary(dataMC$quant)
## ----readAlphaPept1, echo=TRUE-----------------------------------------------
APproFi1 <- "tinyAlpaPeptide.csv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK")
dataAP <- readAlphaPeptFile(path1, file=APproFi1, specPref=specPref1, tit="Tiny AlphaPept Example", plotGraph=FALSE)
summary(dataAP$quant)
## ----readWombarP1, echo=TRUE-------------------------------------------------
WBproFi1 <- "tinyWombCompo1.csv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST")
dataWB <- readWombatNormFile(path1, file=WBproFi1, specPref=specPref1, tit="Tiny Wombat-P Example", plotGraph=FALSE)
summary(dataWB$quant)
## ----readSampleMetaData2, echo=TRUE------------------------------------------
MQsdrf001819Setup <- readSampleMetaData(quantMeth="MQ", sdrf="PXD001819", path=path1, suplAnnotFile="summary.txt.gz", abund=dataMQ$quant)
str(MQsdrf001819Setup)
## ----fuseProteomicsProjects1, echo=TRUE--------------------------------------
path1 <- system.file("extdata", package="wrProteo")
dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
dataMC <- readMassChroQFile(path1, file="tinyMC.RData", tit="Tiny MassChroq Example", plotGraph=FALSE)
dataFused <- fuseProteomicsProjects(dataMQ, dataMC)
str(dataFused$quant)
## ----NA_MaxQuant, echo=TRUE---------------------------------------------------
## Let's inspect NA values as graphic
matrixNAinspect(dataMQ$quant, gr=grp9, tit="Histogram of Protein Abundances and NA-Neighbours")
## ----NArepl_MaxQuant, echo=TRUE-----------------------------------------------
## MaxQuant simple NA-imputation (single round)
dataMQimp <- matrixNAneighbourImpute(dataMQ$quant, gr=grp9, tit="Histogram of Imputed and Final Data")
## ----testRobustToNAimputation_MQ1, echo=TRUE----------------------------------
## Impute NA-values repeatedly and run statistical testing after each round of imputations
testMQ <- testRobustToNAimputation(dataMQ, gr=grp9)
## Example of the data after repeated NA-imputation
head(testMQ$datImp[,1:6])
## ----PCA1MQ, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE------
# limit to UPS1
plotPCAw(testMQ$datImp, sampleGrp=grp9, tit="PCA on Protein Abundances (MaxQuant,NAs imputed)", rowTyName="proteins", useSymb2=0)
## ----MAplot1, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE----
# By default this plots the first of all pairwise questions
MAplotW(testMQ)
## ----MAplot2, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE----
res1 <- NULL
MAplotW(testMQ, useComp=2, namesNBest="passFC")
## ----VolcanoPlot1MQ, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE----
## by default the first pairwise comparison is taken
## using the argument 'namesNBest' we can add names from the annotation
VolcanoPlotW(testMQ, useComp=2, namesNBest="passFDR")
## ----results1, echo=TRUE------------------------------------------------------
res1 <- extractTestingResults(testMQ, compNo=1, thrsh=0.05, FCthrs=2)
## ----results2, echo=TRUE------------------------------------------------------
knitr::kable(res1[,-1], caption="5%-FDR (BH) Significant results for 1st pairwise set", align="c")
## ----readUCSC1, echo=TRUE-----------------------------------------------------
path1 <- system.file("extdata", package="wrProteo")
gtfFi <- file.path(path1, "UCSC_hg38_chr11extr.gtf.gz")
UcscAnnot1 <- readUCSCtable(gtfFi)
# The Ensembl transcript identifiers and their chromosomal locations :
head(UcscAnnot1)
## ----readUCSC2, echo=TRUE-----------------------------------------------------
# Here we'll redo reading the UCSC table, plus immediately write the file for UniProt conversion
# (in this vignette we write to tempdir() to keep things tidy)
expFi <- file.path(tempdir(),"deUcscForUniProt2.txt")
UcscAnnot1 <- readUCSCtable(gtfFi, exportFileNa=expFi)
## ----readUniProt1, echo=TRUE--------------------------------------------------
deUniProtFi <- file.path(path1, "deUniProt_hg38chr11extr.tab")
deUniPr1 <- readUniProtExport(UniP=deUniProtFi, deUcsc=UcscAnnot1, targRegion="chr11:1-135,086,622")
str(deUniPr1)
## ----sessionInfo, echo=FALSE--------------------------------------------------
sessionInfo()
|
/scratch/gouwar.j/cran-all/cranData/wrProteo/inst/doc/wrProteoVignette1.R
|
---
title: "Getting started with wrProteo"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrProteoVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains a collection of various tools for Proteomics used at the [proteomics platform](https://proteomics.igbmc.fr) of the [IGBMC](https://www.igbmc.fr).
To get started, we need to load the packages "[wrMisc](https://CRAN.R-project.org/package=wrMisc)" and
this package ([wrProteo](https://CRAN.R-project.org/package=wrProteo)), both are available from CRAN.
The packages [wrGraph](https://CRAN.R-project.org/package=wrGraph) and [RColorBrewer](https://CRAN.R-project.org/package=RColorBrewer) get used internally by some of the functions from this package for (optional/improved) figures.
Furthermore, the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html) will be used internally for statistical testing.
If you are not familiar with [R](https://www.r-project.org) you may find many introductory documents on the official R-site
in [contributed documents](https://cran.r-project.org/other-docs.html) or under [Documentation/Manuals](https://cran.r-project.org/manuals.html).
Of course, numerous other documents/sites with tutorials exist.
The aim of package-vignettes is to provide additional information and to show examples of how the R-package concerned may be used, thus complementing the documentation given with _help()_ for each of the functions of the package. In terms of examples, frequent standard types of problems are preferred in a vignette.
Nevertheless, most functions can be used in many other ways; for this you may have to check the various arguments via calling _help_ on the function of interest.
All R-code in this vignette can be directly repeated by the user, and all data used is provided with the package.
```{r, include = FALSE}
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
```
```{r install, echo=TRUE, eval=FALSE}
## if you need to install the packages 'wrMisc','wrProteo' and 'wrGraph' from CRAN :
install.packages("wrMisc")
install.packages("wrProteo")
## The package 'wrGraph' is not obligatory, but it allows making better graphs
install.packages("wrGraph")
## Installation of limma from Bioconductor
if(!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
BiocManager::install("limma")
```
```{r setup, echo=FALSE, messages=FALSE, warnings=FALSE}
suppressPackageStartupMessages({
library(wrMisc)
library(wrProteo)
library(wrGraph)
library(knitr)
library(rmarkdown)
})
```
```{r setup2}
## Let's assume this is a fresh R-session
## Get started by loading the packages
library("knitr")
library("wrMisc")
library("wrProteo")
library("wrGraph")
# This is wrProteo version no :
packageVersion("wrProteo")
```
This way you can browse all vignettes available to [this package](https://CRAN.R-project.org/package=wrProteo) :
```{r Vigenttes1, echo=TRUE, eval=FALSE}
browseVignettes("wrProteo")
```
There you can find another vignette dedicated to the analysis of heterogeneous spike-in experiments.
## Calculating Molecular Masses From Composition Formulas
Please note that molecular masses may be given in two flavours : Monoisotopic mass and average mass.
For details you may refer to [Wikipedia: monoisotopic mass](https://en.wikipedia.org/wiki/Monoisotopic_mass).
Monoisotopic masses commonly are used in mass-spectrometry and will be used by default in [this package](https://CRAN.R-project.org/package=wrProteo).
Molecular (mono-isotopic) masses of the atoms integrated in this package were taken from [Unimod](http://www.unimod.org/masses.html).
They can be easily updated if, in the future, (mono-isotopic) molecular masses are determined with higher precision (ie more digits).
### Molecular masses based on (summed) chemical formulas
At this level (summed) atomic compositions are evaluated.
Here, the number of atoms has to be written _before_ the atom. Thus, '2C' means two atoms of carbon.
Empty or invalid entries will be returned as mass=0 by default; a message will point out such issues.
The mass of an electron can be assigned using 'e'.
```{r ChemFormMolMass1, echo=TRUE}
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN", " ", "e"))
# Ignore empty/invalid entries
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN"), rmEmpty=TRUE)
```
Using the argument _massTy_ one can switch from default _monoisotopic mass_ to _average mass_ :
```{r ChemFormMolMass2, echo=TRUE}
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN"), massTy="aver")
```
### Molecular masses based on amino-acid sequence
The mass of these amino-acids can be used:
```{r AAseqMolMass, echo=TRUE}
AAmass()
```
Here the one-letter amino-acid code is used to describe peptides or proteins.
```{r AAseqMolMass2, echo=TRUE}
## mass of peptide (or protein)
pep1 <- c(aa="AAAA",de="DEFDEF")
convAASeq2mass(pep1, seqN=FALSE)
```
# Working With Fasta(Files)
## Reading Fasta Files (from Uniprot)
This package contains a parser for Fasta-files, allowing one to separate different fields of meta-data like IDs, name and species of the respective entries.
Here we will read a tiny example fasta-file (a collection of typical contaminants in proteomics) using `readFasta2()`.
```{r readFasta, echo=TRUE}
path1 <- system.file('extdata', package='wrProteo')
fiNa <- "conta1.fasta.gz"
## basic reading of Fasta
fasta1 <- readFasta2(file.path(path1, fiNa))
str(fasta1)
## now let's read and further separate details in annotation-fields
fasta1b <- readFasta2(file.path(path1, fiNa), tableOut=TRUE)
str(fasta1b)
```
Now we can check if some entries appear twice.
```{r treatFasta2, echo=TRUE}
dupEntry <- duplicated(fasta1)
table(dupEntry)
```
Let's remove the duplicated entry.
```{r treatFasta3, echo=TRUE}
fasta3 <- fasta1[which(!dupEntry)]
length(fasta3)
```
## Writing Sequences As Fasta Files
Once we have modified a fasta we might want to save it again as fasta-formatted file.
This can be done using `writeFasta2()`.
```{r writeFasta1, echo=TRUE, eval=FALSE}
writeFasta2(fasta3, fileNa="testWrite.fasta")
```
.
***
# Analyzing Label-Free Quantitative Proteomics Data
### Label-free Quantitative Proteomics Introduction
Multiple algorithms and software implementations have been developed for the quantitation of label-free proteomics experiments (LFQ),
in particular for extracted ion chromatograms (XIC). For more background information you may look at
[Wikipedia label-free Proteomics](https://en.wikipedia.org/wiki/Label-free_quantification).
The tools presented here are designed for use with label-free XIC (ie LFQ) data.
Several of the programs for extracting initial quantitations also allow getting spectral counting (PSM) data which can also get imported into R,
however their use is not further discussed in this vignette.
In general it is preferable to use XIC for comparing peptide or protein quantities between different protein extracts/samples.
This package provides support for importing quantitation results from [Proteome Discoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812),
[MaxQuant](https://www.maxquant.org), [Fragpipe](https://fragpipe.nesvilab.org), [Proline](https://www.profiproteomics.fr/proline/),
[MassChroQ](http://pappso.inrae.fr/bioinfo/masschroq/), [DIA-NN](https://github.com/vdemichev/DiaNN), [AlphaPept](https://github.com/MannLabs/alphapept),
[Wombat-P](https://github.com/wombat-p) and [OpenMS](https://openms.de/).
All quantitation import functions offer special features for further separating annotation related information, like species, for later use.
In most common real-world cases people typically analyze data using only one quantitation algorithm/software.
Below in this vignette, we'll use only the quantitation data generated using MaxQuant (AlphaPept, DIA-NN, FragPipe, MassChroQ, OpenMS, ProteomeDiscoverer, Proline and Wombat-P are supported, too).
The other vignette to [this package](https://CRAN.R-project.org/package=wrProteo) ("UPS-1 spike-in Experiments") shows in detail the import functions available for MaxQuant, ProteomeDiscoverer and Proline
and how further comparisons can be performed in bench-mark studies.
All these import functions generate an equivalent output format, separating (selected) annotation data (\$annot) from normalized log2-quantitation data (\$quant)
and initial quantitation (\$raw).
Normalization (discussed [below](#Normalization) in more detail) is an important part of 'preparing' the data for subsequent analysis.
The import functions in this package allow performing an initial normalization step (with a choice among multiple algorithms), too.
Further information about the proteins identified can be considered during normalization:
For example, it is possible to exclude contaminants like keratins, which are frequently found among the more abundant proteins and may potentially introduce bias during global normalization.
Technical replicates are very frequently produced in proteomics, they allow to assess the variability linked to repeated injection of the same material.
Biological replicates, however, make additional information accessible, allowing the interpretation of experiments in a more general way.
## Import From Dedicated Quantification Algorithms/Software {#ImportQuantitation}
### MaxQuant: Import Protein Quantification Data {#ReadMaxQuant}
[MaxQuant](https://www.maxquant.org) is free software provided by the [Max-Planck-Institute](https://www.biochem.mpg.de/de),
see also [Tyanova et al 2016](https://doi.org/10.1038/nprot.2016.136).
Typically [MaxQuant](https://www.maxquant.org) exports quantitation data on the level of consensus-proteins by default into a folder called 'txt', with a file always called 'proteinGroups.txt'.
Data exported from [MaxQuant](https://www.maxquant.org) can get imported (and normalized) using `readMaxQuantFile()`,
in a standard case one needs only to provide the path to the file 'proteinGroups.txt' which can be found in the _combined/txt/_ folder produced by MaxQuant.
gz-compressed files can be read, too (as in the example below the file 'proteinGroups.txt.gz').
The argument _specPref_ allows giving further details about expected (primary) species, it defaults to working with human proteins.
To get started, let's just set it to _NULL_ for ignoring.
```{r readMaxQuant1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
## number of lines and columns of quantitation data
dim(dataMQ$quant)
```
##### Adding Meta-Data at Import (Example MaxQuant) {#ReadMaxQuantWithMetaData}
Similarly we can also add directly information about principal species, contaminants, special groups of proteins and add sdrf annotation (if existing) directly when reading the data.
Setting customized tags according to species or other search-terms can be done using the argument _specPref_.
In the example below we define a main species (tags are made by comparing to the species information initially given by the fasta)
and we define a custom group of proteins by their Uniprot-Accessions (here the UPS1 spike-in).
Then, the content of argument _specPref_ will get searched in multiple types of annotation (if available from the initial Fasta).
By setting _suplAnnotFile=TRUE_ the import function will also look for files (by default produced by MaxQuant as 'summary.txt' and 'parameters.txt')
giving more information about experiment and samples and integrate this to the output.
(This time let's not display the plot of distributions; it's the same plot as above, see argument _plotGraph_.)
```{r readMaxQuant2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The grouping of replicates
grp9 <- rep(1:9,each=3)
head(grp9)
## special group of proteins (we want to differentiate/highlight later on)
UPS1ac <- c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165", "P00709", "P06732",
"P12081", "P61626", "Q15843", "P02753", "P16083", "P63279", "P01008", "P61769", "P55957", "O76070",
"P08263", "P01344", "P01127", "P10599", "P99999", "P06396", "P09211", "P01112", "P01579", "P02787",
"O00762", "P51965", "P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375")
specPrefMQ <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike=UPS1ac)
dataMQ <- readMaxQuantFile(path1, specPref=specPrefMQ, suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=FALSE), gr=grp9, plotGraph=FALSE)
## the quantification data is the same as before
dim(dataMQ$quant)
```
Now we can access the special tags in the annotation part of the resulting object :
```{r readMaxQuant3, echo=TRUE}
## count of tags based on argument specPref
table(dataMQ$annot[,"SpecType"])
```
This information can be used automatically later on for assigning different symbols and/or colors when drawing Volcano-plots or PCA.
##### Adding Experimental Setup (Sdrf) to Meta-Data at Import (Example MaxQuant)
To further analyze the data from an experiment, the user typically also needs to know/declare different groups of samples (eg who is a replicate of whom).
In the simplest case this can be done via the argument _gr_, as shown above.
By the way, if _gr_ is provided it gets priority over other automatic mining results.
The import-functions from this package try to help you in multiple ways to find out more about the experimental details.
Most quantitation software (like MaxQuant and ProteomeDiscoverer) also produce files/documentation about experimental annotation specified by the user.
These files may be automatically read and mined via argument _suplAnnotFile=TRUE_ to gather information about groups of samples.
The project [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata) aims to provide a framework
of providing a uniform format for documenting experimental meta-data ([sdrf](#ImportSdrf)).
If sdrf-annotation (see [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata)) exists on [Pride](https://www.ebi.ac.uk/pride/), it can be imported, too.
The information on the experimental setup will be mined automatically to design groups of samples (ie levels of covariant factors).
If sdrf has not been prepared, the user may also simply provide a data.frame formatted like sdrf from Pride.
Finally, if none of the above is available, the column-names of the quantitation columns will be mined for hints about groups of replicates (in particular when using MaxQuant).
For a bit more complex example of using _readMaxQuantFile()_ or integrating other annotation information,
please look at the vignette "UPS1 spike-in Experiments" also available to [this package](https://CRAN.R-project.org/package=wrProteo).
The simplest way of adding sdrf annotation consists in adding the project ID from [Pride](https://www.ebi.ac.uk/pride/), as shown below.
The argument _groupPref_ allows defining further adjustments/choices.
The import-function will first check if this is a local file, and if not, try to download it from Pride (if available) and further mine the information.
```{r readMaxQuant4, echo=TRUE}
dataMQ <- readMaxQuantFile(path1, specPref=specPrefMQ, sdrf="PXD001819", suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=FALSE), plotGraph=FALSE)
```
##### Exporting Experimental Setup from MaxQuant to Draft-Sdrf {#ExportDraftSdrf}
As mentioned, the [Proteomics Sample Metadata Format - sdrf](https://github.com/bigbio/proteomics-sample-metadata) is an effort for standardizing experimental meta-data.
Many of the typically documented ones may already have been entered when launching [MaxQuant](https://www.maxquant.org) and can be exported as a draft Sdrf-file.
All main columns for standard experiments are present in the file, though some columns will have to be completed by the user (by any text-editor) for submitting to Pride.
```{r exportSdrfDraftMaxQuant5, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaMQ <- "proteinGroups.txt.gz"
dataMQ2 <- readMaxQuantFile(path1, file=fiNaMQ, refLi="mainSpe", sdrf=FALSE, suplAnnotFile=TRUE)
## Here we'll write simply in the current temporary directory of this R-session
exportSdrfDraft(dataMQ2, file.path(tempdir(),"testSdrf.tsv"))
```
#### MaxQuant : Import Peptide Data {#ReadMaxQuantPetides}
Similarly it is possible to read the peptide-data file (by default called _'peptides.txt'_).
In the example below we'll provide a custom file-name (to a tiny example non-representative for biological interpretation).
The data get imported into a structure similar to the protein-level data; quantitations on peptide level are median-normalized by default, and sample-setup from sdrf-files may be added, too.
```{r readMaxQuantPeptides, echo=TRUE}
MQpepFi1 <- "peptides_tinyMQ.txt.gz"
path1 <- system.file("extdata", package="wrProteo")
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST", spec2="HUMAN")
dataMQpep <- readMaxQuantPeptides(path1, file=MQpepFi1, specPref=specPref1, tit="Tiny MaxQuant Peptides")
summary(dataMQpep$quant)
```
If the argument _suplAnnotFile_ is set to _TRUE_, the files 'summary.txt' and 'parameters.txt' (produced by MaxQuant by default) will be searched in the same directory.
If these files are available and seem to correspond to the quantitation data read in the main part of the function,
supplemental information about experimental setup will be mined and added to the resulting object.
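As a minimal sketch (not run here; this assumes the matching 'summary.txt' and 'parameters.txt' files are indeed present next to the peptide-file) :
```{r readMaxQuantPeptides2, echo=TRUE, eval=FALSE}
dataMQpep2 <- readMaxQuantPeptides(path1, file=MQpepFi1, specPref=specPref1, suplAnnotFile=TRUE, tit="Tiny MaxQuant Peptides")
```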
.
### ProteomeDiscoverer : Import Protein Quantification {#ReadProteomeDiscoverer}
[Proteome Discoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812) is commercial software from ThermoFisher (www.thermofisher.com),
see also [Orsburn, 2021](https://doi.org/10.3390/proteomes9010015).
Data exported from [Proteome Discoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812) can get imported (typically the *xx_Proteins.txt* file)
using `readProteomeDiscovererFile()`, for details please see the vignette "UPS-1 spike-in Experiments" also available with [this package](https://CRAN.R-project.org/package=wrProteo).
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readProteomeDiscovererProt1, echo=TRUE}
fiNa <- "tinyPD_allProteins.txt.gz"
dataPD <- readProteomeDiscovererFile(file=fiNa, path=path1, suplAnnotFile=FALSE, plotGraph=FALSE)
summary(dataPD$quant)
```
Please note, that quantitation data exported from ProteomeDiscoverer frequently have very generic column-names (increasing numbers).
When calling the import-function they can be replaced by more meaningful names, either using the argument _sampNa_
(thus, much care should be taken about the order when preparing the vector given via _sampNa_ !),
or from reading the default annotation in the file _'InputFiles.txt'_ (if exported) or, from sdrf-annotation (if available).
In this case, supplemental information about experimental setup will be mined and added to the resulting object.
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
For a more complex example of using _readProteomeDiscovererFile()_ please see the vignette _'UPS1 spike-in Experiments'_ of [this package](https://CRAN.R-project.org/package=wrProteo).
#### ProteomeDiscoverer : Import Peptide Data {#ReadProteomeDiscovererPetides}
Similarly it is possible to read the peptide-data files exported by ProteomeDiscoverer using the function `readProtDiscovererPeptides()`.
The data get imported into a structure similar to the protein-level data; quantitations on peptide level are median-normalized by default, and sample-setup from sdrf-files may be added, too.
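A minimal sketch (not run) might look as follows; the file-name below is purely hypothetical, and we assume the same _file_ and _path_ arguments as for `readProteomeDiscovererFile()` :
```{r readProtDiscovererPep1, echo=TRUE, eval=FALSE}
## hypothetical file-name, adjust to your own export
dataPDpep <- readProtDiscovererPeptides(file="myProject_PeptideGroups.txt", path=path1, plotGraph=FALSE)
summary(dataPDpep$quant)
```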
### DIA-NN: Import Protein Quantification Data {#ReadDiaNN}
[DIA-NN](https://github.com/vdemichev/DiaNN) is free software provided by the by Demichev, Ralser and Lilley labs,
see also [Demichev et al, 2020](https://doi.org/10.1038/s41592-019-0638-x).
Typically [DIA-NN](https://github.com/vdemichev/DiaNN) allows exporting quantitation data on level of consensus-proteins as tsv-formatted files.
Such data can get imported (and normalized) using `readDiaNNFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readDiaNN1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
diaNNFi1 <- "tinyDiaNN1.tsv.gz"
## This file contains far fewer identifications than one may usually obtain
path1 <- system.file("extdata", package="wrProteo")
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN")
dataNN <- readDiaNNFile(path1, file=diaNNFi1, specPref=specPref1, tit="Tiny DIA-NN Data", plotGraph=FALSE)
summary(dataNN$quant)
```
#### DIA-NN : Import Peptide Data {#ReadDiaNNPetides}
Similarly data from [DIA-NN](https://github.com/vdemichev/DiaNN) on peptide level can get imported (and normalized) using `readDiaNNPeptides()`.
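A minimal sketch (not run; the file-name is hypothetical, and the arguments are assumed analogous to `readDiaNNFile()`) :
```{r readDiaNNPep1, echo=TRUE, eval=FALSE}
## hypothetical file-name, adjust to your own export
dataNNpep <- readDiaNNPeptides(path1, file="myDiaNNpeptides.tsv", specPref=specPref1, plotGraph=FALSE)
summary(dataNNpep$quant)
```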
### Proline : Import Protein Quantification Data {#ReadProline}
[Proline](https://www.profiproteomics.fr/proline/) is free software provided by the Profi-consortium,
see also [Bouyssié et al 2020](https://doi.org/10.1016/j.jprot.2015.11.011).
Data exported from [Proline](https://www.profiproteomics.fr/proline/) (xlsx, csv or tsv format) can get imported using `readProlineFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readProlineProt1, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNa <- "exampleProlineABC.csv.gz" # gz compressed data can be read, too
dataPL <- readProlineFile(file=fiNa, path=path1, plotGraph=FALSE)
summary(dataPL$quant[,1:8])
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
For a more complex example of using _readProlineFile()_ please see the vignette _'UPS1 spike-in Experiments'_ from [this package](https://CRAN.R-project.org/package=wrProteo).
### Fragpipe : Import Protein Quantification Data {#ReadFragpipe}
[Fragpipe](https://fragpipe.nesvilab.org) is a database search tool for peptide identification, open-source developed by the [Nesvizhskii lab](https://www.nesvilab.org),
see eg [Kong et al 2017](https://doi.org/10.1038/nmeth.4256), [da Veiga Leprevost; et al 2020](https://doi.org/10.1038/s41592-020-0912-y) or other related publications.
Data exported from [Fragpipe](https://fragpipe.nesvilab.org) (in tsv format) can get imported using `readFragpipeFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readFragpipe1, echo=TRUE}
FPproFi1 <- "tinyFragpipe1.tsv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="MOUSE")
dataFP <- readFragpipeFile(path1, file=FPproFi1, specPref=specPref1, tit="Tiny Fragpipe Example", plotGraph=FALSE)
summary(dataFP$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### MassChroQ : Import Protein Quantification Data
[MassChroQ](http://pappso.inrae.fr/bioinfo/masschroq/) is free open software provided by the [PAPPSO](http://pappso.inrae.fr),
see also [Valot et al 2011](https://doi.org/10.1002/pmic.201100120).
Initial quantifications are made on the peptide level and should be normalized and summarized using the R-package MassChroqR, which is also publicly available from the [PAPPSO](http://pappso.inrae.fr/bioinfo/).
Quantifications at protein-level can be saved as matrix into an RData-file or written to tsv, csv or txt files for following import into the framework of this package
using `readMassChroQFile()`, for details please see the help-page to this function.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readMassChroq1, echo=TRUE}
MCproFi1 <- "tinyMC.RData"
dataMC <- readMassChroQFile(path1, file=MCproFi1, tit="Tiny MassChroq Example", plotGraph=FALSE)
summary(dataMC$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### AlphaPept : Import Protein Quantification Data {#ReadAlphaPeptide}
[AlphaPept](https://github.com/MannLabs/alphapept) is a free open-source search tool for peptide identification created by the Mann-lab,
see eg [Strauss et al 2021](https://doi.org/10.1101/2021.07.23.453379).
Data exported from AlphaPept (in csv format) can get imported using `readAlphaPeptFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readAlphaPept1, echo=TRUE}
APproFi1 <- "tinyAlpaPeptide.csv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK")
dataAP <- readAlphaPeptFile(path1, file=APproFi1, specPref=specPref1, tit="Tiny AlphaPept Example", plotGraph=FALSE)
summary(dataAP$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### Wombat-P : Import Protein Quantification Data {#ReadWombatP}
[Wombat-P](https://github.com/wombat-p) is a free open-source search tool for peptide identification created by an Elixir-consortium,
see also [Bouyssie et al 2023](https://doi.org/10.1021/acs.jproteome.3c00636).
Data exported from Wombat-P (in csv format) can get imported using `readWombatNormFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readWombarP1, echo=TRUE}
WBproFi1 <- "tinyWombCompo1.csv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST")
dataWB <- readWombatNormFile(path1, file=WBproFi1, specPref=specPref1, tit="Tiny Wombat-P Example", plotGraph=FALSE)
summary(dataWB$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### OpenMS : Import Protein Quantification Data
[OpenMS](https://openms.de/) is free open software provided by the deNBI Center for integrative Bioinformatics,
see also [Rost et al 2016](https://doi.org/10.1038/nmeth.3959).
Peptide-level data exported as csv get summarized from peptide to protein level and further normalized using `readOpenMSFile()`; for details please see the help-page of this function.
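A minimal sketch (not run; the file-name is hypothetical, and the arguments are assumed analogous to the other import-functions shown above) :
```{r readOpenMS1, echo=TRUE, eval=FALSE}
## hypothetical file-name, adjust to your own export
dataOM <- readOpenMSFile(file="myOpenMSpeptides.csv", path=path1, plotGraph=FALSE)
summary(dataOM$quant)
```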
### Importing Sdrf Meta-Data {#ImportSdrf}
The project [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata) aims to provide a framework
of providing a uniform format for documenting experimental meta-data (sdrf-format).
As mentioned at the section for reading [MaxQuant](#ReadMaxQuant), most import-functions
from [wrProteo](https://CRAN.R-project.org/package=wrProteo) can directly import (if available) the experimental setup from sdrf,
or from files produced by the various quantitation software (as shown with [MaxQuant](#ReadMaxQuant)).
To do this separately, or if you need to read an alternative annotation file, you may use `readSampleMetaData()`.
If sdrf annotation is available on Pride/github this information can be read and directly integrated with software specific annotation using the import-functions shown above or as shown below.
Of course, the user should always make sure the annotation really corresponds to the current experimental data !
When adding the quantitation-data using the argument _abund_, the function also checks if the number of samples fits and
tries to align the order of the meta-data to that of the quantitation data (based on the raw files), since they are not necessarily in the same order.
```{r readSampleMetaData2, echo=TRUE}
MQsdrf001819Setup <- readSampleMetaData(quantMeth="MQ", sdrf="PXD001819", path=path1, suplAnnotFile="summary.txt.gz", abund=dataMQ$quant)
str(MQsdrf001819Setup)
```
However, the recommended and most convenient way is to add/import meta-data directly when importing quantitation-data (eg using _readMaxQuantFile()_, _readProteomeDiscovererFile()_, etc).
## Combining Proteomics Projects {#FuseProteomicsProjects}
If needed, function `fuseProteomicsProjects()` allows combining up to 3 separate data-sets previously imported using wrProteo.
The user should think very carefully about how and why to fuse multiple separately imported data-sets, which might each have their own characteristics.
Note, the function presented here does not re-normalize the combined data; the user should investigate the data and decide on suitable strategies
for further [normalization](#Normalization).
Data from different software may not contain exactly the same proteins or peptides; only the common identifiers are retained by this approach.
The user should pay attention to which identifier should be used and make sure it does not appear multiple times in the same data-set.
If, however, some IDs appear multiple times (ie as separate lines) in the same data-set, the corresponding numeric data will be summarized to a single line.
This may have a noticeable effect on the subsequent biological interpretation.
Thus, it is very important to know your data and to understand when lines that appear with the same identifiers should/may be fused/summarized without
doing damage to the later biological interpretation ! The user may specify for each dataset the column of the protein/peptide-annotation to use
via the argument _columnNa_.
Then, this content will be matched as exact (identical) terms, so when combining data from different software special care should be taken !
```{r fuseProteomicsProjects1, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
dataMC <- readMassChroQFile(path1, file="tinyMC.RData", tit="Tiny MassChroq Example", plotGraph=FALSE)
dataFused <- fuseProteomicsProjects(dataMQ, dataMC)
str(dataFused$quant)
```
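If the default identifiers do not suit your data-sets, a sketch using the argument _columnNa_ might look as below; the column-name 'Accession' is hypothetical and must exist in the annotation of the data-sets concerned :
```{r fuseProteomicsProjects2, echo=TRUE, eval=FALSE}
## hypothetical annotation-column, adjust to your own data
dataFused2 <- fuseProteomicsProjects(dataMQ, dataMC, columnNa="Accession")
```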
## Normalization {#Normalization}
As mentioned, the aim of normalization is to remove bias in data not linked to the original (biological) question.
The import functions presented above already run global median normalization by default.
When choosing a normalization procedure one should reflect what additional information may be available to guide normalization.
For example, it may be very useful to exclude classical protein contaminants since they typically do not reflect the original biological material.
Overall, it is important to inspect the results of normalization, eg via graphical display of histograms, boxplots or violin-plots to compare distributions.
Multiple options exist for normalizing data, please look at the documentation provided with the import-functions introduced above.
Please note, that enrichment experiments (like IP) can quickly pose major problems to the choice of normalization approaches.
The function `normalizeThis()` from the package [wrMisc](https://CRAN.R-project.org/package=wrMisc) is run internally.
It can be used to run additional normalization, if needed.
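As a minimal sketch (assuming the argument _method_ of `normalizeThis()`; here simply re-running a global median normalization on the log2-data) :
```{r normalizeThis1, echo=TRUE, eval=FALSE}
quantMedNorm <- wrMisc::normalizeThis(dataMQ$quant, method="median")
summary(quantMedNorm)
```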
Different normalization procedures intervene with different 'aggressiveness', ie also with different capacity to change the initial data.
In general, it is suggested to start normalizing using 'milder' procedures, like global median, and to switch to more invasive methods if initial results do not seem satisfactory.
Beware, heavy normalization procedures may also alter the main information you want to analyze.
That is, some true biological changes may start to fade or disappear when inappropriate normalization is performed.
Please note, that normalization should be performed before [NA-imputation](#NA-imputation) to avoid introducing new bias in the group of imputed values.
## Imputation of NA-values {#NA-imputation}
In proteomics the quantitation of very low abundances is very challenging.
Proteins which are absent or of very low abundance typically appear in the results as 0 or NA.
Frequently this may be linked to the fact that no peak is detected in an MS-1 chromatogram (for a given LC elution-time) while other samples had a strong peak
at the respective place which led to successful MS-2 identification.
Please note, that the match-between-runs option in the various software tools allows one to considerably reduce the number of NAs.
To simplify their treatment, all 0 values are transformed to NA; anyway, they would not allow log2-transformation either.
Before replacing NA-values it is important to verify that such values may be associated with absent or very low abundances.
To do so, we suggest to inspect groups of replicate-measurements using `matrixNAinspect()`.
In particular, with multiple technical replicates of the same sample it is supposed that any variability observed is not linked to the sample itself.
So for each NA that occurs in the data we suggest
looking at what was reported for the same protein with the other (technical) replicates.
This brings us to the term of 'NA-neighbours' (quantifications for the same protein in replicates).
When drawing histograms of NA-neighbours one can visually inspect and verify that NA-neighbours are typically low abundance values,
however not necessarily the lowest values observed in the entire data-set.
```{r NA_MaxQuant, echo=TRUE}
## Let's inspect NA values as graphic
matrixNAinspect(dataMQ$quant, gr=grp9, tit="Histogram of Protein Abundances and NA-Neighbours")
```
So only if the hypothesis of NA-neighbours as typically low abundance values gets confirmed by visual inspection of the histograms,
one may safely proceed to replacing them by low random values.
If one uses a unique (very) low value for NA-replacements, this will quickly pose a problem
at the level of t-tests to look for proteins changing abundance between two or more groups of samples.
Therefore it is common practice to draw random values from a Normal distribution representing this lower end of abundance values.
Nevertheless, the choice of the parameters of this Normal distribution is very delicate.
This package proposes several related strategies/options for NA-imputation.
First, the classical imputation of NA-values using Normal distributed random data is presented.
The mean value for the Normal data can be taken from the median or mode of the NA-neighbour values,
since (in the case of technical replicates) NA-neighbours tell us what these values might have been, and thus we can model a distribution around them.
Later in this vignette, a more elaborate version based on repeated imputations to obtain more robust results will be presented.
The function `matrixNAneighbourImpute()` proposed in this package offers automatic selection of these parameters, which have been tested in a number of different projects.
However, this choice should be checked by critically inspecting the histograms of 'NA-neighbours' (ie successful quantitation in other
replicate samples of the same protein) and the final resulting distribution. Initially all NA-neighbours are extracted.
It is also worth mentioning that in the majority of data-sets encountered, such NA-neighbours form skewed distributions.
Instances with more than one NA-value per group may be considered even more representative, but of course fewer successfully quantified values remain.
Thus a primary choice is made: If the selection of (min) 2 NA-values per group has more than 300 values, this distribution will be used as base to model
the distribution for drawing random values. In this case, by default the 0.18 quantile of the 2 NA-neighbour distribution will be used as mean for
the new Normal distribution used for NA-replacements. If the number of 2-NA-neighbours is below 300, (by default) the 0.1 quantile of all NA-neighbour values will be used.
Of course, the user has also the possibility to use custom choices for these parameters.
The final replacement is done on all NA-values. This also includes proteins which are all NA in a given condition, as well as instances of mixed successful quantitation and NA-values.
```{r NArepl_MaxQuant, echo=TRUE}
## MaxQuant simple NA-imputation (single round)
dataMQimp <- matrixNAneighbourImpute(dataMQ$quant, gr=grp9, tit="Histogram of Imputed and Final Data")
```
However, imputing using Normal-distributed random data also brings the risk of occasional extreme values.
In the extreme case it may happen that a given protein is all NA in one group, and by chance the random values turn out to be rather high.
Then, the final group mean of imputed values may even be higher than the mean of another group with successful quantitations.
Of course, in this case it would be a mis-interpretation to consider the protein in question upregulated in a sample where all values for this protein were NA.
To circumvent this problem there are 2 options : 1) one may use special filtering schemes to exclude such constellations from final results or 2)
one could repeat replacement of NA-values numerous times.
The function _testRobustToNAimputation()_ allows such repeated replacement of NA-values. For details, see also the following section.
For other packages dealing with missing values (NAs), please also look at the [missing data task-view](https://CRAN.R-project.org/view=MissingData) on CRAN.
## Filtering {#Filtering}
The main aim of filtering in omic-data analysis is to remove proteins/genes/lines which are for obvious reasons not suitable for further analysis.
Invalid or low-quality measures are not suitable for further analysis and may thus be removed.
Frequently, additional information is used to justify the removal of certain proteins/genes/lines.
One very common element in filtering is the observation that very low abundance measures are typically less precise than medium or high abundance values.
Thus, a protein/gene with all abundance measures at the very low end of the global scale may merely appear to change abundance due to the elevated variance of low abundance measures.
However, most statistical tests are not well suited to handle this elevated variance of low abundance measures.
In consequence, it is common to remove or disqualify proteins/genes/lines which are at high risk of yielding false positive results.
In the context of proteomics, the number of samples with NAs (non-quantified peptides/proteins) for a given protein/peptide also represents an interesting starting point : If almost all values finally compared are the result of (random) imputation, any apparent change in abundance of such proteins/peptides may rather reflect rare stochastic events of NA-imputation.
Please note that rather aggressive filtering may severely reduce the ability to identify on/off situations, which may very well occur in most biological settings.
General filtering can be performed using `presenceFilt()` (from package [wrMisc](https://CRAN.R-project.org/package=wrMisc)).
Other filtering of proteins/peptides/lines based on the annotation (eg for hypothetical proteins etc) may be done using _filterLiColDeList()_ (also from package [wrMisc](https://CRAN.R-project.org/package=wrMisc)).
Initial information for filtering is already collected by the import-functions (_readMaxQuantFile()_, _readProteomeDiscovererFile()_, _readProlineFile()_, _readOpenMSFile()_ etc).
This information can then be used by the function _combineMultFilterNAimput()_, which is integrated into _testRobustToNAimputation()_ (see section below) to conveniently include filtering-aspects.
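For instance, a minimal call to `presenceFilt()` could look as follows (default arguments assumed; the returned object is a logical matrix indicating, for each pairwise comparison, the lines with sufficient non-NA values) :
```{r filterSketch, echo=TRUE, eval=FALSE}
filtOK <- wrMisc::presenceFilt(dataMQ$quant, grp=grp9)
head(filtOK)    # one logical column per pairwise comparison
```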
## Statistical Testing {#StatisticalTesting}
The [t-test](https://en.wikipedia.org/wiki/Student%27s_t-test) remains the main statistical test used, as in many other cases of omics, too.
Statistical testing in the context of proteomics data poses challenges similar to transcriptomics :
Many times the number of replicate-samples is fairly low and the inter-measurement variability quite high.
In some unfortunate cases proteins with rather constant quantities may appear as false positives when searching for proteins whose abundance changes between two groups of samples : If the apparent variability is by chance too low, the respective standard-deviations will be low and a plain t-test may yield overly enthusiastic p-values.
Besides stringent filtering (previous section of this vignette), the use of shrinkage when estimating the intra-group/replicate variance from the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html) turns out very helpful,
see also [Ritchie et al 2015](https://doi.org/10.1093/nar/gkv007).
In this package the function _eBayes()_ from limma has been used and adapted to proteomics.
The function `testRobustToNAimputation()` allows running multiple cycles of NA-imputation and statistical testing with the aim of providing stable imputation and testing results.
It performs NA-imputation and statistical testing (after repeated imputation) between all groups of samples at the same time (as it would be inefficient to separate these two tasks). The tests underneath apply shrinkage from the empirical Bayes procedure of the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html). In addition, various formats of multiple test correction can be directly added to the results : Benjamini-Hochberg FDR, local false discovery rate (lfdr, using the package [fdrtool](https://CRAN.R-project.org/package=fdrtool), see [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209)), or modified testing by [ROTS](https://bioconductor.org/packages/release/bioc/html/ROTS.html), etc.
The fact that a single round of NA-imputation may provoke false positives as well as false negatives, made it necessary to combine this (iterative) process of NA-imputation and subsequent testing in one single function.
```{r testRobustToNAimputation_MQ1, echo=TRUE}
## Impute NA-values repeatedly and run statistical testing after each round of imputations
testMQ <- testRobustToNAimputation(dataMQ, gr=grp9)
## Example of the data after repeated NA-imputation
head(testMQ$datImp[,1:6])
```
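The output is a list containing, among others, one matrix per type of multiple-testing correction, with one column per pairwise comparison. For example, the BH-adjusted p-values may be accessed like this :
```{r testResultsAccess, echo=TRUE, eval=FALSE}
## One column per pairwise comparison :
colnames(testMQ$BH)
## Number of proteins passing 5% FDR in the first comparison :
sum(testMQ$BH[,1] < 0.05, na.rm=TRUE)
```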
## Data Exploration With Graphical Support {#DataExploreGraphics}
### PCA {#PCA}
Briefly, principal components analysis ([PCA](https://en.wikipedia.org/wiki/Principal_component_analysis)) seeks to decompose the data along the axes defined by all samples, searching for the axis-combinations with the highest degree of correlation.
In principle one could also run PCA along the rows, ie the proteins, but their number is typically so high that the resulting plots get too crowded.
In the context of high-throughput experiments, like proteomics, PCA allows extracting important information about how the different samples are related (ie similar).
This covers of course the real differences between different biological conditions, but also additional bias introduced as (technical) artifacts.
Thus, such plots serve for quality control (in particular to identify outlier-samples, eg due to degraded material) as well as for the biological interpretation.
Normally one would immediately check the normalized data by PCA before running statistical tests.
However, as stated in other places, PCA can't handle missing values (ie _NA_).
Thus, all proteins having an NA in just one sample won't be considered during PCA.
This would mask a significant number of proteins in numerous proteomics experiments.
Thus, it may be preferable to run PCA only after NA-imputation.
However, since in this package statistical testing was coupled to the repeated NA-imputation, it may be better to use the NA-imputations made for the statistical testing (in the section above).
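As a quick check, one may count how many proteins would be excluded from a PCA on non-imputed data (a one-line sketch) :
```{r pcaNAcheck, echo=TRUE, eval=FALSE}
sum(rowSums(is.na(dataMQ$quant)) >0)    # number of proteins with at least one NA
```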
Here we'll use the function `plotPCAw()` from the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
```{r PCA1MQ, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
# PCA on all quantified proteins (NAs imputed)
plotPCAw(testMQ$datImp, sampleGrp=grp9, tit="PCA on Protein Abundances (MaxQuant,NAs imputed)", rowTyName="proteins", useSymb2=0)
```
Please note, the vignette dedicated to spike-in experiments ("UPS-1 spike-in Experiments") presents a slightly different way of making PCA-plots for this specific type of experiment/data-set.
### MA-plot {#MAplot}
MA-plots are mainly used for diagnostic purposes. Basically, an [MA-plot](https://en.wikipedia.org/wiki/MA_plot) displays the log-Fold-Change versus the average abundance.
We'll use the function `MAplotW()` from the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
```{r MAplot1, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE}
# By default this plots the first of all pairwise comparisons
MAplotW(testMQ)
```
#####
Now for the second group of pair-wise comparisons, plus adding names of proteins passing threshold:
```{r MAplot2, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE}
res1 <- NULL
MAplotW(testMQ, useComp=2, namesNBest="passFC")
```
### Volcano-Plot {#VolcanoPlot}
A [Volcano-plot](https://en.wikipedia.org/wiki/Volcano_plot_\(statistics\)) allows comparing the simple fold-change (FC) to the outcome of a statistical test.
Frequently we can observe that some proteins show very small FC but enthusiastic p-values and, subsequently, enthusiastic FDR-values.
However, proteins with such small FC are generally not considered reliable results, therefore it is common practice to add an additional FC-threshold,
typically a 1.5 or 2 fold-change.
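Since testing is run on log2-transformed data, these thresholds translate easily :
```{r fcThreshold, echo=TRUE, eval=FALSE}
## On log2-scale a 1.5 or 2 fold-change corresponds to :
log2(c(FC1.5=1.5, FC2=2))    # ie ca 0.58 and 1.0
```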
The number of proteins retained by pair-wise comparison :
```{r VolcanoPlot1MQ, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE}
## by default the first pairwise comparison is taken, here we choose the 2nd (via 'useComp')
## using the argument 'namesNBest' we can add names from the annotation
VolcanoPlotW(testMQ, useComp=2, namesNBest="passFDR")
```
Additional Note : Volcano-plots may also help identifying bias in the data, in particular, to the question if normalization gave satisfactory results.
Based on the hypothesis of no global change used for normalization, normally, one would expect about the same number of down-regulated as up-regulated proteins.
In fact, this experiment is somewhat unusual, since one set of samples received a strong increase in abundance for 48 UPS1 proteins while the other proteins remained constant.
Thus, on the global scale there may be a (small) imbalance of abundances and the global median will reflect this, which can create some bias.
So, in this special case it might be better to perform normalization only based on the yeast proteins (which are assumed as constant),
as it has been performed in the vignette 'UPS-1 spike-in Experiments', a vignette which is entirely dedicated to UPS1 spike-in experiments.
## Reporting Results
Tables with results can either be created directly using _VolcanoPlotW()_ or, as shown below, using the function `extractTestingResults()`.
For example, let's look at the first of the pair-wise comparisons (the Volcano-plot above showed another pair-wise comparison):
The moderated t-test expressed as Benjamini-Hochberg FDR gave `r sum(testMQ$BH[,1] < 0.05,na.rm=TRUE)` proteins with
FDR < 0.05 for the comparison `r colnames(testMQ$BH)[1]`.
Since unfortunately many very low fold-change instances are amongst the results, one should add an additional filter for (too) low FC values.
This is common practice in most omics analysis when mostly technical replicates are run and/or the number of replicates is rather low.
```{r results1, echo=TRUE}
res1 <- extractTestingResults(testMQ, compNo=1, thrsh=0.05, FCthrs=2)
```
After FC-filtering for 2-fold (ie change of protein abundance to double or half) `r nrow(res1)` proteins remain.
```{r results2, echo=TRUE}
knitr::kable(res1[,-1], caption="5%-FDR (BH) Significant results for 1st pairwise set", align="c")
```
Please note that the column-name 'BH' refers to Benjamini-Hochberg FDR (several other options of multiple testing correction exist, too).
We can see that many UPS1 proteins are, as expected, among the best-ranking differential changes.
However, not all UPS1 proteins show up in the results as expected, and furthermore, a number of yeast proteins
(which were expected to remain constant !) were reported as differential, too.
The function _extractTestingResults()_ also allows to write the data shown above directly to a csv-file.
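If preferred, the filtered results may of course also be exported using base R (a simple sketch; writing to tempdir() to keep things tidy) :
```{r resultsExport, echo=TRUE, eval=FALSE}
write.csv(res1, file=file.path(tempdir(),"signifResults.csv"))
```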
## Further Steps
In case of standard projects one typically would like to find out more about the biological context of the proteins retained by the statistical analysis, their function and their interactors.
Such a list of significant proteins from a given project could later be tested for enrichment of [GO-functions](https://www.uniprot.org/help/gene_ontology) or for their inter-connectivity in PPI networks like [String](https://string-db.org).
There are multiple tools available on [Bioconductor](https://www.bioconductor.org) and [CRAN](https://cran.r-project.org) as well as outside of R to perform such analysis tasks.
In case of UPS1 spike-in experiments the subsequent analysis is different.
Suggestions for in depth-analysis of UPS1 spike-in are shown in the **vignette 'UPS-1 spike-in Experiments'** of [this package](https://CRAN.R-project.org/package=wrProteo).
***
# Protein Annotation
In most 'Omics' activities obtaining additional annotation may sometimes be a bit tricky.
In proteomics, most mass-spectrometry software will use the information provided in the Fasta-file as annotation (typically as provided from UniProt).
But this lacks, for example, chromosomal location information.
There are many repositories with genome-, gene- and protein-annotation, and most of them are linked, but sometimes the links get broken when
data-base updates are not done everywhere or are not followed by new re-matching.
The Fasta-files used initially for mass-spectrometry peak-identification
may not be completely up to date (sometimes gene- or protein-IDs do change or may even disappear) and thus will contribute a certain percentage of entries that are hard to link.
Globally two families of strategies for adding annotation exist :
a) Strategies using online-based resources for getting the most up-to-date information/annotation (like [biomaRt](https://bioconductor.org/packages/release/bioc/html/biomaRt.html)).
Despite the advantage of most up-to-date information there may be some downsides :
Interrogating databases may require more time to run all queries via internet-connections, and this strategy is vulnerable to broken links
(eg linked to the delay of updates between different types of databases that may need to get combined).
Furthermore, the results may typically change a bit when the queries get repeated (in particular when this concerns hypothetical peptides, pseudogenes etc).
When combining multiple interconnected resources it may be very tricky to document the precise version of all individual resources used.
b) Strategies based on using (local copies of) defined versions of databases.
Frequently, these databases can be downloaded/installed locally and thus allow faster queries and guarantee repeatability and comparability to other tools or studies.
A number of databases are available on [Bioconductor](https://www.bioconductor.org) formatted for R.
Besides, the tables from [UCSC](https://genome.ucsc.edu/cgi-bin/hgTables) are another option (which will be used here).
Note that tracking version-numbers may be much easier using this approach based on defined versions of databases.
And finally, results are 100% reproducible when the same (and easily documented) versions are used.
In the context of adding chromosomal annotation to a list of proteins here the following concept is developed :
Annotation-tables from [UCSC](https://genome.ucsc.edu/cgi-bin/hgTables) are available for a decent number of species and can be downloaded for convenient off-line search.
However, in the context of less common species we realized that the UniProt tables from UCSC often had a low yield in final matching.
For this reason we propose the slightly more complicated route which finally provided a much higher success-rate for finding chromosomal locations for a list of UniProt IDs.
First one needs to access/download from [UCSC](https://genome.ucsc.edu/cgi-bin/hgTables) the table corresponding to the species in question (fields _'clade','genome','assembly'_).
For _'group'_ choose 'Genes and Gene Predictions' and for _'track'_ choose 'Ensembl Genes', as table choose 'ensGene'.
In addition, it is possible to select either the entire genome-annotation or user-specified regions.
In terms of 'output-format' one may choose 'GTF' (slightly more condensed, no headers) or 'all fields from selected table'.
The following strategy for adding genomic location data using this package is presented here :
Locate (& download) organism annotation from UCSC, read into R (_readUCSCtable()_ ) -> from R export (non-redundant) 'enst'-IDs (still using _readUCSCtable()_ ),
get corresponding UniProt-IDs at UniProt site, save as file and import result into R (_readUniProtExport()_ ) -> (in R) combine with initial UCSC table (_readUniProtExport()_ ) .
The function `readUCSCtable()` is able to read such files downloaded from UCSC, compressed .gz files can be read, too (like in the example below).
In the example below we'll just look at chromosome 11 of the human genome - to keep this example small.
```{r readUCSC1, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
gtfFi <- file.path(path1, "UCSC_hg38_chr11extr.gtf.gz")
UcscAnnot1 <- readUCSCtable(gtfFi)
# The Ensembl transcript identifiers and their chromosomal locations :
head(UcscAnnot1)
```
However, this annotation does not provide protein IDs. In order to obtain the corresponding protein IDs an additional step is required :
Here we will use the batch search/conversion tool from [UniProt](https://www.uniprot.org/id-mapping).
In order to do so, we can export directly from _readUCSCtable()_ a small text-file which can be fed into the UniProt batch-search tool.
```{r readUCSC2, echo=TRUE}
# Here we'll redo reading the UCSC table, plus immediately write the file for UniProt conversion
# (in this vignette we write to tempdir() to keep things tidy)
expFi <- file.path(tempdir(),"deUcscForUniProt2.txt")
UcscAnnot1 <- readUCSCtable(gtfFi, exportFileNa=expFi)
```
Now everything is ready to go to [UniProt](https://www.uniprot.org/id-mapping) for retrieving the corresponding UniProt-IDs.
Since we exported Ensembl transcript IDs (ENSTxxx), select converting from 'Ensembl Transcript' to 'UniProtKB'.
Then, when downloading the conversion results, choose the tab-separated file format (compression is recommended); this may take several seconds (depending on the size).
It is suggested to rename the downloaded file so one can easily understand its content.
Note, that the function `readUniProtExport()` can also read .gz compressed files.
To continue this vignette we'll use a result which has been downloaded from [UniProt](https://www.uniprot.org/id-mapping) and renamed to 'deUniProt_hg38chr11extr.tab'.
One may also optionally define a specific genomic region of interest using the argument 'targRegion', here the entire chromosome 11 was chosen.
```{r readUniProt1, echo=TRUE}
deUniProtFi <- file.path(path1, "deUniProt_hg38chr11extr.tab")
deUniPr1 <- readUniProtExport(UniP=deUniProtFi, deUcsc=UcscAnnot1, targRegion="chr11:1-135,086,622")
str(deUniPr1)
```
The resulting data.frame (ie the column 'UniProtID') may be used to complement protein annotation after importing mass-spectrometry peak- and protein-identification results.
Obviously, using recent Fasta-files from UniProt for protein-identification will typically give better matching at the end.
You may note that sometimes Ensembl transcript IDs are written as 'enst00000410108' whereas at other places they may be written as 'ENST00000410108.5'.
The function _readUniProtExport()_ switches to a more flexible search mode, stripping off version-numbers and reading all IDs as lower-case, if initial direct matching reveals less than 4 hits.
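The effect of this flexible mode may be illustrated in one line (a sketch of the principle, not the internal code) :
```{r flexibleEnstMatch, echo=TRUE, eval=FALSE}
## strip the version-number and ignore case, so both notations match :
tolower(sub("\\.\\d+$", "", "ENST00000410108.5")) == "enst00000410108"    # TRUE
```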
Finally, it should be added, that of course several other ways of retrieving annotation do exist, too.
For example, as mentioned above, [Bioconductor](https://www.bioconductor.org) offers several packages dedicated to gene- and protein-annotation.
# Appendix
## Acknowledgements
The author would like to acknowledge the support by the [IGBMC](https://www.igbmc.fr) (CNRS UMR 7104, Inserm U 1258, UdS), [CNRS](http://www.cnrs.fr/en), [Université de Strasbourg (UdS)](https://www.unistra.fr) and [Inserm](https://www.inserm.fr).
All colleagues from the [proteomics platform](https://proteomics.igbmc.fr) at the IGBMC work with great commitment to provide high quality mass-spectrometry data (including some of those used here).
The author wishes to thank the [CRAN-staff](https://CRAN.R-project.org) for all their help with new entries and their efforts in maintaining this repository of R-packages.
Furthermore, many very fruitful discussions with colleagues on national and international level have helped to improve the tools presented here.
Thank you for your interest. This package is constantly evolving; new features/functions may get added in the next version.
## Session-Info
For completeness :
\small
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
## ----install, echo=TRUE, eval=FALSE-------------------------------------------
# ## This is R code, you can run this to redo all analysis presented here.
# ## If not already installed, you'll have to install wrMisc and wrProteo first.
# install.packages("wrMisc")
# install.packages("wrProteo")
# ## These packages are used for the graphics
# install.packages("wrGraph")
# install.packages("RColorBrewer")
#
# ## Installation of limma from Bioconductor
# if(!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
# BiocManager::install("limma")
#
# ## You can also see all vignettes for this package by typing :
# browseVignettes("wrProteo")    # ... and then select the html output
## ----setup, echo=TRUE, messages=FALSE, warnings=FALSE-------------------------
## Let's assume this is a fresh R-session
library(knitr)
library(wrMisc)
library(wrGraph)
library(wrProteo)
# Version number for wrProteo :
packageVersion("wrProteo")
## ----metaData1, echo=TRUE-----------------------------------------------------
## Read meta-data from github.com/bigbio/proteomics-metadata-standard/
pxd001819meta <- readSdrf("PXD001819")
## The concentration of the UPS1 spike-in proteins in the samples
if(length(pxd001819meta) >0) {
UPSconc <- sort(unique(as.numeric(wrMisc::trimRedundText(pxd001819meta$characteristics.spiked.compound.)))) # trim to get to 'essential' info
} else {
UPSconc <- c(50, 125, 250, 500, 2500, 5000, 12500, 25000, 50000) # in case access to github failed
}
## ----functions1, echo=TRUE----------------------------------------------------
## A few elements and functions we'll need later on
methNa <- c("ProteomeDiscoverer","MaxQuant","Proline")
names(methNa) <- c("PD","MQ","PL")
## The accession numbers for the UPS1 proteins
UPS1 <- data.frame(ac=c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165",
"P00709", "P06732", "P12081", "P61626", "Q15843", "P02753", "P16083", "P63279",
"P01008", "P61769", "P55957", "O76070", "P08263", "P01344", "P01127", "P10599",
"P99999", "P06396", "P09211", "P01112", "P01579", "P02787", "O00762", "P51965",
"P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375"),
species=rep("Homo sapiens", 48),
name=NA)
## ----functions2, echo=TRUE----------------------------------------------------
## additional functions
replSpecType <- function(x, annCol="SpecType", replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")), silent=TRUE) {
## rename $annot[,"SpecType"] to more specific names
chCol <- annCol[1] %in% colnames(x$annot)
if(chCol) { chCol <- which(colnames(x$annot)==annCol[1])
chIt <- replBy[,1] %in% unique(x$annot[,chCol]) # check items to replace if present
if(any(chIt)) for(i in which(chIt)) { useLi <- which(x$annot[,chCol] %in% replBy[i,1]); x$annot[useLi,chCol] <- replBy[i,2] }
} else if(!silent) message(" replSpecType: 'annCol' not found in x$annot !")
x }
plotConcHist <- function(mat, ref, refColumn=3:4, matCluNa="cluNo", lev=NULL, ylab=NULL, tit=NULL) {
## plot histogram like counts of UPS1 concentrations
if(is.null(tit)) tit <- "Frequency of UPS1 Concentrations Appearing in Cluster"
gr <- unique(mat[,matCluNa])
ref <- ref[,refColumn]
if(length(lev) <2) lev <- sort(unique(as.numeric(as.matrix(ref))))
if(length(ylab) !=1) ylab <- "Frequency"
tbl <- table(factor( as.numeric(ref[which(rownames(ref) %in% rownames(mat)),]), levels=lev))
graphics::barplot(tbl, las=1, beside=TRUE, main=paste(tit,gr), col=grDevices::gray(0.8), ylab=ylab)
}
plotMultRegrPar <- function(dat, methInd, tit=NULL, useColumn=c("logp","slope","medAbund","startFr"), lineGuide=list(v=c(-12,-10),h=c(0.7,0.75),col="grey"), xlim=NULL,ylim=NULL,subTit=NULL) {
## scatter plot logp (x) vs slope (y) for all UPS proteins, symbol by useColumn[4], color by hist of useColumn[3]
## dat (array) UPS1 data
## useColumn (character) 1st as 'logp', 2nd as 'slope', 3rd as median abundance, 4th as starting best regression from this point
fxNa <- "plotMultRegrPar"
#fxNa <- wrMisc::.composeCallName(callFrom,newNa="plotMultRegrPar")
if(length(dim(dat)) !=3) stop("invalid input, expecting as 'dat' array with 3 dimensions (proteins,Softw,regrPar)")
if(any(length(methInd) >1, methInd > dim(dat)[2], !is.numeric(methInd))) stop("invalid 'methInd'")
chCol <- useColumn %in% dimnames(dat)[[3]]
if(any(!chCol)) stop("argument 'useColumn' does not fit to 3rd dim dimnames of 'dat'")
useCol <- colorAccording2(dat[,methInd,useColumn[3]], gradTy="rainbow", revCol=TRUE, nEndOmit=14)
graphics::plot(dat[,methInd,useColumn[1:2]], main=tit, type="n",xlim=xlim,ylim=ylim) #col=1, bg.col=useCol, pch=20+lmPDsum[,"startFr"],
graphics::points(dat[,methInd,useColumn[1:2]], col=1, bg=useCol, pch=20+dat[,methInd,useColumn[4]])
graphics::legend("topright",paste("best starting from ",1:5), text.col=1, pch=21:25, col=1, pt.bg="white", cex=0.9, xjust=0.5, yjust=0.5)
if(length(subTit)==1) graphics::mtext(subTit,cex=0.9)
if(is.list(lineGuide) & length(lineGuide) >0) {if(length(lineGuide$v) >0) graphics::abline(v=lineGuide$v,lty=2,col=lineGuide$col)
if(length(lineGuide$h) >0) graphics::abline(h=lineGuide$h,lty=2,col=lineGuide$col)}
hi1 <- graphics::hist(dat[,methInd,useColumn[3]], plot=FALSE)
wrGraph::legendHist(sort(dat[,methInd,useColumn[3]]), colRamp=useCol[order(dat[,methInd,useColumn[3]])][cumsum(hi1$counts)],
cex=0.5, location="bottomleft", legTit="median raw abundance") #
}
## ----readMaxQuant, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
path1 <- system.file("extdata", package="wrProteo")
fiNaMQ <- "proteinGroups.txt.gz"
## We need to define the setup of species
specPrefMQ <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike=UPS1$ac)
dataMQ <- readMaxQuantFile(path1, file=fiNaMQ, specPref=specPrefMQ, refLi="mainSpe",
sdrf=c("PXD001819","max"), suplAnnotFile=TRUE, plotGraph=FALSE)
## ----readMaxQuant2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
## The number of lines and colums
dim(dataMQ$quant)
## A quick summary of some columns of quantitation data
summary(dataMQ$quant[,1:7]) # the first 7 cols
table(dataMQ$annot[,"SpecType"], useNA="always")
## ----readProteomeDiscoverer1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
path1 <- system.file("extdata", package="wrProteo")
fiNaPd <- "pxd001819_PD24_Proteins.txt.gz"
## Next, we define the setup of species
specPrefPD <- list(conta="Bos tauris|Gallus", mainSpecies="Saccharomyces cerevisiae", spike=UPS1$ac)
dataPD <- readProteomeDiscovererFile(file=fiNaPd, path=path1, refLi="mainSpe", specPref=specPrefPD,
sdrf=c("PXD001819","max"), plotGraph=FALSE)
## ----readProteomeDiscoverer2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
## The number of lines and colums
dim(dataPD$quant)
## A quick summary of some columns of quantitation data
summary(dataPD$quant[,1:7]) # the first 7 cols
table(dataPD$annot[,"SpecType"], useNA="always")
## ----readProline, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
path1 <- system.file("extdata", package="wrProteo")
fiNaPl <- "pxd001819_PL.xlsx"
specPrefPL <- list(conta="_conta", mainSpecies="Saccharomyces cerevisiae", spike=UPS1$ac)
dataPL <- readProlineFile(fiNaPl, path=path1, specPref=specPrefPL, normalizeMeth="median", refLi="mainSpe",
sdrf=c("PXD001819","max"), plotGraph=FALSE)
## ----postTreatmPL, echo=TRUE--------------------------------------------------
head(colnames(dataPL$raw), 7)
dataPL <- cleanListCoNames(dataPL, rem=c("Levure2ug+ UPS1-"), subst=cbind(c("fmol","mol-"), c("000amol","mol_R")), mathOper="/2")
## let's check the result
head(colnames(dataPL$raw),8)
## ----readProlineInfo, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE----
## The number of lines and colums
dim(dataPL$quant)
## A quick summary of some columns of quantitation data
summary(dataPL$quant[,1:8]) # the first 8 cols
table(dataPL$annot[,"SpecType"], useNA="always")
## ----rearrange1, echo=TRUE----------------------------------------------------
## bring all results (MaxQuant,ProteomeDiscoverer, ...) in same ascending order
## as reference we'll use the order from ProteomeDiscoverer, its output is already in a convenient order
sampNa <- colnames(dataPD$quant)
## it is more convenient to re-order columns this way in each project
dataPD <- corColumnOrder(dataPD, sampNames=sampNa) # already in good order
dataMQ <- corColumnOrder(dataMQ, replNames=paste0("UPS1_",sub("amol_", "amol_R", colnames(dataMQ$quant))), sampNames=sampNa) # incl. changed names
dataPL <- corColumnOrder(dataPL, replNames=paste0("UPS1_",colnames(dataPL$quant)), sampNames=sampNa) # incl. changed names
## ----postTreatm1, echo=TRUE---------------------------------------------------
## Need to rename $annot[,"SpecType"]
dataPD <- replSpecType(dataPD, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
dataMQ <- replSpecType(dataMQ, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
dataPL <- replSpecType(dataPL, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
## Need to address missing ProteinNames (UPS1) due to missing tags in Fasta
dataPD <- replMissingProtNames(dataPD)
dataMQ <- replMissingProtNames(dataMQ)
dataPL <- replMissingProtNames(dataPL)
table(dataPD$annot[,"SpecType"])
## synchronize order of groups
(grp9 <- dataMQ$sampleSetup$level)
names(grp9) <- rep(paste0(UPSconc,"amol"), each=3)
dataPL$sampleSetup$groups <- dataMQ$sampleSetup$groups <- dataPD$sampleSetup$groups <- grp9 # synchronize order of groups
## ----postTreatmCheck, echo=TRUE-----------------------------------------------
## extract names of quantified UPS1-proteins
NamesUpsPD <- dataPD$annot[which(dataPD$annot[,"SpecType"]=="spike"), "Accession"]
NamesUpsMQ <- dataMQ$annot[which(dataMQ$annot[,"SpecType"]=="spike"), "Accession"]
NamesUpsPL <- dataPL$annot[which(dataPL$annot[,"SpecType"]=="spike"), "Accession"]
## ----postTreatmTables, echo=TRUE----------------------------------------------
tabS <- mergeVectors(PD=table(dataPD$annot[,"SpecType"]), MQ=table(dataMQ$annot[,"SpecType"]), PL=table(dataPL$annot[,"SpecType"]))
tabT <- mergeVectors(PD=table(dataPD$annot[,"Species"]), MQ=table(dataMQ$annot[,"Species"]), PL=table(dataPL$annot[,"Species"]))
tabS[which(is.na(tabS))] <- 0
tabT[which(is.na(tabT))] <- 0
kable(cbind(tabS[,2:1], tabT), caption="Number of proteins identified, by custom tags, species and software")
## ----metaData2, echo=TRUE-----------------------------------------------------
kable(cbind(dataMQ$sampleSetup$sdrf[,c(23,7,19,22)], groups=dataMQ$sampleSetup$groups))
## ----NA_ProteomeDiscoverer, echo=TRUE-----------------------------------------
## Let's inspect NA values from ProteomeDiscoverer as graphic
matrixNAinspect(dataPD$quant, gr=grp9, tit="ProteomeDiscoverer")
## ----NA_MaxQuant, echo=TRUE---------------------------------------------------
## Let's inspect NA values from MaxQuant as graphic
matrixNAinspect(dataMQ$quant, gr=grp9, tit="MaxQuant")
## ----NA_Proline, echo=TRUE----------------------------------------------------
## Let's inspect NA values from Proline as graphic
matrixNAinspect(dataPL$quant, gr=grp9, tit="Proline")
## ----nNA1, echo=TRUE----------------------------------------------------------
## Let's look at the number of NAs. Is there an accumulated number in lower UPS1 samples ?
tabSumNA <- rbind(PD=sumNAperGroup(dataPD$raw, grp9), MQ=sumNAperGroup(dataMQ$raw, grp9), PL=sumNAperGroup(dataPL$raw, grp9) )
kable(tabSumNA, caption="Number of NAs per group of samples", align="r")
## ----testProteomeDiscoverer, echo=TRUE----------------------------------------
testPD <- testRobustToNAimputation(dataPD, imputMethod="informed") # ProteomeDiscoverer
## ----testMaxQuant, echo=TRUE--------------------------------------------------
testMQ <- testRobustToNAimputation(dataMQ, imputMethod="informed") # MaxQuant , ok
## ----testProline, echo=TRUE---------------------------------------------------
testPL <- testRobustToNAimputation(dataPL, imputMethod="informed") # Proline
## ----testReorganize1, echo=TRUE-----------------------------------------------
dataPD$datImp <- testPD$datImp # recuperate imputed data to main data-object
dataMQ$datImp <- testMQ$datImp
dataPL$datImp <- testPL$datImp
## ----pairWise2, echo=TRUE-----------------------------------------------------
## The number of differentially abundant proteins passing 5% FDR (ProteomeDiscoverer and MaxQuant)
signCount <- cbind( sig.PD.BH=colSums(testPD$BH < 0.05, na.rm=TRUE), sig.PD.lfdr=if("lfdr" %in% names(testPD)) colSums(testPD$lfdr < 0.05, na.rm=TRUE),
sig.MQ.BH=colSums(testMQ$BH < 0.05, na.rm=TRUE), sig.MQ.lfdr=if("lfdr" %in% names(testMQ)) colSums(testMQ$lfdr < 0.05, na.rm=TRUE),
sig.PL.BH=colSums(testPL$BH < 0.05, na.rm=TRUE), sig.PL.lfdr=if("lfdr" %in% names(testPL)) colSums(testPL$lfdr < 0.05, na.rm=TRUE) )
table1 <- numPairDeColNames(testPD$BH, stripTxt="amol", sortByAbsRatio=TRUE)
table1 <- cbind(table1, signCount[table1[,1],])
rownames(table1) <- colnames(testMQ$BH)[table1[,1]]
kable(table1, caption="All pairwise comparisons and number of significant proteins", align="c")
## ----check2, echo=TRUE--------------------------------------------------------
resMQ1 <- extractTestingResults(testMQ, compNo=1, thrsh=0.05, FCthrs=2)
resPD1 <- extractTestingResults(testPD, compNo=1, thrsh=0.05, FCthrs=2)
resPL1 <- extractTestingResults(testPL, compNo=1, thrsh=0.05, FCthrs=2)
## ----pairWise3, fig.height=4.5, fig.width=9.5, fig.align="center", echo=TRUE----
par(mar=c(5.5, 4.7, 4, 1))
imageW(table1[,c("sig.PD.BH","sig.MQ.BH","sig.PL.BH" )], col=rev(RColorBrewer::brewer.pal(9,"YlOrRd")),
transp=FALSE, tit="Number of BH.FDR passing proteins by the quantification approaches")
mtext("Dark red for high number signif proteins", cex=0.75)
## ----pairWiseSelect2, echo=TRUE-----------------------------------------------
## Selection in Ramus paper
kable(table1[which(rownames(table1) %in% colnames(testPD$BH)[c(2,21,27)]),], caption="Selected pairwise comparisons (as in Ramus et al)", align="c")
## ----ROC_main1, echo=TRUE-----------------------------------------------------
## calculate AUC for each ROC
layout(1)
rocPD <- lapply(table1[,1], function(x) summarizeForROC(testPD, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
rocMQ <- lapply(table1[,1], function(x) summarizeForROC(testMQ, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
rocPL <- lapply(table1[,1], function(x) summarizeForROC(testPL, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
# we still need to add the names for the pair-wise groups:
names(rocPD) <- names(rocMQ) <- names(rocPL) <- rownames(table1)
## ----ROC_main2, echo=TRUE-----------------------------------------------------
AucAll <- cbind(ind=table1[match(names(rocPD), rownames(table1)),"index"], clu=NA,
PD=sapply(rocPD, AucROC), MQ=sapply(rocMQ, AucROC), PL=sapply(rocPL, AucROC) )
## ----ROC_biplot, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
try(biplot(prcomp(AucAll[,names(methNa)]), cex=0.7, main="PCA of AUC from ROC Curves"))
## ----ROC_segm, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE-----
## number of groups for clustering
nGr <- 5
## K-Means clustering
kMAx <- stats::kmeans(standardW(AucAll[,c("PD","MQ","PL")]), nGr)$cluster
table(kMAx)
AucAll[,"clu"] <- kMAx
## ----ROC_segm2, echo=TRUE-----------------------------------------------------
AucAll <- reorgByCluNo(AucAll, cluNo=kMAx, useColumn=c("PD","MQ","PL"))
AucAll <- cbind(AucAll, iniInd=table1[match(rownames(AucAll), rownames(table1)), "index"])
colnames(AucAll)[1:(which(colnames(AucAll)=="index")-1)] <- paste("Auc",colnames(AucAll)[1:(which(colnames(AucAll)=="index")-1)], sep=".")
AucAll[,"cluNo"] <- rep(nGr:1, table(AucAll[,"cluNo"])) # make cluNo descending
kMAx <- AucAll[,"cluNo"] # update
table(AucAll[,"cluNo"])
## note : column 'index' is relative to table1, iniInd to ordering inside objects from clustering
## ----ROC_profFig, echo=TRUE---------------------------------------------------
try(profileAsClu(AucAll[,c(1:length(methNa),(length(methNa)+2:3))], clu="cluNo", meanD="geoMean", tit="Pairwise Comparisons as Clustered AUC from ROC Curves",
xlab="Comparison number", ylab="AUC", meLty=1, meLwd=3))
## ----ROC_segmTable, echo=TRUE-------------------------------------------------
AucRep <- table(AucAll[,"cluNo"])[rank(unique(AucAll[,"cluNo"]))] # representative for each cluster
AucRep <- round(cumsum(AucRep) -AucRep/2 +0.1)
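## (cumsum minus half of each cluster-size yields the middle position of each cluster-block)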
## select representative for each cluster
kable(round(AucAll[AucRep,c("Auc.PD","Auc.MQ","Auc.PL","cluNo")],3), caption="Selected representative for each cluster ", align="c")
## ----freqOfFCperClu, echo=TRUE------------------------------------------------
ratTab <- sapply(5:1, function(x) { y <- table1[match(rownames(AucAll),rownames(table1)),]
table(factor(signif(y[which(AucAll[,"cluNo"]==x),"log2rat"],1), levels=unique(signif(table1[,"log2rat"],1))) )})
colnames(ratTab) <- paste0("\nclu",5:1,"\nn=",rev(table(kMAx)))
layout(1)
imageW(ratTab, tit="Frequency of rounded log2FC in the 5 clusters", xLab="log2FC (rounded)", col=RColorBrewer::brewer.pal(9,"YlOrRd"),las=1)
mtext("Dark red for enrichment of given pair-wise ratio", cex=0.7)
## ----ROC_grp5tab, echo=TRUE---------------------------------------------------
colPanel <- 2:5
gr <- 5
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for best pairwise-comparisons ", align="c")
## ----ROC_grp5fig, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
## ----VolcanoClu5, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
## This requires package 'wrGraph' at version 1.2.5 (or higher)
if(packageVersion("wrGraph") >= "1.2.5") {
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
## ----ROC_grp4tab, echo=TRUE---------------------------------------------------
gr <- 4
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '++++' pairwise-comparisons ", align="c")
## ----ROC_grp4fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
## ----VolcanoClu4, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
## ----ROC_grp3tab, echo=TRUE---------------------------------------------------
gr <- 3
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '+++' pairwise-comparisons ", align="c")
## ----ROC_grp3fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]],rocMQ[[jR]],rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
## ----VolcanoClu3, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
## ----ROC_grp2tab, echo=TRUE---------------------------------------------------
gr <- 2
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '++' pairwise-comparisons ", align="c")
## ----ROC_grp2fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
## ----VolcanoClu2, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
## ----ROC_grp1tab, echo=TRUE---------------------------------------------------
gr <- 1
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '+' pairwise-comparisons ", align="c")
## ----ROC_grp1fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
## frequent concentrations :
layout(matrix(1:2, ncol=1), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
## ----VolcanoClu1, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE----
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
## ----nNA2, echo=TRUE----------------------------------------------------------
tab1 <- rbind(PD=sumNAperGroup(dataPD$raw[which(dataPD$annot[,"SpecType"]=="UPS1"),], grp9),
MQ=sumNAperGroup(dataMQ$raw[which(dataMQ$annot[,"SpecType"]=="UPS1"),], grp9),
PL= sumNAperGroup(dataPL$raw[which(dataPL$annot[,"SpecType"]=="UPS1"),], grp9) )
kable(tab1, caption="The number of NAs in the UPS1 proteins", align="c")
## ----nNAfig1, fig.height=3.5, fig.width=9.5, fig.align="center", echo=TRUE----
countRawNA <- function(dat, newOrd=UPS1$ac, relative=FALSE) { # count number of NAs per UPS protein and order as UPS
out <- rowSums(is.na(dat$raw[match(newOrd,rownames(dat$raw)),]))
if(relative) out/nrow(dat$raw) else out }
sumNAperMeth <- cbind(PD=countRawNA(dataPD), MQ=countRawNA(dataMQ), PL=countRawNA(dataPL) )
UPS1na <- sub("_UPS","",dataPL$annot[UPS1$ac,"EntryName"])
par(mar=c(6.8, 3.5, 4, 1))
imageW(sumNAperMeth, rowNa=UPS1na, tit="Number of NAs in UPS proteins", xLab="", yLab="",
transp=FALSE, col=rev(RColorBrewer::brewer.pal(9,"YlOrRd")))
mtext("Dark red for high number of NAs",cex=0.7)
## ----PCA2PD, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE------
try(plotPCAw(testPD$datImp[which(testPD$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on ProteomeDiscoverer, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
## ----PCA2MQ, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE------
try(plotPCAw(testMQ$datImp[which(testMQ$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on MaxQuant, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
## ----PCA2PL, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE------
try(plotPCAw(testPL$datImp[which(testPL$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on Proline, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
## ----intraReplicCV1, fig.height=10, fig.width=12, fig.align="center", echo=TRUE----
## combined plot : all data (left), Ups1 (right)
layout(1:3)
sumNAinPD <- vector("list", 18)
sumNAinPD[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testPD$datImp, grp9))))
sumNAinPD[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testPD$datImp[which(testPD$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinPD)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinPD)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinPD, halfViolin="pairwise", tit="CV Intra Replicate, ProteomeDiscoverer", cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
sumNAinMQ <- vector("list", 18)
sumNAinMQ[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testMQ$datImp, grp9))))
sumNAinMQ[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testMQ$datImp[which(testMQ$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinMQ)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9)) # paste(unique(grp9),"all",sep=".")
names(sumNAinMQ)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".") #paste(unique(grp9),"Ups1",sep=".")
try(vioplotW(sumNAinMQ, halfViolin="pairwise", tit="CV intra replicate, MaxQuant",cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
sumNAinPL <- vector("list", 18)
sumNAinPL[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testPL$datImp, grp9))))
sumNAinPL[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testPL$datImp[which(testPL$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinPL)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinPL)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinPL, halfViolin="pairwise", tit="CV Intra Replicate, Proline", cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
## ----linModel0, echo=TRUE-----------------------------------------------------
## prepare object for storing all results
datUPS1 <- array(NA, dim=c(length(UPS1$ac),length(methNa),7), dimnames=list(UPS1$ac,c("PD","MQ","PL"),
c("sco","nPep","medAbund", "logp","slope","startFr","cluNo")))
## ----linModelPD, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE----
lmPD <- vector("list", length(NamesUpsPD))
doPl <- FALSE
lmPD[1:length(NamesUpsPD)] <- lapply(NamesUpsPD[1:length(NamesUpsPD)], linModelSelect, dat=dataPD,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmPD) <- NamesUpsPD
## ----linModelPD2, echo=TRUE---------------------------------------------------
## We make a little summary of regression-results (ProteomeDiscoverer)
tmp <- cbind(log10(sapply(lmPD, function(x) x$coef[2,4])), sapply(lmPD, function(x) x$coef[2,1]), sapply(lmPD, function(x) x$startLev))
datUPS1[,1,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmPD)), ]
datUPS1[,1,"medAbund"] <- apply(wrMisc::.scale01(dataPD$datImp)[match(UPS1$ac,rownames(dataPD$datImp)),],1,median,na.rm=TRUE)
## ----linModelMQ, echo=TRUE----------------------------------------------------
lmMQ <- vector("list", length(NamesUpsMQ))
lmMQ[1:length(NamesUpsMQ)] <- lapply(NamesUpsMQ[1:length(NamesUpsMQ)], linModelSelect, dat=dataMQ,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmMQ) <- NamesUpsMQ
## ----linModelMQ2, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE----
## We make a little summary of regression-results (MaxQuant)
tmp <- cbind(log10(sapply(lmMQ, function(x) x$coef[2,4])), sapply(lmMQ, function(x) x$coef[2,1]), sapply(lmMQ, function(x) x$startLev))
datUPS1[,2,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmMQ)), ]
datUPS1[,2,"medAbund"] <- apply(wrMisc::.scale01(dataMQ$datImp)[match(UPS1$ac,rownames(dataMQ$datImp)),],1,median,na.rm=TRUE)
## ----linModelPL, echo=TRUE----------------------------------------------------
lmPL <- vector("list", length(NamesUpsPL))
lmPL[1:length(NamesUpsPL)] <- lapply(NamesUpsPL[1:length(NamesUpsPL)], linModelSelect, dat=dataPL,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmPL) <- NamesUpsPL
## ----linModelPLsum, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE----
tmp <- cbind(log10(sapply(lmPL, function(x) x$coef[2,4])), sapply(lmPL, function(x) x$coef[2,1]), sapply(lmPL, function(x) x$startLev))
datUPS1[,3,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmPL)), ]
datUPS1[,3,"medAbund"] <- apply(wrMisc::.scale01(dataPL$datImp)[match(UPS1$ac,rownames(dataPL$datImp)),],1,median,na.rm=TRUE)
## ----linModelStartStat, echo=TRUE--------------------------------------------
## at which concentration of UPS1 did the best regression start ?
stTab <- sapply(1:5, function(x) apply(datUPS1[,,"startFr"],2,function(y) sum(x==y)))
colnames(stTab) <- paste("lev",1:5,sep="_")
kable(stTab, caption = "Frequency of starting levels for regression")
## ----linModelPlotAll, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE----
layout(matrix(1:4,ncol=2))
subTi <- "fill according to median abundance (blue=low - green - red=high)"
xyRa <- apply(datUPS1[,,4:5], 3, range, na.rm=TRUE)
plotMultRegrPar(datUPS1, 1, xlim=xyRa[,1], ylim=xyRa[,2], tit="ProteomeDiscoverer UPS1, p-value vs slope", subTit=subTi)
plotMultRegrPar(datUPS1, 2, xlim=xyRa[,1], ylim=xyRa[,2],tit="MaxQuant UPS1, p-value vs slope",subTit=subTi)
plotMultRegrPar(datUPS1, 3, xlim=xyRa[,1], ylim=xyRa[,2],tit="Proline UPS1, p-value vs slope",subTit=subTi)
## ----combRegrScore1, echo=TRUE------------------------------------------------
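## Composite score : 'logp' holds log10(p) of the regression (negative for small p-values),
##  so -logp rewards significant fits while (slope-1)^2 penalizes slopes deviating from the expected slope of 1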
for(i in 1:(dim(datUPS1)[2])) datUPS1[,i,"sco"] <- -datUPS1[,i,"logp"] - (datUPS1[,i,"slope"] -1)^2 # cut at > 8
## ----combRegrScore2, echo=TRUE------------------------------------------------
datUPS1[,1,2] <- rowSums(dataPD$count[match(UPS1$ac,dataPD$annot[,1]),,"NoOfPeptides"], na.rm=TRUE)
datUPS1[,2,2] <- rowSums(dataMQ$count[match(UPS1$ac,dataMQ$annot[,1]),,1], na.rm=TRUE)
datUPS1[,3,2] <- rowSums(dataPL$count[match(UPS1$ac,dataPL$annot[,1]),,"NoOfPeptides"], na.rm=TRUE)
## ----combRegrScore3, fig.height=6, fig.width=9.5, fig.align="center", echo=TRUE----
layout(matrix(1:4, ncol=2))
par(mar=c(5.5, 2.2, 4, 0.4))
col1 <- RColorBrewer::brewer.pal(9,"YlOrRd")
imageW(datUPS1[,,1], col=col1, tit="Linear regression score", xLab="",yLab="",transp=FALSE)
mtext("red for bad score", cex=0.75)
imageW(log(datUPS1[,,2]), tit="Number of peptides", xLab="",yLab="", col=col1, transp=FALSE)
mtext("dark red for high number of peptides", cex=0.75)
## ratio : regression score vs no of peptides
imageW(datUPS1[,,1]/log(datUPS1[,,2]), col=rev(col1), tit="Regression score / Number of peptides", xLab="",yLab="", transp=FALSE)
mtext("dark red for high (good) lmScore/peptide ratio)", cex=0.75)
## score vs abundance
imageW(datUPS1[,,1]/datUPS1[,,3], col=rev(col1), tit="Regression score / median Abundance", xLab="",yLab="", transp=FALSE)
mtext("dark red for high (good) lmScore/abundance ratio)", cex=0.75)
## ----combScore1, echo=TRUE----------------------------------------------------
## number of groups for clustering
nGr <- 5
chFin <- is.finite(datUPS1[,,"sco"])
if(any(!chFin)) datUPS1[,,"sco"][which(!chFin)] <- -1 # just in case..
## clustering using kMeans
kMx <- stats::kmeans(standardW(datUPS1[,,"sco"], byColumn=FALSE), nGr)$cluster
datUPS1[,,"cluNo"] <- matrix(rep(kMx, dim(datUPS1)[2]), nrow=length(kMx))
geoM <- apply(datUPS1[,,"sco"], 1, function(x) prod(x)^(1/length(x))) # geometric mean across analysis soft
geoM2 <- lrbind(by(cbind(geoM,datUPS1[,,"sco"], clu=kMx), kMx, function(x) x[order(x[,1],decreasing=TRUE),])) # organize by clusters
tmp <- tapply(geoM2[,"geoM"], geoM2[,"clu"], median)
geoM2[,"clu"] <- rep(rank(tmp, ties.method="first"), table(kMx))
geoM2 <- geoM2[order(geoM2[,"clu"],geoM2[,"geoM"],decreasing=TRUE),] # order as decreasing median.per.cluster
geoM2[,"clu"] <- rep(1:max(kMx), table(geoM2[,"clu"])[rank(unique(geoM2[,"clu"]))]) # replace cluster-names to increasing
try(profileAsClu(geoM2[,2:4], geoM2[,"clu"], tit="Clustered Regression Results for UPS1 Proteins", ylab="Linear regression score"))
## ----combScore2, echo=TRUE----------------------------------------------------
datUPS1 <- datUPS1[match(rownames(geoM2), rownames(datUPS1)),,] # bring in new order
datUPS1[,,"cluNo"] <- geoM2[,"clu"] # update cluster-names
### prepare annotation of UPS proteins
annUPS1 <- dataPL$annot[match(rownames(datUPS1), dataPL$annot[,1]), c(1,3)]
annUPS1[,2] <- substr(sub("_UPS","",sub("generic_ups\\|[[:alnum:]]+-{0,1}[[:digit:]]\\|","",annUPS1[,2])),1,42)
## ----combScore3, echo=TRUE----------------------------------------------------
## index of representative for each cluster (median position inside cluster)
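## (eg if a cluster holds 9 proteins, its representative is the 5th, ie the median, entry)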
UPSrep <- tapply(geoM2[,"geoM"], geoM2[,"clu"], function(x) ceiling(length(x)/2)) + c(0, cumsum(table(geoM2[,"clu"]))[-nGr])
## ----regr5star, echo=TRUE-----------------------------------------------------
gr <- 1
useLi <- which(datUPS1[,1,"cluNo"]==gr)
colNa <- c("Protein",paste(colnames(datUPS1), rep(c("slope","logp"), each=ncol(datUPS1)), sep=" "))
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption=paste("Regression details for cluster of the",length(useLi),"best UPS1 proteins "), col.names=colNa, align="l"),silent=TRUE)
## ----regrPlot5star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
## Plotting the best regressions, this requires package wrGraph version 1.2.5 (or higher)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
## ----regr4star, echo=TRUE-----------------------------------------------------
gr <- 2
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption=paste("Regression details for cluster of the",length(useLi),"2nd best UPS1 proteins "), col.names=colNa, align="l"),silent=TRUE)
## ----regrPlot4star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
## ----regr3star, echo=TRUE-----------------------------------------------------
gr <- 3
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 3rd cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
## ----regrPlot3star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
## ----regrPlot2star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
gr <- 4
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 3rd cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
## ----regrPlot1star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE----
gr <- 5
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 5th cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
## ----sessionInfo, echo=FALSE--------------------------------------------------
sessionInfo()
---
title: "Analyzing Proteomics UPS1 Spike-in Experiments (Example Ramus 2016 Dataset)"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{UPS1 spike-in Experiments}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This vignette complements the _more basic vignette_ **'Getting started with wrProteo'** also from this package and shows in more detail how [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)
_spike-in_ experiments may be analyzed, using this package ([wrProteo](https://CRAN.R-project.org/package=wrProteo)),
[wrMisc](https://CRAN.R-project.org/package=wrMisc), [wrGraph](https://CRAN.R-project.org/package=wrGraph) and
[RColorBrewer](https://CRAN.R-project.org/package=RColorBrewer).
All these packages are available on CRAN.
Furthermore, the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html) will be used internally for its moderated statistical testing.
```{r, include = FALSE}
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
```
```{r install, echo=TRUE, eval=FALSE}
## This is R code, you can run this to redo all analysis presented here.
## If not already installed, you'll have to install wrMisc and wrProteo first.
install.packages("wrMisc")
install.packages("wrProteo")
## These packages are used for the graphics
install.packages("wrGraph")
install.packages("RColorBrewer")
## Installation of limma from Bioconductor
if(!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
BiocManager::install("limma")
## You can also see all vignettes for this package by typing :
browseVignettes("wrProteo")   # ... and then select the html output
```
As you will see in the interactive window from _browseVignettes()_, [this package](https://CRAN.R-project.org/package=wrProteo) has 2 vignettes,
a more general introductory vignette (mentioned above) and this UPS-1 dedicated vignette.
Now let's load the packages needed :
```{r setup, echo=TRUE, messages=FALSE, warnings=FALSE}
## Let's assume this is a fresh R-session
library(knitr)
library(wrMisc)
library(wrGraph)
library(wrProteo)
# Version number for wrProteo :
packageVersion("wrProteo")
```
### Experimental Setup For Benchmark Tests
The main aim of the experimental setup using heterologous _spike-in_ experiments is to provide a framework to test identification and quantitation procedures in proteomics.
By mixing known amounts of a collection of human proteins ([UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)) in various concentrations on top of a constant level yeast total protein extract,
one expects to find only the spiked human UPS1 proteins varying between samples.
In terms of ROC curves (see also [ROC on Wikipedia](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)) the _spike-in_ proteins are expected to show up as true positives (TP).
In contrast, all yeast proteins were added in the same quantity to all samples and should thus be observed as constant, ie as true negatives (TN) when looking for proteins changing abundance.
The specific dataset used here (see also the next section) is not that recent; thus,
for addressing scientific questions concerning the comparison and choice of quantification software it may be better to use similar but more recent datasets.
The main aim of this vignette is to show the possibilities _how_ such comparisons can be performed using [wrProteo](https://CRAN.R-project.org/package=wrProteo).
### The Ramus Data-Set
The data used in this vignette was published with the article : [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011)
"Benchmarking quantitative label-free LC-MS data processing workflows using a complex spiked proteomic standard dataset" in J Proteomics 2016 Jan 30;132:51-62.
This dataset is available on PRIDE as [PXD001819](https://www.ebi.ac.uk/pride/archive/projects/PXD001819) (and on ProteomeXchange).
Briefly, this experiment aims to evaluate and compare various quantification approaches of the heterologous _spike-in_ [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)
(available from Sigma-Aldrich) in yeast protein extracts as constant matrix. 9 different concentrations of the heterologous _spike-in_ (UPS1) were run in triplicate.
The proteins were initially digested by Trypsin and then analyzed by LC-MS/MS in DDA mode.
As described in more detail in the reference, this dataset was generated using a LTQ-Orbitrap; in the meantime more powerful and precise mass-spectrometers have become available.
Thus, scientific questions about the comparison and choice of quantification software may be better addressed using more recent datasets.
### Meta-Data Describing The Experiment (sdrf)
The project [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata) aims to provide a uniform format
for documenting experimental meta-data (the sdrf-format).
The meta-data for experiments already integrated can be directly read/accessed from [wrProteo](https://CRAN.R-project.org/package=wrProteo).
Either you download the meta-data as file 'sdrf.tsv' from [Pride/PXD001819](https://www.ebi.ac.uk/pride/archive/projects/PXD001819), or you may read file 'PXD001819.sdrf.tsv' directly from [github/bigbio](https://github.com/bigbio/proteomics-sample-metadata/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv).
```{r metaData1, echo=TRUE}
## Read meta-data from github.com/bigbio/proteomics-metadata-standard/
pxd001819meta <- readSdrf("PXD001819")
## The concentration of the UPS1 spike-in proteins in the samples
if(length(pxd001819meta) >0) {
UPSconc <- sort(unique(as.numeric(wrMisc::trimRedundText(pxd001819meta$characteristics.spiked.compound.)))) # trim to get to 'essential' info
} else {
UPSconc <- c(50, 125, 250, 500, 2500, 5000, 12500, 25000, 50000) # in case access to github failed
}
```
The import-functions used further on in this vignette can directly download this meta-data if the PXD-accession-number is provided.
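For illustration, such a direct download may look like this (a minimal sketch, not evaluated here; fully parameterized calls follow in the import sections below) :
```{r sdrfDirect, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : giving the PXD-accession via 'sdrf' lets the
## import-function fetch the corresponding meta-data automatically;
## 'path1' is assumed to point to the folder containing the quantification-file
dataMQ <- readMaxQuantFile(path1, file="proteinGroups.txt.gz", sdrf="PXD001819")
```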
### Key Elements And Additional Functions
\small
```{r functions1, echo=TRUE}
## A few elements and functions we'll need later on
methNa <- c("ProteomeDiscoverer","MaxQuant","Proline")
names(methNa) <- c("PD","MQ","PL")
## The accession numbers for the UPS1 proteins
UPS1 <- data.frame(ac=c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165",
"P00709", "P06732", "P12081", "P61626", "Q15843", "P02753", "P16083", "P63279",
"P01008", "P61769", "P55957", "O76070", "P08263", "P01344", "P01127", "P10599",
"P99999", "P06396", "P09211", "P01112", "P01579", "P02787", "O00762", "P51965",
"P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375"),
species=rep("Homo sapiens", 48),
name=NA)
```
\small
```{r functions2, echo=TRUE}
## additional functions
replSpecType <- function(x, annCol="SpecType", replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")), silent=TRUE) {
## rename $annot[,"SpecType"] to more specific names
chCol <- annCol[1] %in% colnames(x$annot)
if(chCol) { chCol <- which(colnames(x$annot)==annCol[1])
chIt <- replBy[,1] %in% unique(x$annot[,chCol]) # check items to replace if present
    if(any(chIt)) for(i in which(chIt)) { useLi <- which(x$annot[,chCol] %in% replBy[i,1]); x$annot[useLi,chCol] <- replBy[i,2] }
} else if(!silent) message(" replSpecType: 'annCol' not found in x$annot !")
x }
plotConcHist <- function(mat, ref, refColumn=3:4, matCluNa="cluNo", lev=NULL, ylab=NULL, tit=NULL) {
## plot histogram like counts of UPS1 concentrations
if(is.null(tit)) tit <- "Frequency of UPS1 Concentrations Appearing in Cluster"
gr <- unique(mat[,matCluNa])
ref <- ref[,refColumn]
if(length(lev) <2) lev <- sort(unique(as.numeric(as.matrix(ref))))
if(length(ylab) !=1) ylab <- "Frequency"
tbl <- table(factor( as.numeric(ref[which(rownames(ref) %in% rownames(mat)),]), levels=lev))
graphics::barplot(tbl, las=1, beside=TRUE, main=paste(tit,gr), col=grDevices::gray(0.8), ylab=ylab)
}
plotMultRegrPar <- function(dat, methInd, tit=NULL, useColumn=c("logp","slope","medAbund","startFr"), lineGuide=list(v=c(-12,-10),h=c(0.7,0.75),col="grey"), xlim=NULL,ylim=NULL,subTit=NULL) {
## scatter plot logp (x) vs slope (y) for all UPS proteins, symbol by useColumn[4], color by hist of useColumn[3]
## dat (array) UPS1 data
## useColumn (character) 1st as 'logp', 2nd as 'slope', 3rd as median abundance, 4th as starting best regression from this point
fxNa <- "plotMultRegrPar"
#fxNa <- wrMisc::.composeCallName(callFrom,newNa="plotMultRegrPar")
if(length(dim(dat)) !=3) stop("invalid input, expecting as 'dat' array with 3 dimensions (proteins,Softw,regrPar)")
if(any(length(methInd) >1, methInd > dim(dat)[2], !is.numeric(methInd))) stop("invalid 'methInd'")
chCol <- useColumn %in% dimnames(dat)[[3]]
if(any(!chCol)) stop("argument 'useColumn' does not fit to 3rd dim dimnames of 'dat'")
useCol <- colorAccording2(dat[,methInd,useColumn[3]], gradTy="rainbow", revCol=TRUE, nEndOmit=14)
graphics::plot(dat[,methInd,useColumn[1:2]], main=tit, type="n",xlim=xlim,ylim=ylim) #col=1, bg.col=useCol, pch=20+lmPDsum[,"startFr"],
  graphics::points(dat[,methInd,useColumn[1:2]], col=1, bg=useCol, pch=20+dat[,methInd,useColumn[4]])
graphics::legend("topright",paste("best starting from ",1:5), text.col=1, pch=21:25, col=1, pt.bg="white", cex=0.9, xjust=0.5, yjust=0.5)
if(length(subTit)==1) graphics::mtext(subTit,cex=0.9)
if(is.list(lineGuide) & length(lineGuide) >0) {if(length(lineGuide$v) >0) graphics::abline(v=lineGuide$v,lty=2,col=lineGuide$col)
if(length(lineGuide$h) >0) graphics::abline(h=lineGuide$h,lty=2,col=lineGuide$col)}
hi1 <- graphics::hist(dat[,methInd,useColumn[3]], plot=FALSE)
wrGraph::legendHist(sort(dat[,methInd,useColumn[3]]), colRamp=useCol[order(dat[,methInd,useColumn[3]])][cumsum(hi1$counts)],
cex=0.5, location="bottomleft", legTit="median raw abundance") #
}
```
\normalsize
## Protein Identification and Initial Quantification
Multiple algorithms and software implementations have been developed for the quantitation of label-free proteomics experiments,
in particular for extracted ion chromatograms (XIC). For background information you may look at
[Wikipedia label-free Proteomics](https://en.wikipedia.org/wiki/Label-free_quantification).
Here, the use of the output of 3 such implementations for extracting peptide/protein quantifications is shown.
These 3 software implementations were run individually using equivalent settings, ie identification based on the same fasta-database,
starting at a single peptide with 1% FDR, MS mass tolerance for ion precursors at 0.7 ppm,
carbamidomethylation of cysteines as fixed, as well as oxidation of methionines and N-terminal acetylation as variable modifications.
Since in this context it is crucial to recognize all UPS1 proteins as such, the import-functions make use of the _specPref_ argument, allowing to define custom tags.
Most additional arguments to the various import-functions have been kept common for convenient use and for generating output structured the same way.
Indeed, simply separating proteins by their species origin is not sufficient, since common contaminants
like human keratin might erroneously get considered as UPS1.
### MaxQuant {#ReadMaxQuant}
[MaxQuant](https://www.maxquant.org) is free software provided by the [Max-Planck-Institute](https://www.biochem.mpg.de/en),
see also [Tyanova et al 2016](https://doi.org/10.1038/nprot.2016.136).
Later in this document data from MaxQuant will frequently be abbreviated as **MQ**.
Typically [MaxQuant](https://www.maxquant.org) exports quantitation data on level of consensus-proteins by default to a folder called _txt_ with a file called *"proteinGroups.txt"* .
So in a standard case (when the file name has not been changed manually) it is sufficient to provide the path to this file.
Of course, you can explicitely point to a specific file, as shown below.
With the data presented here MaxQuant version 1.6.10 was run.
Files compressed as .gz can be read, too (like in the example below).
```{r readMaxQuant, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaMQ <- "proteinGroups.txt.gz"
## We need to define the setup of species
specPrefMQ <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike=UPS1$ac)
dataMQ <- readMaxQuantFile(path1, file=fiNaMQ, specPref=specPrefMQ, refLi="mainSpe",
sdrf=c("PXD001819","max"), suplAnnotFile=TRUE, plotGraph=FALSE)
```
The data were imported, log2-transformed and median-normalized, the protein annotation was parsed to automatically extract IDs, protein-names and species information.
The species annotation was extracted out of the fasta-headers, as given in the _specPref_ argument (MaxQuant specific setting).
As explained in more detail in the general vignette [wrProteoVignette1](https://CRAN.R-project.org/package=wrProteo),
in this example we use only proteins annotated as _Saccharomyces cerevisiae_ for determining the normalization-factors via the argument _refLi_.
If you wish to inspect the graphs for the distribution of abundance values for each sample before and after median-normalization, please set the argument _plotGraph=TRUE_ (default).
Please note, that in the example above we directly added information about the experimental setup from the _sdrf_ repository.
```{r readMaxQuant2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The number of lines and colums
dim(dataMQ$quant)
## A quick summary of some columns of quantitation data
summary(dataMQ$quant[,1:7]) # the first 7 cols
table(dataMQ$annot[,"SpecType"], useNA="always")
```
Now we can summarize the presence of UPS1 proteins after treatment by MaxQuant :
In sum, `r sum(UPS1$ac %in% dataMQ$annot[,1])` UPS1 proteins were found, `r sum(!UPS1$ac %in% dataMQ$annot[,1])` are missing.
### ProteomeDiscoverer {#ReadProteomeDiscoverer}
[ProteomeDiscoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812) is commercial software from ThermoFisher (www.thermofisher.com).
Later in this document data from ProteomeDiscoverer will frequently be abbreviated as **PD**.
With the data used here, the identification was performed using the XCalibur module of ProteomeDiscoverer version 2.4 .
Quantitation data at the level of consensus-proteins can be exported to tabulated text files, which can be treated by the function shown below.
The resultant data were exported in tabulated format, with the file automatically named '\_Proteins.txt_' by ProteomeDiscoverer (the option R-headers was checked, however this option is not mandatory).
Files compressed as .gz can be read, too (like in the example below).
```{r readProteomeDiscoverer1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaPd <- "pxd001819_PD24_Proteins.txt.gz"
## Next, we define the setup of species
specPrefPD <- list(conta="Bos tauris|Gallus", mainSpecies="Saccharomyces cerevisiae", spike=UPS1$ac)
dataPD <- readProteomeDiscovererFile(file=fiNaPd, path=path1, refLi="mainSpe", specPref=specPrefPD,
sdrf=c("PXD001819","max"), plotGraph=FALSE)
```
The data were imported, log2-transformed and median-normalized, the protein annotation was parsed to automatically extract IDs, protein-names and species information.
Please note, that quantitation data exported from ProteomeDiscoverer frequently have very generic column-names (increasing numbers).
When calling the import-function they can be replaced by more meaningful names either using the argument _sampNa_,
or from reading the default annotation in the file _'InputFiles.txt'_ or, finally, from the sdrf-annotation.
In the example above both the default annotation as file _'InputFiles.txt'_ and the _sdrf_ annotation are available and were integrated into the object produced by the import-function.
The species annotation was extracted as given in the _specPref_ argument.
In this example we use only proteins annotated as _Saccharomyces cerevisiae_ for determining the normalization-factors via the argument _refLi_.
If you wish to inspect the graphs for the distribution of abundance values for each sample before and after median-normalization, please set the argument _plotGraph=TRUE_ (default).
```{r readProteomeDiscoverer2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The number of lines and colums
dim(dataPD$quant)
## A quick summary of some columns of quantitation data
summary(dataPD$quant[,1:7]) # the first 7 cols
table(dataPD$annot[,"SpecType"], useNA="always")
```
Now we can summarize the presence of UPS1 proteins after treatment by ProteomeDiscoverer :
In sum, `r sum(UPS1$ac %in% dataPD$annot[,1])` UPS1 proteins were found, `r sum(!UPS1$ac %in% dataPD$annot[,1])` are missing.
### Proline
[Proline](http://www.profiproteomics.fr/proline/) is open-source software provided by the [Profi-consortium](https://www.profiproteomics.fr)
(see also [proline-core on github](https://github.com/profiproteomics/proline-core)), published by [Bouyssie et al 2020](https://doi.org/10.1093/bioinformatics/btaa118).
Later in this document data from Proline will frequently be abbreviated as **PL**.
Protein identification in Proline is performed by [SearchGUI](http://compomics.github.io/projects/searchgui), see also [Vaudel et al 2015](https://doi.org/10.1002/pmic.201000595).
In this case [X!Tandem](https://www.thegpm.org/TANDEM/) (see also [Duncan et al 2005](https://doi.org/10.1021/pr050058i)) was used as search engine.
Quantitation data at the level of consensus-proteins can be exported from [Proline](http://www.profiproteomics.fr/proline/)
as _.xlsx_ or tabulated text files, both formats can be treated by the import-functions shown below.
Here, Proline version 1.6.1 was used with addition of Percolator (via MS-Angel from the same authors).
```{r readProline, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaPl <- "pxd001819_PL.xlsx"
specPrefPL <- list(conta="_conta", mainSpecies="Saccharomyces cerevisiae", spike=UPS1$ac)
dataPL <- readProlineFile(fiNaPl, path=path1, specPref=specPrefPL, normalizeMeth="median", refLi="mainSpe",
sdrf=c("PXD001819","max"), plotGraph=FALSE)
```
The (log2-transformed) data were imported and median-normalized, the protein annotation was parsed to automatically extract IDs, protein-names and species information.
The species annotation was extracted from the protein annotation columns, as specified with the _specPref_ argument.
As explained in more detail in the general vignette [wrProteoVignette1](https://CRAN.R-project.org/package=wrProteo),
in this example we use only proteins annotated as _Saccharomyces cerevisiae_ for determining the normalization-factors via the argument _refLi_.
If you wish to inspect the graphs for the distribution of abundance values for each sample before and after median-normalization, please set the argument _plotGraph=TRUE_ (default).
Please note, that in the example above we directly added information about the experimental setup from the _sdrf_ repository.
Here, both the default annotation from the _xlsx_ file and the sdrf annotation are available and were integrated into the object produced by the import-function.
In addition, we need to correct the quantification column-heads (like 'Levure2ug+ UPS1-100amol') and bring them to a simpler form :
```{r postTreatmPL, echo=TRUE}
head(colnames(dataPL$raw), 7)
dataPL <- cleanListCoNames(dataPL, rem=c("Levure2ug+ UPS1-"), subst=cbind(c("fmol","mol-"), c("000amol","mol_R")), mathOper="/2")
## let's check the result
head(colnames(dataPL$raw),8)
```
```{r readProlineInfo, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The number of lines and colums
dim(dataPL$quant)
## A quick summary of some columns of quantitation data
summary(dataPL$quant[,1:8]) # the first 8 cols
table(dataPL$annot[,"SpecType"], useNA="always")
```
Now we can summarize the presence of UPS1 proteins after treatment by Proline :
In sum, `r sum(UPS1$ac %in% dataPL$annot[,1])` UPS1 proteins were found, `r sum(!UPS1$ac %in% dataPL$annot[,1])` are missing.
### Uniform Re-Arranging Of Data
For easy and proper comparisons we need to make sure all columns are in the same order; however, when using different software this is not automatically the case.
The basic names of the groups have already been figured out using the sample meta-data, notably the sdrf.
```{r rearrange1, echo=TRUE}
## bring all results (MaxQuant,ProteomeDiscoverer, ...) in same ascending order
## as reference we will use the order from ProteomeDiscoverer, its output is already in a convenient order
sampNa <- colnames(dataPD$quant)
## it is more convenient to re-order columns this way in each project
dataPD <- corColumnOrder(dataPD, sampNames=sampNa) # already in good order
dataMQ <- corColumnOrder(dataMQ, replNames=paste0("UPS1_",sub("amol_", "amol_R", colnames(dataMQ$quant))), sampNames=sampNa) # incl changed names
dataPL <- corColumnOrder(dataPL, replNames=paste0("UPS1_",colnames(dataPL$quant)), sampNames=sampNa) # incl changed names
```
At import we made use of the argument _specPref_ (specifying '_mainSpecies_', '_conta_' and '_spike_') which allows building categories by searching keywords in the initial annotation.
In turn, we obtain the labels :
'_mainSpe_' for yeast (ie matrix), '_species2_' for the UPS1 (ie spike) and '_conta_' for contaminants.
Let's replace the first two generic terms by more specific ones (ie '_Yeast_' and '_UPS1_') :
```{r postTreatm1, echo=TRUE}
## Need to rename $annot[,"SpecType"]
dataPD <- replSpecType(dataPD, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
dataMQ <- replSpecType(dataMQ, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
dataPL <- replSpecType(dataPL, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
## Need to address missing ProteinNames (UPS1) due to missing tags in Fasta
dataPD <- replMissingProtNames(dataPD)
dataMQ <- replMissingProtNames(dataMQ)
dataPL <- replMissingProtNames(dataPL)
table(dataPD$annot[,"SpecType"])
## synchronize order of groups
(grp9 <- dataMQ$sampleSetup$level)
names(grp9) <- rep(paste0(UPSconc,"amol"), each=3)
dataPL$sampleSetup$groups <- dataMQ$sampleSetup$groups <- dataPD$sampleSetup$groups <- grp9 # synchronize order of groups
```
```{r postTreatmCheck, echo=TRUE}
## extract names of quantified UPS1-proteins
NamesUpsPD <- dataPD$annot[which(dataPD$annot[,"SpecType"]=="spike"), "Accession"]
NamesUpsMQ <- dataMQ$annot[which(dataMQ$annot[,"SpecType"]=="spike"), "Accession"]
NamesUpsPL <- dataPL$annot[which(dataPL$annot[,"SpecType"]=="spike"), "Accession"]
```
```{r postTreatmTables, echo=TRUE}
tabS <- mergeVectors(PD=table(dataPD$annot[,"SpecType"]), MQ=table(dataMQ$annot[,"SpecType"]), PL=table(dataPL$annot[,"SpecType"]))
tabT <- mergeVectors(PD=table(dataPD$annot[,"Species"]), MQ=table(dataMQ$annot[,"Species"]), PL=table(dataPL$annot[,"Species"]))
tabS[which(is.na(tabS))] <- 0
tabT[which(is.na(tabT))] <- 0
kable(cbind(tabS[,2:1], tabT), caption="Number of proteins identified, by custom tags, species and software")
```
The initial fasta file also contained the yeast strain number; this has been stripped off when using default parameters.
------
## Basic Data Treatment
### Structure of Experiment
The global structure of experiments can be provided as sdrf-file and/or from meta-data stored with the experimental data read.
For convenience, this information about the groups of replicates was already deduced and can be found (for example) in _dataMQ$sampleSetup$sdrf_.
```{r metaData2, echo=TRUE}
kable(cbind(dataMQ$sampleSetup$sdrf[,c(23,7,19,22)], groups=dataMQ$sampleSetup$groups))
```
### Normalization
No additional normalization is needed, all data were already median normalized to the host proteins (ie _Saccharomyces cerevisiae_) after importing the
initial quantification-output using '_readMaxQuantFile()_', '_readProlineFile()_' and '_readProteomeDiscovererFile()_'.
### Presence of NA-values
As mentioned in the general vignette of [this package](https://CRAN.R-project.org/package=wrProteo), 'wrProteoVignette1',
it is important to investigate the nature of NA-values.
In particular, checking the hypothesis that NA-values originate from very low abundance instances is essential for deciding how to treat NA-values further on.
```{r NA_ProteomeDiscoverer, echo=TRUE}
## Let's inspect NA values from ProteomeDiscoverer as graphic
matrixNAinspect(dataPD$quant, gr=grp9, tit="ProteomeDiscoverer")
```
```{r NA_MaxQuant, echo=TRUE}
## Let's inspect NA values from MaxQuant as graphic
matrixNAinspect(dataMQ$quant, gr=grp9, tit="MaxQuant")
```
```{r NA_Proline, echo=TRUE}
## Let's inspect NA values from Proline as graphic
matrixNAinspect(dataPL$quant, gr=grp9, tit="Proline")
```
A key element to understand the nature of NA-values is to investigate their NA-neighbours.
If a given protein has an NA in just one of the 3 replicates, the other two valid quantifications can be considered as NA-neighbours.
In the figures above all NA-neighbours are shown in the histogram and their mode is marked by an arrow.
One can see that NA-neighbours are predominantly (but not exclusively) part of the lower quantitation values.
This supports the hypothesis that NAs occur most frequently with low abundance proteins.
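The principle can be illustrated by a minimal sketch (assuming a quantification matrix `x`, eg `dataPD$quant`, and the grouping factor `grp9` defined above) :
```{r naNeighSketch, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : collect NA-neighbours, ie the valid values of
## protein/group combinations where exactly one of the replicates is NA
naNeigh <- unlist(lapply(unique(grp9), function(g) {
  xg <- x[, which(grp9==g), drop=FALSE]                   # replicates of one group
  xg <- xg[which(rowSums(is.na(xg)) == 1), , drop=FALSE]  # rows with exactly one NA
  xg[!is.na(xg)] }))                                      # keep the valid neighbour-values
hist(naNeigh, breaks=30, main="NA-neighbour values")
```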
### NA-Imputation and Statistical Testing for Changes in Abundance
NA-values represent a challenge for statistical testing. In addition, techniques like PCA don't allow NAs, either.
The number of NAs varies between samples : Indeed, very low concentrations of UPS1 are difficult to get detected and contribute largely to the NAs (as we will see later in more detail).
Since the amount of yeast proteins (ie the matrix in this setup) stays constant across all samples, yeast proteins should always get detected the same way.
```{r nNA1, echo=TRUE}
## Let's look at the number of NAs. Is there an accumulated number in lower UPS1 samples ?
tabSumNA <- rbind(PD=sumNAperGroup(dataPD$raw, grp9), MQ=sumNAperGroup(dataMQ$raw, grp9), PL=sumNAperGroup(dataPL$raw, grp9) )
kable(tabSumNA, caption="Number of NAs per group of samples", align="r")
```
In the section above we investigated the circumstances of NA-instances and provided evidence that NA-values typically represent proteins with low abundance which frequently ended up as non-detectable (NA).
Thus, we hypothesize that (in most cases) NA-values might also have been detected in quantities like their NA-neighbours.
In consequence, we will model a normal distribution based on the NA-neighbours and use it for substituting the NA-values.
The function `testRobustToNAimputation()` from this package (wrProteo) allows to perform NA-imputation and subsequent statistical testing (after repeated imputation) between all groups of samples (see also the general vignette).
One of the advantages of this implementation is that multiple rounds of imputation are run, so that final results (including pair-wise testing) get stabilized against (rare) stochastic effects. For this reason one may also speak of stabilized NA-imputations.
The statistical tests used underneath make use of the shrinkage-procedure provided by the empirical Bayes approach as implemented in the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), see also [Ritchie et al 2015](https://doi.org/10.1093/nar/gkv007).
In addition, various formats of multiple testing correction can be added to the results : Benjamini-Hochberg FDR (lateron referred to as BH or BH-FDR, see [FDR on Wikipedia](https://en.wikipedia.org/wiki/False_discovery_rate), see also [Benjamini and Hochberg 1995](https://mathscinet.ams.org/mathscinet-getitem?mr=1325392)), local false discovery rate (lfdr, using the package [fdrtool](https://CRAN.R-project.org/package=fdrtool), see [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209)), or modified testing by [ROTS](https://bioconductor.org/packages/release/bioc/html/ROTS.html), etc ... In this vignette we will make use of the BH-FDR.
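The basic idea of the imputation step may be sketched as follows (a simplified illustration only; `testRobustToNAimputation()` determines the target distribution more carefully and repeats the procedure several times) :
```{r imputeSketch, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : replace NAs by random draws from a normal
## distribution centered on the NA-neighbours ('naNeigh', see sketch above)
x <- dataPD$quant
chNA <- is.na(x)
x[chNA] <- rnorm(sum(chNA), mean=median(naNeigh), sd=0.5*sd(naNeigh))
```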
We are ready to launch the NA-imputation and testing for data from ProteomeDiscoverer.
Please note that the procedure including repeated NA-imputations may take a few seconds.
```{r testProteomeDiscoverer, echo=TRUE}
testPD <- testRobustToNAimputation(dataPD, imputMethod="informed") # ProteomeDiscoverer
```
Then for MaxQuant ...
```{r testMaxQuant, echo=TRUE}
testMQ <- testRobustToNAimputation(dataMQ, imputMethod="informed") # MaxQuant
```
And finally for Proline :
```{r testProline, echo=TRUE}
testPL <- testRobustToNAimputation(dataPL, imputMethod="informed") # Proline
```
From these results we'll use i) the NA-imputed version of our datasets for plotting principal components (PCA) and ii) the (stabilized) testing results for counting TP, FP, etc and to construct ROC curves.
Let's add the NA-imputed data to our main object :
```{r testReorganize1, echo=TRUE}
dataPD$datImp <- testPD$datImp # recuperate imputed data into main data-object
dataMQ$datImp <- testMQ$datImp
dataPL$datImp <- testPL$datImp
```
------
## Analysis Using All Proteins Identified (Matrix + UPS1)
In this section we'll consider all proteins identified and quantified in a pair-wise fashion, using the t-tests already run in the previous section.
As mentioned, the experimental setup is very special, since all proteins that are truly changing are known in advance (the UPS1 _spike-in_ proteins).
Tables get constructed by counting based on various thresholds for considering given protein abundances as differential or not.
A traditional 5 percent FDR cut-off is used for Volcano-plots, while ROC-curves allow inspecting the entire range of potential cut-off values.
### Pairwise Testing Summary
A very universal and simple way to analyze data is by checking several pairwise comparisons, in particular if the experimental setup does not include complete multifactorial plans.
This [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) _spike-in_ experiment has `r ncol(dataPD$quant)` samples organized (according to meta-information) as `r length(UPSconc)` groups.
Thus, one obtains in total `r ncol(testPD$BH)` pair-wise comparisons, which makes the comparison quite crowded.
The publication by [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011) focussed on 3 pairwise comparisons only.
In this vignette it is shown how all of them can be considered.
Now, we'll construct a table showing all possible pairwise-comparisons. Using the function *numPairDeColNames()* we can easily extract the UPS1 concentrations as numeric content and show the (log-)ratio of the pairwise comparisons (column 'log2rat'), the final concentrations (columns 'conc1' and 'conc2', in amol) and the number of differentially abundant proteins passing 5% FDR (using classical Benjamini-Hochberg FDR (columns 'sig.xx.BH') or lfdr
([Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209), columns 'sig._xx_.lfdr' ).
```{r pairWise2, echo=TRUE}
## The number of differentially abundant proteins passing 5% FDR (ProteomeDiscoverer and MaxQuant)
signCount <- cbind( sig.PD.BH=colSums(testPD$BH < 0.05, na.rm=TRUE), sig.PD.lfdr=if("lfdr" %in% names(testPD)) colSums(testPD$lfdr < 0.05, na.rm=TRUE),
sig.MQ.BH=colSums(testMQ$BH < 0.05, na.rm=TRUE), sig.MQ.lfdr=if("lfdr" %in% names(testMQ)) colSums(testMQ$lfdr < 0.05, na.rm=TRUE),
sig.PL.BH=colSums(testPL$BH < 0.05, na.rm=TRUE), sig.PL.lfdr=if("lfdr" %in% names(testPL)) colSums(testPL$lfdr < 0.05, na.rm=TRUE) )
table1 <- numPairDeColNames(testPD$BH, stripTxt="amol", sortByAbsRatio=TRUE)
table1 <- cbind(table1, signCount[table1[,1],])
rownames(table1) <- colnames(testMQ$BH)[table1[,1]]
kable(table1, caption="All pairwise comparisons and number of significant proteins", align="c")
```
```{r check2, echo=TRUE}
resMQ1 <- extractTestingResults(testMQ, compNo=1, thrsh=0.05, FCthrs=2)
resPD1 <- extractTestingResults(testPD, compNo=1, thrsh=0.05, FCthrs=2)
resPL1 <- extractTestingResults(testPL, compNo=1, thrsh=0.05, FCthrs=2)
```
You can see that in numerous cases many more than the `r length(UPS1$ac)` UPS1 proteins showed up as significant,
ie yeast proteins supposed to remain constant also showed up in part as 'significantly changing'.
However, some proteins with excellent (ie low) FDR values have very low log-FC amplitude and will be removed by filtering in the following steps.
```{r pairWise3, fig.height=4.5, fig.width=9.5, fig.align="center", echo=TRUE}
par(mar=c(5.5, 4.7, 4, 1))
imageW(table1[,c("sig.PD.BH","sig.MQ.BH","sig.PL.BH" )], col=rev(RColorBrewer::brewer.pal(9,"YlOrRd")),
transp=FALSE, tit="Number of BH.FDR passing proteins by the quantification approaches")
mtext("Dark red for high number signif proteins", cex=0.75)
```
In the original [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011) paper only 3 pairwise comparisons were further analyzed :
```{r pairWiseSelect2, echo=TRUE}
## Selection in Ramus paper
kable(table1[which(rownames(table1) %in% colnames(testPD$BH)[c(2,21,27)]),], caption="Selected pairwise comparisons (as in Ramus et al)", align="c")
```
Here we'll consider all possible pairwise comparisons, as shown below.
### Volcano Plots
[Volcano-plots](https://en.wikipedia.org/wiki/Volcano_plot_(statistics)) offer additional insight in how statistical test results relate to log-fold-change of pair-wise comparisons.
In addition, we can mark the different protein-groups (or species) by different symbols, see also the general vignette 'wrProteoVignette1' (from this package) and the vignette to the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
Counting the number of proteins passing a classical threshold for differential expression combined with a filter for minimum log-fold-change is a good way to start.
As mentioned, the dataset from [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011)
contains `r length(UPSconc)` different levels of [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) concentrations,
in consequence `r ncol(testPD$BH)` pair-wise comparisons are possible.
Again, plotting all possible Volcano plots would be far too crowded; instead we'll try to summarize (see ROC curves), cluster into groups and finally plot only a few representative ones.
### ROC for Multiple Pairs
_Receiver Operator Curves_ ([ROC](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)) curves
display _sensitivity_ (True Positive Rate) versus _1-Specificity_ (False Positive Rate).
They are typically used to illustrate and compare the discriminative capacity of a yes/no decision system (here: differential abundance or not),
see eg also the original publication [Hand and Till 2001](https://doi.org/doi:10.1023/A:1010920819831).
The data get constructed by sliding through a panel of threshold-values for the statistical tests instead of just using 0.05.
Due to the experimental setup we know that all yeast proteins should stay constant and only [UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) are expected to change.
For each of these threshold values one counts the number of true positives (TP), false positives (FP) etc, allowing then to calculate _sensitivity_ and _specificity_.
In the case of benchmarking quantitation efforts, ROC curves are used to judge how well heterologous spikes ([UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)) can be recognized as differentially abundant, while constant yeast matrix proteins should not get classified as differential.
Finally, ROC curves let us also gain some additional insight into the question which cutoff may be optimal, or whether the commonly used 5-percent FDR cutoff allows getting the best out of the testing system.
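In essence, the construction of such curves may be sketched as follows (a simplified illustration only; the package-functions _summarizeForROC()_ and _AucROC()_ used below perform these calculations) :
```{r rocAucSketch, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : 'pVal' = (BH-adjusted) p-values of one pairwise
## comparison, 'isSpike' = logical marking the true positives (ie UPS1)
thr <- sort(unique(c(0, pVal, 1)))
sens <- sapply(thr, function(t) sum(pVal <= t & isSpike, na.rm=TRUE) / sum(isSpike))    # TPR
spec <- sapply(thr, function(t) sum(pVal > t & !isSpike, na.rm=TRUE) / sum(!isSpike))   # TNR
## area under the curve via the trapezoid rule (x = 1 - specificity, y = sensitivity)
x <- 1 - spec;  y <- sens
auc <- sum(diff(x) * (head(y, -1) + tail(y, -1)) / 2)
```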
The next step consists in calculating the area under the curve (AUC) for the individual profiles of each pairwise comparison.
Below, these calculations of _summarizeForROC()_ are run in batch.
```{r ROC_main1, echo=TRUE}
## calulate AUC for each ROC
layout(1)
rocPD <- lapply(table1[,1], function(x) summarizeForROC(testPD, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
rocMQ <- lapply(table1[,1], function(x) summarizeForROC(testMQ, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
rocPL <- lapply(table1[,1], function(x) summarizeForROC(testPL, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
# we still need to add the names for the pair-wise groups:
names(rocPD) <- names(rocMQ) <- names(rocPL) <- rownames(table1)
```
```{r ROC_main2, echo=TRUE}
AucAll <- cbind(ind=table1[match(names(rocPD), rownames(table1)),"index"], clu=NA,
PD=sapply(rocPD, AucROC), MQ=sapply(rocMQ, AucROC), PL=sapply(rocPL, AucROC) )
```
To provide a quick overview, the AUC values of all pairwise comparisons are displayed as PCA :
```{r ROC_biplot, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
try(biplot(prcomp(AucAll[,names(methNa)]), cex=0.7, main="PCA of AUC from ROC Curves"))
```
On this PCA one can see the three software approaches in red.
We can see that AUC values from MaxQuant correlate somewhat less with Proline and ProteomeDiscoverer (red arrows).
The pair-wise comparisons constructed from the different concentrations are shown in black.
They form a compact area with mostly wide ratios (one rather high and one low concentration of UPS1 proteins).
Besides, there is a number of disperse points, typically containing the levels of 125 and/or 250 amol.
These disperse points do not replicate well and follow their own characteristics captured by PC2.
Now we are ready to group the pairwise comparisons and inspect the resulting 5 clusters in detail :
### Grouping of ROC Curves to Display Representative Ones
As mentioned, there are too many pair-wise combinations available for plotting and inspecting all ROC-curves.
So we can try to group similar pairwise-comparison AUC values into clusters and then easily display representative examples for each cluster/group.
Again, we (pre)define that we want to obtain 5 groups (like customer-ratings from 5 to 1 stars); a k-Means clustering approach was chosen.
```{r ROC_segm, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
## number of groups for clustering
nGr <- 5
## K-Means clustering
kMAx <- stats::kmeans(standardW(AucAll[,c("PD","MQ","PL")]), nGr)$cluster
table(kMAx)
AucAll[,"clu"] <- kMAx
```
```{r ROC_segm2, echo=TRUE}
AucAll <- reorgByCluNo(AucAll, cluNo=kMAx, useColumn=c("PD","MQ","PL"))
AucAll <- cbind(AucAll, iniInd=table1[match(rownames(AucAll), rownames(table1)), "index"])
colnames(AucAll)[1:(which(colnames(AucAll)=="index")-1)] <- paste("Auc",colnames(AucAll)[1:(which(colnames(AucAll)=="index")-1)], sep=".")
AucAll[,"cluNo"] <- rep(nGr:1, table(AucAll[,"cluNo"])) # make cluNo descending
kMAx <- AucAll[,"cluNo"] # update
table(AucAll[,"cluNo"])
## note : column 'index' is relative to table1, iniInd to ordering inside objects from clustering
```
To graphically summarize the AUC values, the clustered AUC values are plotted accompanied by the geometric mean:
```{r ROC_profFig, echo=TRUE}
try(profileAsClu(AucAll[,c(1:length(methNa),(length(methNa)+2:3))], clu="cluNo", meanD="geoMean", tit="Pairwise Comparisons as Clustered AUC from ROC Curves",
xlab="Comparison number", ylab="AUC", meLty=1, meLwd=3))
```
From this figure we can see clearly that there are some pairwise comparisons where all initial analysis-software results yield high AUC values,
while other pairwise comparisons have less discriminative power.
Now we can select a representative pairwise-comparison for each cluster (from the center of each cluster):
```{r ROC_segmTable, echo=TRUE}
AucRep <- table(AucAll[,"cluNo"])[rank(unique(AucAll[,"cluNo"]))] # representative for each cluster
AucRep <- round(cumsum(AucRep) -AucRep/2 +0.1)
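## eg with hypothetical cluster sizes of 7,8,8,7,6 comparisons : cumsum() gives 7,15,23,30,36,
##  so the representatives land at rows 4, 11, 19, 27 and 33, ie the cluster centers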
## select representative for each cluster
kable(round(AucAll[AucRep,c("Auc.PD","Auc.MQ","Auc.PL","cluNo")],3), caption="Selected representative for each cluster ", align="c")
```
Now we can check whether some experimental UPS1 log-fold-changes have a bias towards certain clusters.
```{r freqOfFCperClu, echo=TRUE}
ratTab <- sapply(5:1, function(x) { y <- table1[match(rownames(AucAll),rownames(table1)),]
table(factor(signif(y[which(AucAll[,"cluNo"]==x),"log2rat"],1), levels=unique(signif(table1[,"log2rat"],1))) )})
colnames(ratTab) <- paste0("\nclu",5:1,"\nn=",rev(table(kMAx)))
layout(1)
imageW(ratTab, tit="Frequency of rounded log2FC in the 5 clusters", xLab="log2FC (rounded)", col=RColorBrewer::brewer.pal(9,"YlOrRd"),las=1)
mtext("Dark red for enrichment of given pair-wise ratio", cex=0.7)
```
We can see, that the cluster of best ROC-curves (cluster 5) covers practically all UPS1 log-ratios from this experiment without being restricted just to the high ratios.
#### Plotting ROC Curves for the Best Cluster (the '+++++')
```{r ROC_grp5tab, echo=TRUE}
colPanel <- 2:5
gr <- 5
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for best pairwise-comparisons ", align="c")
```
```{r ROC_grp5fig, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu5, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## This requires package 'wrGraph' at version 1.2.5 (or higher)
if(packageVersion("wrGraph") >= "1.2.5") {
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for 2nd Best Cluster (the '++++')
```{r ROC_grp4tab, echo=TRUE}
gr <- 4
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '++++' pairwise-comparisons ", align="c")
```
```{r ROC_grp4fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu4, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for the 3rd Best Cluster (the '+++')
```{r ROC_grp3tab, echo=TRUE}
gr <- 3
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '+++' pairwise-comparisons ", align="c")
```
```{r ROC_grp3fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]],rocMQ[[jR]],rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu3, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for the 4th Best Cluster (the '++')
```{r ROC_grp2tab, echo=TRUE}
gr <- 2
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '++' pairwise-comparisons ", align="c")
```
```{r ROC_grp2fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu2, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for the Weakest Cluster 1 (the '+')
```{r ROC_grp1tab, echo=TRUE}
gr <- 1
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '+' pairwise-comparisons ", align="c")
```
```{r ROC_grp1fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2, ncol=1), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu1, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
------
## Analysis Focussing on UPS1 Spike-In Proteins Only
We know from the experimental setup that there were 48 [UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)
present in the commercial mix added to a constant background of yeast-proteins.
The lowest concentrations are extremely challenging, and it is no surprise that many of these proteins were not detected at the lowest concentration(s).
In order to choose among the various concentrations of UPS1, let's look at how many NAs are in each group of replicates (ie before NA-imputation),
and in particular, the number of NAs among the UPS1 proteins.
Previously we've looked at the total number of NAs, now let's focus just on the UPS1 proteins.
Obviously, instances of non-quantified UPS1 proteins make the following comparisons using these samples rather uncertain, since NA-imputation is just an 'educated guess'.
```{r nNA2, echo=TRUE}
tab1 <- rbind(PD=sumNAperGroup(dataPD$raw[which(dataPD$annot[,"SpecType"]=="UPS1"),], grp9),
MQ=sumNAperGroup(dataMQ$raw[which(dataMQ$annot[,"SpecType"]=="UPS1"),], grp9),
PL= sumNAperGroup(dataPL$raw[which(dataPL$annot[,"SpecType"]=="UPS1"),], grp9) )
kable(tab1, caption="The number of NAs in the UPS1 proteins", align="c")
```
One can see that starting from the 5th level of UPS1 concentrations almost all UPS1 proteins were found in nearly all samples.
In consequence we'll avoid using all of them at all times; this choice should rather be made depending on the individual protein and quantification method.
Let's look graphically at the number of NAs in each of the UPS1 proteins along the quantification methods :
```{r nNAfig1, fig.height=3.5, fig.width=9.5, fig.align="center", echo=TRUE}
countRawNA <- function(dat, newOrd=UPS1$ac, relative=FALSE) { # count number of NAs per UPS protein and order as UPS
  out <- rowSums(is.na(dat$raw[match(newOrd,rownames(dat$raw)),]))
  if(relative) out/ncol(dat$raw) else out }   # 'relative' : fraction of samples with NA
sumNAperMeth <- cbind(PD=countRawNA(dataPD), MQ=countRawNA(dataMQ), PL=countRawNA(dataPL) )
UPS1na <- sub("_UPS","",dataPL$annot[UPS1$ac,"EntryName"])
par(mar=c(6.8, 3.5, 4, 1))
imageW(sumNAperMeth, rowNa=UPS1na, tit="Number of NAs in UPS proteins", xLab="", yLab="",
transp=FALSE, col=rev(RColorBrewer::brewer.pal(9,"YlOrRd")))
mtext("Dark red for high number of NAs",cex=0.7)
```
Typically the number of NAs is similar when comparing the different quantitation approaches; it tends to be a bit higher with MaxQuant.
This also shows that some UPS1 proteins are easier to (detect and) quantify than others.
We can conclude that the capacity to successfully quantify a given protein depends on its abundance and its composition.
### Similarity by PCA (UPS1 proteins only)
Plotting the [principal components (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) typically allows gaining
an overview of how samples are related to each other.
This type of experiment is special in that the majority of proteins is expected to remain constant (the yeast matrix),
while only the [UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) vary.
Since we are primarily interested in the UPS1 proteins, the regular PCA plots are not shown here, but rather the PCA of the lines identified as UPS1.
[Principal component analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) cannot handle NA-values: either all lines with any NAs have to be excluded, or data after NA-imputation have to be used.
Here, the option of plotting data after NA-imputation was chosen (when filtering to UPS1 lines only, one would lose too many lines, ie proteins).
The plots below are made using the function `plotPCAw()` from the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
Via indexing we choose only the lines/proteins with the annotation 'spike' (ie UPS1).
#### PCA of UPS1 for ProteomeDiscoverer
```{r PCA2PD, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
try(plotPCAw(testPD$datImp[which(testPD$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on ProteomeDiscoverer, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
```
#### PCA of UPS1 for MaxQuant
```{r PCA2MQ, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
try(plotPCAw(testMQ$datImp[which(testMQ$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on MaxQuant, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
```
#### PCA of UPS1 for Proline
```{r PCA2PL, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
try(plotPCAw(testPL$datImp[which(testPL$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on Proline, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
```
Based on the PCA plots one can see that the concentrations 125 - 500 amol are very much alike; detecting differences may thus perform better when these groups are not combined, as also confirmed by the ROC analysis later.
In the Screeplot we can see that the first principal component captures almost all variability.
Thus, displaying the 3rd principal component (as done above) carries hardly any additional information.
### CV of Replicates
In order to have more data available for linear regression modelling it was decided to use UPS1 abundance values after NA-imputation for the linear regressions.
Previously it was shown that NA values originate predominantly from absent or very low abundance quantitations, which justified replacing NA values by low abundance values in a shrinkage-like fashion.
As a general indicator of data-quality and -usability let's look at the intra-replicate variability.
Here we plot all intra-group CVs (groups defined by UPS1-concentration), either the CVs for all quantified proteins or for the UPS1 proteins only.
In the figure below the complete series (including yeast) is shown on the left side, the human UPS1 proteins only on the right side.
Briefly, vioplots show a kernel-estimate of the distribution; in addition, a box-plot is also integrated (see the vignette to package [wrGraph](https://CRAN.R-project.org/package=wrGraph)).
```{r intraReplicCV1, fig.height=10, fig.width=12, fig.align="center", echo=TRUE}
## combined plot : all data (left), Ups1 (right)
layout(1:3)
sumNAinPD <- vector("list", 2*length(unique(grp9)))    # pre-allocate list of length 18
sumNAinPD[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testPD$datImp, grp9))))
sumNAinPD[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testPD$datImp[which(testPD$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinPD)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinPD)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinPD, halfViolin="pairwise", tit="CV Intra Replicate, ProteomeDiscoverer", cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
sumNAinMQ <- vector("list", 2*length(unique(grp9)))    # pre-allocate list of length 18
sumNAinMQ[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testMQ$datImp, grp9))))
sumNAinMQ[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testMQ$datImp[which(testMQ$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinMQ)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinMQ)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinMQ, halfViolin="pairwise", tit="CV intra replicate, MaxQuant",cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
sumNAinPL <- vector("list", 2*length(unique(grp9)))    # pre-allocate list of length 18
sumNAinPL[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testPL$datImp, grp9))))
sumNAinPL[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testPL$datImp[which(testPL$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinPL)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinPL)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinPL, halfViolin="pairwise", tit="CV Intra Replicate, Proline", cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
```
The distribution of intra-group CV-values showed (without major surprise) that the highest UPS1 concentrations replicated best.
This phenomenon also correlates with the content of NAs in the original data.
When imputing NA-values it is a challenge to respect the variability of the respective data (NA-neighbours) before NA-imputation.
Many NA-values can be observed at the very low UPS1-doses, and too few initial quantitation values may remain for meaningful comparisons.
Of course, with an elevated content of NAs the mechanism of NA-substitution will also contribute to masking (in part) the true variability.
One can see that lower concentrations of UPS1 usually give worse CVs (coefficient of variation) in the respective samples.
In consequence, pair-wise comparisons using one of the higher UPS1-concentration groups are expected to have a decent chance to rather specifically reveal a high number of UPS1 proteins.
### Testing All Individual UPS1 Proteins By Linear Regression
First, we construct a container for storing the various measures and results which we will look at later on.
```{r linModel0, echo=TRUE}
## prepare object for storing all results
datUPS1 <- array(NA, dim=c(length(UPS1$ac),length(methNa),7), dimnames=list(UPS1$ac,c("PD","MQ","PL"),
c("sco","nPep","medAbund", "logp","slope","startFr","cluNo")))
```
Now we'll calculate the linear models and extract the slope & p-value for each UPS1 protein.
The functions used also allow plotting the resulting regression results, but plotting each UPS1 protein would make very crowded figures.
Instead, we'll plot representative examples only after clustering the regression-results.
#### Linear Regression for each UPS1 : ProteomeDiscoverer
```{r linModelPD, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE}
lmPD <- vector("list", length(NamesUpsPD))    # pre-allocate list
doPl <- FALSE
lmPD[1:length(NamesUpsPD)] <- lapply(NamesUpsPD[1:length(NamesUpsPD)], linModelSelect, dat=dataPD,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmPD) <- NamesUpsPD
```
```{r linModelPD2, echo=TRUE}
## We make a little summary of regression-results (ProteomeDiscoverer)
tmp <- cbind(log10(sapply(lmPD, function(x) x$coef[2,4])), sapply(lmPD, function(x) x$coef[2,1]), sapply(lmPD, function(x) x$startLev))
datUPS1[,1,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmPD)), ]
datUPS1[,1,"medAbund"] <- apply(wrMisc::.scale01(dataPD$datImp)[match(UPS1$ac,rownames(dataPD$datImp)),],1,median,na.rm=TRUE)
```
#### Linear Regression for each UPS1 : MaxQuant
```{r linModelMQ, echo=TRUE}
lmMQ <- vector("list", length(NamesUpsMQ))    # pre-allocate list
lmMQ[1:length(NamesUpsMQ)] <- lapply(NamesUpsMQ[1:length(NamesUpsMQ)], linModelSelect, dat=dataMQ,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmMQ) <- NamesUpsMQ
```
```{r linModelMQ2, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE}
## We make a little summary of regression-results (MaxQuant)
tmp <- cbind(log10(sapply(lmMQ, function(x) x$coef[2,4])), sapply(lmMQ, function(x) x$coef[2,1]), sapply(lmMQ, function(x) x$startLev))
datUPS1[,2,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmMQ)), ]
datUPS1[,2,"medAbund"] <- apply(wrMisc::.scale01(dataMQ$datImp)[match(UPS1$ac,rownames(dataMQ$datImp)),],1,median,na.rm=TRUE)
```
#### Linear Regression for each UPS1 : Proline
```{r linModelPL, echo=TRUE}
lmPL <- vector("list", length(NamesUpsPL))    # pre-allocate list
lmPL[1:length(NamesUpsPL)] <- lapply(NamesUpsPL[1:length(NamesUpsPL)], linModelSelect, dat=dataPL,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmPL) <- NamesUpsPL
```
```{r linModelPLsum, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE}
tmp <- cbind(log10(sapply(lmPL, function(x) x$coef[2,4])), sapply(lmPL, function(x) x$coef[2,1]), sapply(lmPL, function(x) x$startLev))
datUPS1[,3,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmPL)), ]
datUPS1[,3,"medAbund"] <- apply(wrMisc::.scale01(dataPL$datImp)[match(UPS1$ac,rownames(dataPL$datImp)),],1,median,na.rm=TRUE)
```
#### Frequency Of Starting Levels For Regression
To get a general view, let's look at where the regressions typically have their best starting-point (ie how many low-concentration points are usually better omitted):
```{r linModelStartStat, echo=TRUE}
## at which concentration of UPS1 did the best regression start ?
stTab <- sapply(1:5, function(x) apply(datUPS1[,,"startFr"],2,function(y) sum(x==y)))
colnames(stTab) <- paste("lev",1:5,sep="_")
kable(stTab, caption = "Frequency of starting levels for regression")
```
### Global Comparison Of Regression Models
Next, we'll inspect the relation between regression-slopes and p-values (for H0: slope=0) :
```{r linModelPlotAll, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
layout(matrix(1:4,ncol=2))
subTi <- "fill according to median abundance (blue=low - green - red=high)"
xyRa <- apply(datUPS1[,,4:5], 3, range, na.rm=TRUE)
plotMultRegrPar(datUPS1, 1, xlim=xyRa[,1], ylim=xyRa[,2],tit="ProteomeDiscoverer UPS1, p-value vs slope",subTit=subTi)
plotMultRegrPar(datUPS1, 2, xlim=xyRa[,1], ylim=xyRa[,2],tit="MaxQuant UPS1, p-value vs slope",subTit=subTi)
plotMultRegrPar(datUPS1, 3, xlim=xyRa[,1], ylim=xyRa[,2],tit="Proline UPS1, p-value vs slope",subTit=subTi)
```
We can observe that the slope and the (log)p-value of the resulting regressions do not necessarily correlate well.
Thus, considering only one of these values may not be sufficient.
### Summarize Linear Regression Results
When judging results for individual UPS1 proteins one may see that both the value of the slope and the p-value (for H0: slope=0) are important to consider.
For example, there are some cases where the quantitations line up well, giving a good p-value, but with slopes < 0.4.
This is definitely not the type of dose-response characteristics we are looking for.
In consequence, let's construct a **combined score** from the components _slope_ and _p-value_ for easier consideration of both elements at once :
```{r combRegrScore1, echo=TRUE}
for(i in 1:(dim(datUPS1)[2])) datUPS1[,i,"sco"] <- -datUPS1[,i,"logp"] - (datUPS1[,i,"slope"] -1)^2 # cut at > 8
```
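Since the column _logp_ already contains the log10 of the regression p-value, this score reads `sco = -log10(p) - (slope - 1)^2` : a high score combines a low p-value with a slope close to the ideal value of 1, while slopes deviating from 1 get penalized quadratically.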
Next, let's bring together all linear-model scores, the number of peptides and the median protein abundance for each of the UPS1 proteins in one object, to facilitate the further steps.
```{r combRegrScore2, echo=TRUE}
datUPS1[,1,2] <- rowSums(dataPD$count[match(UPS1$ac,dataPD$annot[,1]),,"NoOfPeptides"], na.rm=TRUE)
datUPS1[,2,2] <- rowSums(dataMQ$count[match(UPS1$ac,dataMQ$annot[,1]),,1], na.rm=TRUE)
datUPS1[,3,2] <- rowSums(dataPL$count[match(UPS1$ac,dataPL$annot[,1]),,"NoOfPeptides"], na.rm=TRUE)
```
Now we can explore the regression score and its context to other parameters, below it's done graphically.
```{r combRegrScore3, fig.height=6, fig.width=9.5, fig.align="center", echo=TRUE}
layout(matrix(1:4, ncol=2))
par(mar=c(5.5, 2.2, 4, 0.4))
col1 <- RColorBrewer::brewer.pal(9,"YlOrRd")
imageW(datUPS1[,,1], col=col1, tit="Linear regression score", xLab="",yLab="",transp=FALSE)
mtext("red for bad score", cex=0.75)
imageW(log(datUPS1[,,2]), tit="Number of peptides", xLab="",yLab="", col=col1, transp=FALSE)
mtext("dark red for high number of peptides", cex=0.75)
## ratio : regression score vs no of peptides
imageW(datUPS1[,,1]/log(datUPS1[,,2]), col=rev(col1), tit="Regression score / Number of peptides", xLab="",yLab="", transp=FALSE)
mtext("dark red for high (good) lmScore/peptide ratio)", cex=0.75)
## score vs abundance
imageW(datUPS1[,,1]/datUPS1[,,3], col=rev(col1), tit="Regression score / median Abundance", xLab="",yLab="", transp=FALSE)
mtext("dark red for high (good) lmScore/abundance ratio)", cex=0.75)
```
From the heatmap-like plots we can see that some proteins are rather consistently quantified by any of the methods.
Some of the variability may be explained by the number of peptides (in the case of MaxQuant 'razor-peptides' were used), see the plot 'Regression score / Number of peptides'.
In contrast, the UPS1-protein median abundance does not correlate with or explain this phenomenon (see the last plot 'Regression score / median Abundance').
So we cannot support the hypothesis that highly abundant proteins get quantified better.
### Grouping of UPS1 Proteins to Display Representative Proteins
Using the linear regression score defined above we can rank the UPS1 proteins and display representative ones, in order to avoid crowded and repetitive figures.
Now we can group the regression scores and easily display a representative example for each group.
Here we (pre)define that we want to obtain 5 groups (like ratings from 1 - 5 stars); a k-Means clustering approach was chosen.
```{r combScore1, echo=TRUE}
## number of groups for clustering
nGr <- 5
chFin <- is.finite(datUPS1[,,"sco"])
if(any(!chFin)) datUPS1[,,"sco"][which(!chFin)] <- -1 # just in case..
## clustering using kMeans
kMx <- stats::kmeans(standardW(datUPS1[,,"sco"], byColumn=FALSE), nGr)$cluster
datUPS1[,,"cluNo"] <- matrix(rep(kMx, dim(datUPS1)[2]), nrow=length(kMx))
geoM <- apply(datUPS1[,,"sco"], 1, function(x) prod(x)^(1/length(x))) # geometric mean across analysis soft
geoM2 <- lrbind(by(cbind(geoM,datUPS1[,,"sco"], clu=kMx), kMx, function(x) x[order(x[,1],decreasing=TRUE),])) # organize by clusters
tmp <- tapply(geoM2[,"geoM"], geoM2[,"clu"], median)
geoM2[,"clu"] <- rep(rank(tmp, ties.method="first"), table(kMx))
geoM2 <- geoM2[order(geoM2[,"clu"],geoM2[,"geoM"],decreasing=TRUE),] # order as decreasing median.per.cluster
geoM2[,"clu"] <- rep(1:max(kMx), table(geoM2[,"clu"])[rank(unique(geoM2[,"clu"]))]) # replace cluster-names to increasing
try(profileAsClu(geoM2[,2:4], geoM2[,"clu"], tit="Clustered Regression Results for UPS1 Proteins", ylab="Linear regression score"))
```
```{r combScore2, echo=TRUE}
datUPS1 <- datUPS1[match(rownames(geoM2), rownames(datUPS1)),,] # bring in new order
datUPS1[,,"cluNo"] <- geoM2[,"clu"] # update cluster-names
### prepare annotation of UPS proteins
annUPS1 <- dataPL$annot[match(rownames(datUPS1), dataPL$annot[,1]), c(1,3)]
annUPS1[,2] <- substr(sub("_UPS","",sub("generic_ups\\|[[:alnum:]]+-{0,1}[[:digit:]]\\|","",annUPS1[,2])),1,42)
```
```{r combScore3, echo=TRUE}
## index of representative for each cluster (median position inside cluster)
UPSrep <- tapply(geoM2[,"geoM"], geoM2[,"clu"], function(x) ceiling(length(x)/2)) + c(0, cumsum(table(geoM2[,"clu"]))[-nGr])
```
Previously we organized all UPS1 proteins according to their regression characteristics into 5 clusters and each cluster was ordered for descending scores.
Now we can use the median position within each cluster as representative example for this cluster.
#### Representative UPS1-protein of the Best Group (the '+++++')
```{r regr5star, echo=TRUE}
gr <- 1
useLi <- which(datUPS1[,1,"cluNo"]==gr)
colNa <- c("Protein",paste(colnames(datUPS1), rep(c("slope","logp"), each=ncol(datUPS1)), sep=" "))
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption=paste("Regression details for cluster of the",length(useLi),"best UPS1 proteins "), col.names=colNa, align="l"),silent=TRUE)
```
```{r regrPlot5star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
## Plotting the best regressions; this requires package wrGraph version 1.2.5 (or higher)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 2nd Best Group (the '++++')
```{r regr4star, echo=TRUE}
gr <- 2
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption=paste("Regression details for cluster of the",length(useLi),"2nd best UPS1 proteins "), col.names=colNa, align="l"),silent=TRUE)
```
```{r regrPlot4star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 3rd Group (the '+++')
```{r regr3star, echo=TRUE}
gr <- 3
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 3rd cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
```
```{r regrPlot3star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 4th Group (the '++')
```{r regrPlot2star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
gr <- 4
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
  caption="Regression details for 4th cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 5th (And Last) Group (the '+')
```{r regrPlot1star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
gr <- 5
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 5th cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
## Additional Comments
\normalsize
The choice of the 'best suited' approach to quantify and compare proteomics data is not trivial at all.
Particular attention has to be given to the choice of the numerous 'small' parameters which may have a very strong impact on the final outcome,
as was experienced when preparing the data for this vignette and as reported elsewhere (eg [Chawade et al 2015](https://doi.org/10.1021/pr500665j)).
Thus, knowing and understanding well the software/tools one has chosen is of prime importance !
Of course, this also concerns the protein-identification part/software.
The total number of proteins identified varies considerably between methods; this information may be very important to the user in real-world settings
but is only partly taken into consideration in the comparisons presented here.
ROC curves allow us to gain more insight into the impact of cutoff values (alpha) for statistical testing.
Frequently the ideal threshold maximizing sensitivity and specificity lies quite far from the common 5-percent threshold.
This indicates that the common 5-percent threshold may often not be the 'optimal' compromise for calling differentially abundant proteins.
However, the _optimal_ point varies very much between data-sets, and in a real-world setting with unknown samples this type of analysis is not possible.
As mentioned before, the dataset used in this vignette is not very recent; much better performing mass-spectrometers have been introduced since.
The main aim of this vignette is to show _how to use wrProteo_ with a smaller example (allowing to limit the file-size of this package).
Thus, for more rigorous scientific conclusions the user is encouraged to run the same procedure using data from more recent mass-spectrometers.
## Acknowledgements
The author wants to acknowledge the support by the [IGBMC](https://www.igbmc.fr) (CNRS UMR 7104, Inserm U 1258, UdS),
[CNRS](http://www.cnrs.fr/en), [Université de Strasbourg](https://www.unistra.fr) and [Inserm](https://www.inserm.fr)
and of course all collegues from the [IGBMC proteomics platform](https://proteomics.igbmc.fr).
The author wishes to thank the [CRAN](https://CRAN.R-project.org) -staff for all their help with new entries and their efforts in maintaining this repository of R-packages.
Furthermore, many very fruitful discussions with colleagues at national and international level have helped to formulate ideas and improve the tools presented here.
Thank you for your interest. This package is constantly evolving; new features/functions may get added to the next version of [this package](https://CRAN.R-project.org/package=wrProteo).
## Session-Info
For completeness :
\small
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
---
title: "Getting started with wrProteo"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrProteoVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains a collection of various tools for Proteomics used at the [proteomics platform](https://proteomics.igbmc.fr) of the [IGBMC](https://www.igbmc.fr).
To get started, we need to load the packages "[wrMisc](https://CRAN.R-project.org/package=wrMisc)" and
this package ([wrProteo](https://CRAN.R-project.org/package=wrProteo)), both are available from CRAN.
The packages [wrGraph](https://CRAN.R-project.org/package=wrGraph) and [RColorBrewer](https://CRAN.R-project.org/package=RColorBrewer) get used internally by some of the functions from this package for (optional/improved) figures.
Furthermore, the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html) will be used internally for statistical testing.
If you are not familiar with [R](https://www.r-project.org) you may find many introductory documents on the official R-site
in [contributed documents](https://cran.r-project.org/other-docs.html) or under [Documentation/Manuals](https://cran.r-project.org/manuals.html).
Of course, numerous other documents/sites with tutorials exist.
The aim of package-vignettes is to provide additional information and to show examples of how the R-package concerned may be used, thus complementing the documentation given with _help()_ for each of the functions of the package. In terms of examples, frequent standard types of problems are preferred in a vignette.
Nevertheless, most functions can be used in many other ways; for this you may have to check the various arguments via calling _help_ on the function of interest.
All R-code in this vignette can be directly repeated by the user, and all data used is provided with the package.
```{r, include = FALSE}
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
```
```{r install, echo=TRUE, eval=FALSE}
## if you need to install the packages 'wrMisc','wrProteo' and 'wrGraph' from CRAN :
install.packages("wrMisc")
install.packages("wrProteo")
## The package 'wrGraph' is not obligatory, but it allows making better graphs
install.packages("wrGraph")
## Installation of limma from Bioconductor
if(!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
BiocManager::install("limma")
```
```{r setup, echo=FALSE, messages=FALSE, warnings=FALSE}
suppressPackageStartupMessages({
library(wrMisc)
library(wrProteo)
library(wrGraph)
library(knitr)
library(rmarkdown)
})
```
```{r setup2}
## Let's assume this is a fresh R-session
## Get started by loading the packages
library("knitr")
library("wrMisc")
library("wrProteo")
library("wrGraph")
# This is wrProteo version no :
packageVersion("wrProteo")
```
This way you can browse all vignettes available to [this package](https://CRAN.R-project.org/package=wrProteo) :
```{r Vigenttes1, echo=TRUE, eval=FALSE}
browseVignettes("wrProteo")
```
There you can find another vignette dedicated to the analysis of heterogeneous spike-in experiments.
## Calculating Molecular Masses From Composition Formulas
Please note that molecular masses may be given in two flavours : Monoisotopic mass and average mass.
For details you may refer to [Wikipedia: monoisotopic mass](https://en.wikipedia.org/wiki/Monoisotopic_mass).
Monoisotopic masses commonly are used in mass-spectrometry and will be used by default in [this package](https://CRAN.R-project.org/package=wrProteo).
Molecular (mono-isotopic) masses of the atoms integrated in this package were taken from [Unimod](http://www.unimod.org/masses.html).
They can easily be updated if, in the future, (mono-isotopic) molecular masses get determined with higher precision (ie more digits).
### Molecular masses based on (summed) chemical formulas
At this level (summed) atomic compositions are evaluated.
Here, the number of atoms has to be written _before_ the atom. Thus, '2C' means two atoms of carbon.
Empty or invalid entries will by default be returned as mass=0, and a message will point out such issues.
The mass of an electron can be assigned using 'e'.
```{r ChemFormMolMass1, echo=TRUE}
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN", " ", "e"))
# Ignore empty/invalid entries
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN"), rmEmpty=TRUE)
```
Using the argument _massTy_ one can switch from default _monoisotopic mass_ to _average mass_ :
```{r ChemFormMolMass2, echo=TRUE}
massDeFormula(c("12H12O", "HO", " 2H 1 Se, 6C 2N", "HSeCN"), massTy="aver")
```
### Molecular masses based on amino-acid sequence
The masses of these amino-acids can be used:
```{r AAseqMolMass, echo=TRUE}
AAmass()
```
Here the one-letter amino-acid code is used to describe peptides or proteins.
```{r AAseqMolMass2, echo=TRUE}
## mass of peptide (or protein)
pep1 <- c(aa="AAAA",de="DEFDEF")
convAASeq2mass(pep1, seqN=FALSE)
```
# Working With Fasta(Files)
## Reading Fasta Files (from Uniprot)
This package contains a parser for Fasta-files allowing the separation of different fields of meta-data like IDs, name and species of the respective entries.
Here we will read a tiny example fasta-file (a collection of typical contaminants in proteomics) using `readFasta2()`.
```{r readFasta, echo=TRUE}
path1 <- system.file('extdata', package='wrProteo')
fiNa <- "conta1.fasta.gz"
## basic reading of Fasta
fasta1 <- readFasta2(file.path(path1, fiNa))
str(fasta1)
## now let's read and further separate details in annotation-fields
fasta1b <- readFasta2(file.path(path1, fiNa), tableOut=TRUE)
str(fasta1b)
```
Now we can check if some entries appear twice.
```{r treatFasta2, echo=TRUE}
dupEntry <- duplicated(fasta1)
table(dupEntry)
```
Let's remove the duplicated entry.
```{r treatFasta3, echo=TRUE}
fasta3 <- fasta1[which(!dupEntry)]
length(fasta3)
```
## Writing Sequences As Fasta Files
Once we have modified a fasta we might want to save it again as a fasta-formatted file.
This can be done using `writeFasta2()`.
```{r writeFasta1, echo=TRUE, eval=FALSE}
writeFasta2(fasta3, fileNa="testWrite.fasta")
```
.
***
# Analyzing Label-Free Quantitative Proteomics Data
### Label-free Quantitative Proteomics Introduction
Multiple algorithms and software implementations have been developed for the quantitation of label-free proteomics experiments (LFQ),
in particular based on extracted ion chromatograms (XIC). For more background information you may look at
[Wikipedia label-free Proteomics](https://en.wikipedia.org/wiki/Label-free_quantification).
The tools presented here are designed for use with label-free XIC (ie LFQ) data.
Several of the programs for extracting initial quantitations also allow obtaining spectral counting (PSM) data, which can also get imported into R;
however, their use is not further discussed in this vignette.
In general it is preferable to use XIC for comparing peptide or protein quantities between different protein extracts/samples.
This package provides support for importing quantitation results from [Proteome Discoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812),
[MaxQuant](https://www.maxquant.org), [Fragpipe](https://fragpipe.nesvilab.org), [Proline](https://www.profiproteomics.fr/proline/),
[MassChroQ](http://pappso.inrae.fr/bioinfo/masschroq/), [DIA-NN](https://github.com/vdemichev/DiaNN), [AlphaPept](https://github.com/MannLabs/alphapept),
[Wombat-P](https://github.com/wombat-p) and [OpenMS](https://openms.de/).
All quantitation import functions offer special features for further separating annotation related information, like species, for later use.
In most common real-world cases people typically analyze data using only one quantitation algorithm/software.
Below in this vignette, we'll use only the quantitation data generated using MaxQuant (AlphaPept, DIA-NN, FragPipe, MassChroQ, OpenMS, ProteomeDiscoverer, Proline and Wombat-P are supported, too).
The other vignette to [this package](https://CRAN.R-project.org/package=wrProteo) ("UPS-1 spike-in Experiments") shows in detail the import functions available for MaxQuant, ProteomeDiscoverer and Proline
and how further comparisons can be performed in bench-mark studies.
All these import functions generate an equivalent output format, separating (selected) annotation data (\$annot) from normalized log2-quantitation data (\$quant)
and initial quantitation (\$raw).
Normalization (discussed [below](#Normalization) in more detail) is an important part of 'preparing' the data for subsequent analysis.
The import functions in this package allow performing an initial normalization step (with a choice among multiple algorithms), too.
Further information about the identified proteins can be considered during normalization:
for example, it is possible to exclude contaminants like keratins, which are frequently found among the higher abundant proteins and may potentially introduce bias at global normalization.
Technical replicates are very frequently produced in proteomics, they allow to assess the variability linked to repeated injection of the same material.
Biological replicates, however, make additional information accessible, allowing the interpretation of experiments in a more general way.
## Import From Dedicated Quantification Algorithms/Software {#ImportQuantitation}
### MaxQuant: Import Protein Quantification Data {#ReadMaxQuant}
[MaxQuant](https://www.maxquant.org) is free software provided by the [Max-Planck-Institute](https://www.biochem.mpg.de/de),
see also [Tyanova et al 2016](https://doi.org/10.1038/nprot.2016.136).
Typically [MaxQuant](https://www.maxquant.org) exports quantitation data at the level of consensus-proteins by default into a folder called txt, with a file always called 'proteinGroups.txt'.
Data exported from [MaxQuant](https://www.maxquant.org) can get imported (and normalized) using `readMaxQuantFile()`;
in a standard case one only needs to provide the path to the file 'proteinGroups.txt' which can be found in the _combined/txt/_ folder produced by MaxQuant.
gz-compressed files can be read, too (as in the example below with the file 'proteinGroups.txt.gz').
The argument _specPref_ allows giving further details about expected (primary) species, it defaults to working with human proteins.
To get started, let's just set it to _NULL_ for ignoring.
```{r readMaxQuant1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
## number of lines and columns of quantitation data
dim(dataMQ$quant)
```
##### Adding Meta-Data at Import (Example MaxQuant) {#ReadMaxQuantWithMetaData}
Similarly, we can also add information about the principal species, contaminants and special groups of proteins, and add sdrf annotation (if existing) directly when reading the data.
Setting customized tags according to species or other search-terms can be done using the argument _specPref_.
In the example below we define a main species (tags are made by comparing to the species information initially given by the fasta)
and we define a custom group of proteins by their Uniprot-Accessions (here the UPS1 spike-in).
Then, the content of argument _specPref_ will get searched in multiple types of annotation (if available from the initial Fasta).
By setting _suplAnnotFile=TRUE_ the import function will also look for files (by default produced by MaxQuant as 'summary.txt' and 'parameters.txt')
giving more information about experiment and samples and integrate this to the output.
(This time let's not display the plot of distributions; it's the same plot as above, see argument _plotGraph_.)
```{r readMaxQuant2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The grouping of replicates
grp9 <- rep(1:9,each=3)
head(grp9)
## special group of proteins (we want to differentiate/ highlight lateron)
UPS1ac <- c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165", "P00709", "P06732",
"P12081", "P61626", "Q15843", "P02753", "P16083", "P63279", "P01008", "P61769", "P55957", "O76070",
"P08263", "P01344", "P01127", "P10599", "P99999", "P06396", "P09211", "P01112", "P01579", "P02787",
"O00762", "P51965", "P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375")
specPrefMQ <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike=UPS1ac)
dataMQ <- readMaxQuantFile(path1, specPref=specPrefMQ, suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=FALSE), gr=grp9, plotGraph=FALSE)
## the quantitation data is the same as before
dim(dataMQ$quant)
```
Now we can access the special tags in the annotation part of the resulting object :
```{r readMaxQuant3, echo=TRUE}
## count of tags based on argument specPref
table(dataMQ$annot[,"SpecType"])
```
This information can be used automatically later on for assigning different symbols and/or colors when drawing Volcano-plots or PCA.
##### Adding Experimental Setup (Sdrf) to Meta-Data at Import (Example MaxQuant)
To further analyze the data from an experiment the user typically also needs to know/declare the different groups of samples (eg which sample is a replicate of which).
In the simplest case this can be done via the argument _gr_, as shown above.
By the way, if _gr_ is provided it gets priority over other automatic mining results.
The import-functions from this package try to help you in multiple ways to find out more about the experimental details.
Most quantitation software (like MaxQuant and ProteomeDiscoverer) also produce files/documentation about experimental annotation specified by the user.
These files may be automatically read and mined via argument _suplAnnotFile=TRUE_ to gather information about groups of samples.
The project [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata) aims to provide
a uniform format for documenting experimental meta-data ([sdrf](#ImportSdrf)).
If sdrf-annotation (see [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata)) exists on [Pride](https://www.ebi.ac.uk/pride/), it can be imported, too.
The information on the experimental setup will be mined automatically to design groups of samples (ie levels of covariant factors).
If sdrf has not been prepared, the user may also simply provide a data.frame formatted like sdrf from Pride.
Finally, if none of the above is available, the column-names of the quantitation columns will be mined for hints about groups of replicates (in particular when using MaxQuant).
For a bit more complex example of using _readMaxQuantFile()_ or integrating other annotation information,
please look at the vignette "UPS1 spike-in Experiments" also available to [this package](https://CRAN.R-project.org/package=wrProteo).
The simplest way of adding sdrf annotation consists in adding the project ID from [Pride](https://www.ebi.ac.uk/pride/), as shown below.
The argument _groupPref_ allows defining further adjustments/choices.
The import-function will first check whether this is a local file and, if not, try to download it from Pride (if available) and further mine the information.
```{r readMaxQuant4, echo=TRUE}
dataMQ <- readMaxQuantFile(path1, specPref=specPrefMQ, sdrf="PXD001819", suplAnnotFile=TRUE, groupPref=list(lowNumberOfGroups=FALSE), plotGraph=FALSE)
```
##### Exporting Experimental Setup from MaxQuant to Draft-Sdrf {#ExportDraftSdrf}
As mentioned, the [Proteomics Sample Metadata Format - sdrf](https://github.com/bigbio/proteomics-sample-metadata) is an effort for standardizing experimental meta-data.
Many of the typically documented details may already have been entered when launching [MaxQuant](https://www.maxquant.org) and can be exported as a draft Sdrf-file.
All main columns for standard experiments are present in the file, though some columns will have to be completed by the user (by any text-editor) for submitting to Pride.
```{r exportSdrfDraftMaxQuant5, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaMQ <- "proteinGroups.txt.gz"
dataMQ2 <- readMaxQuantFile(path1, file=fiNaMQ, refLi="mainSpe", sdrf=FALSE, suplAnnotFile=TRUE)
## Here we'll write simply in the current temporary directory of this R-session
exportSdrfDraft(dataMQ2, file.path(tempdir(),"testSdrf.tsv"))
```
#### MaxQuant : Import Peptide Data {#ReadMaxQuantPetides}
Similarly it is possible to read the file by default called _'peptides.txt'_ for the peptide-data.
In the example below we'll provide a custom file-name (to a tiny example non-representative for biological interpretation).
The data get imported to a structure similar to the protein-level data; quantitations on peptide level are by default median-normalized, and the sample-setup from sdrf-files may be added, too.
```{r readMaxQuantPeptides, echo=TRUE}
MQpepFi1 <- "peptides_tinyMQ.txt.gz"
path1 <- system.file("extdata", package="wrProteo")
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST", spec2="HUMAN")
dataMQpep <- readMaxQuantPeptides(path1, file=MQpepFi1, specPref=specPref1, tit="Tiny MaxQuant Peptides")
summary(dataMQpep$quant)
```
If the argument _suplAnnotFile_ is set to _TRUE_, the files 'summary.txt' and 'parameters.txt' (produced by MaxQuant by default) will be searched in the same directory.
If these files are available and seem to correspond to the quantitation data read in the main part of the function,
supplemental information about the experimental setup will be mined and added to the resulting object.
.
### ProteomeDiscoverer : Import Protein Quantification {#ReadProteomeDiscoverer}
[Proteome Discoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812) is commercial software from ThermoFisher (www.thermofisher.com),
see also [Orsburn, 2021](https://doi.org/10.3390/proteomes9010015).
Data exported from [Proteome Discoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812) can get imported (typically the *xx_Proteins.txt* file)
using `readProteomeDiscovererFile()`, for details please see the vignette "UPS-1 spike-in Experiments" also available with [this package](https://CRAN.R-project.org/package=wrProteo).
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readProteomeDiscovererProt1, echo=TRUE}
fiNa <- "tinyPD_allProteins.txt.gz"
dataPD <- readProteomeDiscovererFile(file=fiNa, path=path1, suplAnnotFile=FALSE, plotGraph=FALSE)
summary(dataPD$quant)
```
Please note, that quantitation data exported from ProteomeDiscoverer frequently have very generic column-names (increasing numbers).
When calling the import-function they can be replaced by more meaningful names, either using the argument _sampNa_
(thus, much care should be taken on the order when preparing this vector !),
by reading the default annotation in the file _'InputFiles.txt'_ (if exported), or from sdrf-annotation (if available).
In this case, supplemental information about the experimental setup will be mined and added to the resulting object.
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
For a more complex example of using _readProteomeDiscovererFile()_ please see the vignette _'UPS1 spike-in Experiments'_ of [this package](https://CRAN.R-project.org/package=wrProteo).
#### ProteomeDiscoverer : Import Peptide Data {#ReadProteomeDiscovererPetides}
Similarly it is possible to read the peptide-data files exported by ProteomeDiscoverer using the function `readProtDiscovererPeptides()`, as sketched below.
The data get imported to a structure similar to the protein-level data; quantitations on peptide level are by default median-normalized, and the sample-setup from sdrf-files may be added, too.
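Since no toy peptide-export for ProteomeDiscoverer is included here, below is only a minimal sketch (not run); the file name is a placeholder and the arguments are assumed to mirror those of `readProteomeDiscovererFile()` shown above:
```{r readProtDiscPep1, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : 'xx_PeptideGroups.txt' is a placeholder for your own
## ProteomeDiscoverer peptide export; arguments assumed analogous to the protein-level import
dataPDpep <- readProtDiscovererPeptides(file="xx_PeptideGroups.txt", path=path1,
  suplAnnotFile=FALSE, plotGraph=FALSE)
summary(dataPDpep$quant)
```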
### DIA-NN: Import Protein Quantification Data {#ReadDiaNN}
[DIA-NN](https://github.com/vdemichev/DiaNN) is free software provided by the Demichev, Ralser and Lilley labs,
see also [Demichev et al, 2020](https://doi.org/10.1038/s41592-019-0638-x).
Typically [DIA-NN](https://github.com/vdemichev/DiaNN) allows exporting quantitation data on level of consensus-proteins as tsv-formatted files.
Such data can get imported (and normalized) using `readDiaNNFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readDiaNN1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
diaNNFi1 <- "tinyDiaNN1.tsv.gz"
## This file contains much less identifications than one may usually obtain
path1 <- system.file("extdata", package="wrProteo")
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="HUMAN")
dataNN <- readDiaNNFile(path1, file=diaNNFi1, specPref=specPref1, tit="Tiny DIA-NN Data", plotGraph=FALSE)
summary(dataNN$quant)
```
#### DIA-NN : Import Peptide Data {#ReadDiaNNPetides}
Similarly, data from [DIA-NN](https://github.com/vdemichev/DiaNN) on peptide level can get imported (and normalized) using `readDiaNNPeptides()`, as sketched below.
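A minimal sketch (not run), assuming a tsv peptide export and arguments analogous to `readDiaNNFile()` above; the file name below is a placeholder:
```{r readDiaNNPep1, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : 'myDiaNN_peptides.tsv' is a placeholder for your own export
dataNNpep <- readDiaNNPeptides(path1, file="myDiaNN_peptides.tsv", specPref=specPref1,
  tit="DIA-NN Peptide Data", plotGraph=FALSE)
summary(dataNNpep$quant)
```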
### Proline : Import Protein Quantification Data {#ReadProline}
[Proline](https://www.profiproteomics.fr/proline/) is free software provided by the Profi-consortium,
see also [Bouyssié et al 2020](https://doi.org/10.1016/j.jprot.2015.11.011).
Data exported from [Proline](https://www.profiproteomics.fr/proline/) (xlsx, csv or tsv format) can get imported using `readProlineFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readProlineProt1, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNa <- "exampleProlineABC.csv.gz" # gz compressed data can be read, too
dataPL <- readProlineFile(file=fiNa, path=path1, plotGraph=FALSE)
summary(dataPL$quant[,1:8])
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
For a more complex example of using _readProlineFile()_ please see the vignette _'UPS1 spike-in Experiments'_ from [this package](https://CRAN.R-project.org/package=wrProteo).
### Fragpipe : Import Protein Quantification Data {#ReadFragpipe}
[Fragpipe](https://fragpipe.nesvilab.org) is a database search tool for peptide identification, open-source developed by the [Nesvizhskii lab](https://www.nesvilab.org),
see eg [Kong et al 2017](https://doi.org/10.1038/nmeth.4256), [da Veiga Leprevost et al 2020](https://doi.org/10.1038/s41592-020-0912-y) or other related publications.
Data exported from [Fragpipe](https://fragpipe.nesvilab.org) (in tsv format) can get imported using `readFragpipeFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readFragpipe1, echo=TRUE}
FPproFi1 <- "tinyFragpipe1.tsv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="MOUSE")
dataFP <- readFragpipeFile(path1, file=FPproFi1, specPref=specPref1, tit="Tiny Fragpipe Example", plotGraph=FALSE)
summary(dataFP$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### MassChroQ : Import Protein Quantification Data
[MassChroQ](http://pappso.inrae.fr/bioinfo/masschroq/) is free open software provided by the [PAPPSO](http://pappso.inrae.fr),
see also [Valot et al 2011](https://doi.org/10.1002/pmic.201100120).
Initial quantifications are on peptide basis and should be normalized and summarized using the R-package MassChroqR, which is also publicly available from the [PAPPSO](http://pappso.inrae.fr/bioinfo/).
Quantifications at protein-level can be saved as matrix into an RData-file or written to tsv, csv or txt files for following import into the framework of this package
using `readMassChroQFile()`, for details please see the help-page to this function.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readMassChroq1, echo=TRUE}
MCproFi1 <- "tinyMC.RData"
dataMC <- readMassChroQFile(path1, file=MCproFi1, tit="Tiny MassChroq Example", plotGraph=FALSE)
summary(dataMC$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### AlphaPept : Import Protein Quantification Data {#ReadAlphaPeptide}
[AlphaPept](https://github.com/MannLabs/alphapept) is a free open-source search tool for peptide identification created by the Mann-lab,
see eg [Strauss et al 2021](https://doi.org/10.1101/2021.07.23.453379).
Data exported from AlphaPept (in csv format) can get imported using `readAlphaPeptFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readAlphaPept1, echo=TRUE}
APproFi1 <- "tinyAlpaPeptide.csv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK")
dataAP <- readAlphaPeptFile(path1, file=APproFi1, specPref=specPref1, tit="Tiny AlphaPept Example", plotGraph=FALSE)
summary(dataAP$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### Wombat-P : Import Protein Quantification Data {#ReadWombatP}
[Wombat-P](https://github.com/wombat-p) is a free open-source search tool for peptide identification created by an Elixir-consortium,
see also [Bouyssie et al 2023](https://doi.org/10.1021/acs.jproteome.3c00636).
Data exported from Wombat-P (in csv format) can get imported using `readWombatNormFile()`.
The example below is just a toy data-set, normally one can identify and quantify many more proteins.
```{r readWombarP1, echo=TRUE}
WBproFi1 <- "tinyWombCompo1.csv.gz"
## let's define the main species and allow tagging some contaminants
specPref1 <- c(conta="conta|CON_|LYSC_CHICK", mainSpecies="YEAST")
dataWB <- readWombatNormFile(path1, file=WBproFi1, specPref=specPref1, tit="Tiny Wombat-P Example", plotGraph=FALSE)
summary(dataWB$quant)
```
As described with [MaxQuant](#ReadMaxQuant), additional meta-data such as [sdrf](#ImportSdrf) can be imported in the same way.
### OpenMS : Import Protein Quantification Data
[OpenMS](https://openms.de/) is free open software provided by the deNBI Center for integrative Bioinformatics,
see also [Rost et al 2016](https://doi.org/10.1038/nmeth.3959).
Peptide level data exported as csv get summarized from peptide to protein level and further normalized using `readOpenMSFile()`; for details please see the help-page of this function and the minimal sketch below.
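A minimal sketch (not run); the file name is a placeholder and only the first argument is shown, please check _help('readOpenMSFile')_ for the actual arguments:
```{r readOpenMS1, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : 'myOpenMS_export.csv' is a placeholder for your own
## OpenMS peptide-level csv export; see help('readOpenMSFile') for all arguments
dataOM <- readOpenMSFile("myOpenMS_export.csv")
summary(dataOM$quant)
```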
### Importing Sdrf Meta-Data {#ImportSdrf}
The project [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata) aims to provide
a uniform format for documenting experimental meta-data (sdrf-format).
As mentioned at the section for reading [MaxQuant](#ReadMaxQuant), most import-functions
from [wrProteo](https://CRAN.R-project.org/package=wrProteo) can directly import (if available) the experimental setup from sdrf,
or from files produced using the various quantitation software (as shown with [MaxQuant](#ReadMaxQuant)).
To do this separately, or if you need to read an alternative annotation file, you may use `readSampleMetaData()`.
If sdrf annotation is available on Pride/github this information can be read and directly integrated with software specific annotation using the import-functions shown above or as shown below.
Of course the user should always make sure the annotation really corresponds to the current experimental data !
When adding the quantitation-data using the argument _abund_, the function also checks whether the number of samples fits and
tries to align the order of the meta-data to that of the quantitation data (based on the raw files), since they are not necessarily in the same order.
```{r readSampleMetaData2, echo=TRUE}
MQsdrf001819Setup <- readSampleMetaData(quantMeth="MQ", sdrf="PXD001819", path=path1, suplAnnotFile="summary.txt.gz", abund=dataMQ$quant)
str(MQsdrf001819Setup)
```
However, the recommended and most convenient way is to add/import meta-data directly when importing quantitation-data (eg using _readMaxQuantFile()_, _readProteomeDiscovererFile()_, etc).
## Combining Proteomics Projects {#FuseProteomicsProjects}
If needed, function `fuseProteomicsProjects()` allows combining up to 3 separate data-sets previously imported using wrProteo.
The user should think very carefully how and why to fuse multiple separately imported data-sets, which might each have their own characteristics.
Note, the function presented here does not re-normalize the combined data, the user should investigate the data and decide on suitable strategies
for further [normalization](#Normalization).
Data from different software may not contain exactly the same proteins or peptides; only the common identifiers are retained by this approach.
The user should pay attention to which identifier should be used and that identifiers do not appear multiple times in the same data-set.
If, however, some IDs appear multiple times (ie as separate lines) in the same data-set, the corresponding numeric data will be summarized to a single line.
This may have a noticeable effect on the following biological interpretation.
Thus, it is very important to know your data and to understand when lines that appear with the same identifiers should/may be fused/summarized without
doing damage to the later biological interpretation ! The user may specify for each dataset the column of the protein/peptide-annotation to use
via the argument _columnNa_.
Then, this content will be matched as an identical match, so when combining data from different software special care should be taken !
```{r fuseProteomicsProjects1, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
dataMQ <- readMaxQuantFile(path1, specPref=NULL, normalizeMeth="median")
dataMC <- readMassChroQFile(path1, file="tinyMC.RData", tit="Tiny MassChroq Example", plotGraph=FALSE)
dataFused <- fuseProteomicsProjects(dataMQ, dataMC)
str(dataFused$quant)
```
## Normalization {#Normalization}
As mentioned, the aim of normalization is to remove bias in data not linked to the original (biological) question.
The import functions presented above do already by default run global median normalization.
When choosing a normalization procedure one should reflect what additional information may be available to guide normalization.
For example, it may be very useful to exclude classical protein contaminants since they typically do not reflect the original biological material.
Overall, it is important to inspect the results of normalization; graphical display of histograms, boxplots or violin-plots helps comparing distributions.
Multiple options exist for normalizing data, please look at the documentation provided with the import-functions introduced above.
Please note, that enrichment experiments (like IP) can quickly pose major problems to the choice of normalization approaches.
The function `normalizeThis()` from the package [wrMisc](https://CRAN.R-project.org/package=wrMisc) is run internally.
It can be used to run additional normalization, if needed.
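For example, here is a minimal sketch (not run) of re-running a global median normalization on the log2 data imported above; see the help-page of `normalizeThis()` for the methods available:
```{r normalizeThis1, echo=TRUE, eval=FALSE}
## minimal sketch (not run) : re-normalize the (already median-normalized) log2 data
quantReNorm <- wrMisc::normalizeThis(dataMQ$quant, method="median")
summary(quantReNorm)
```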
Different normalization procedures intervene with different 'aggressiveness', ie also with different capacity to change the initial data.
In general, it is suggested to start normalizing using 'milder' procedures, like global median, and to switch to more intervening methods if initial results do not seem satisfactory.
Beware, heavy normalization procedures may also alter the main information you want to analyze.
Ie, some biologically true positive changes may start to fade or disappear when inappropriate normalization is performed.
Please note, that normalization should be performed before [NA-imputation](#NA-imputation) to avoid introducing new bias in the group of imputed values.
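For example, additional normalization may be run at any time on the imported log2 abundance data.
Below a minimal sketch (not run) using `normalizeThis()`; a simple global median normalization is assumed here, please consult the wrMisc documentation for the full range of arguments and methods.
```{r normalizeThisSketch, echo=TRUE, eval=FALSE}
## Minimal sketch (not run) : re-run a (mild) global median normalization
## on the imported data ; see ?wrMisc::normalizeThis for further arguments
quantMedNorm <- wrMisc::normalizeThis(dataMQ$quant, method="median")
summary(quantMedNorm[,1:4])
```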
## Imputation of NA-values {#NA-imputation}
In proteomics the quantitation of very low abundances is very challenging.
Proteins which are absent or of very low abundance typically appear in the results as 0 or NA.
Frequently this may be linked to the fact that no peak is detected in an MS-1 chromatogram (for a given LC elution-time) while other samples had a strong peak
at the respective place which led to successful MS-2 identification.
Please note, that the match-between-runs option available in various software tools allows to considerably reduce the number of NAs.
To simplify the treatment all 0 values are transformed to NA; anyway, they would not allow log2-transformation either.
Before replacing NA-values it is important to verify that such values may be associated to absent or very low abundances.
To do so, we suggest to inspect groups of replicate-measurements using `matrixNAinspect()`.
In particular, with multiple technical replicates of the same sample it is supposed that any variability observed is not linked to the sample itself.
So for each NA that occurs in the data we suggest to
look what was reported for the same protein with the other (technical) replicates.
This brings us to the term of 'NA-neighbours' (quantifications for the same protein in replicates).
When drawing histograms of NA-neighbours one can visually inspect and verify that NA-neighbours are typically low abundance values,
however not necessarily the lowest values observed in the entire data-set.
```{r NA_MaxQuant, echo=TRUE}
## Let's inspect NA values as graphic
matrixNAinspect(dataMQ$quant, gr=grp9, tit="Histogram of Protein Abundances and NA-Neighbours")
```
So only if the hypothesis of NA-neighbours as typically low abundance values gets confirmed by visual inspection of the histograms,
one may safely proceed to replacing them by low random values.
If one uses a unique (very) low value for NA-replacements, this will quickly pose a problem
at the level of t-tests to look for proteins changing abundance between two or more groups of samples.
Therefore it is common practice to draw random values from a Normal distribution representing this lower end of abundance values.
Nevertheless, the choice of the parameters of this Normal distribution is very delicate.
This package proposes several related strategies/options for NA-imputation.
First, the classical imputation of NA-values using Normal distributed random data is presented.
The mean value for the Normal data can be taken from the median or mode of the NA-neighbour values,
since (in case of technical replicates) NA-neighbours tell us what these values might have been and thus we model a distribution around them (see also the sketch below).
Later in this vignette, a more elaborate version based on repeated implementations to obtain more robust results will be presented.
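To illustrate the basic principle only (this is not the exact procedure used by this package), NA-replacement by random Normal values may be sketched as follows; the choice of the 0.1 quantile and the reduced standard-deviation shown here are purely illustrative assumptions.
```{r NAimputePrinciple, echo=TRUE, eval=FALSE}
## Minimal sketch of the principle only (not the exact procedure of this package) :
## draw NA-replacements from a Normal distribution centered at a low quantile
x <- dataMQ$quant                              # log2 abundances, containing NAs
impMean <- quantile(x, 0.1, na.rm=TRUE)        # low quantile as mean (illustrative choice)
impSd <- 0.5 * sd(x, na.rm=TRUE)               # reduced sd (illustrative choice)
chNA <- is.na(x)
x[chNA] <- rnorm(sum(chNA), mean=impMean, sd=impSd)
```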
The function `matrixNAneighbourImpute()` proposed in this package offers automatic selection of these parameters, which have been tested in a number of different projects.
However, this choice should be checked by critically inspecting the histograms of 'NA-neighbours' (ie successful quantitation in other
replicate samples of the same protein) and the final resulting distribution. Initially all NA-neighbours are extracted.
It is also worth mentioning that in the majority of data-sets encountered, such NA-neighbours form skewed distributions.
Instances with more than one NA-value per group may be considered even more representative of the abundance-range of NAs, but of course fewer successfully quantified values remain.
Thus a primary choice is made: If the selection of (min) 2 NA-values per group yields more than 300 values, this distribution will be used as base to model
the distribution for drawing random values. In this case, by default the 0.18 quantile of the 2-NA-neighbour distribution will be used as mean for
the new Normal distribution used for NA-replacements. If the number of 2-NA-neighbours is less than 300, (by default) the 0.1 quantile of all NA-neighbour values will be used.
Of course, the user has also the possibility to use custom choices for these parameters.
The final replacement is done on all NA values. This also includes proteins which are all NA in a given condition as well as instances of mixed successful quantitation and NA values.
```{r NArepl_MaxQuant, echo=TRUE}
## MaxQuant simple NA-imputation (single round)
dataMQimp <- matrixNAneighbourImpute(dataMQ$quant, gr=grp9, tit="Histogram of Imputed and Final Data")
```
However, imputing using Normal distributed random data also brings the risk of occasional extreme values.
In the extreme case it may happen that a given protein is all NA in one group, and by chance the random values turn out to be rather high.
Then, the final group mean of imputed values may be even higher than the mean of another group with successful quantitations.
Of course in this case it would be a bad interpretation to consider the protein in question upregulated in a sample where all values for this protein were NA.
To circumvent this problem there are 2 options : 1) one may use special filtering schemes to exclude such constellations from final results or 2)
one could repeat replacement of NA-values numerous times.
The function _testRobustToNAimputation()_ allows such repeated replacement of NA-values. For details, see also the following section.
For other packages dealing with missing values (NAs), please also look at the [missing data task-view](https://CRAN.R-project.org/view=MissingData) on CRAN.
## Filtering {#Filtering}
The main aim of filtering in omic-data analysis is to remove proteins/genes/lines which are for obvious reasons not suitable for further analysis.
Invalid or low quality measures are not suitable for good results and may thus be removed.
Frequently additional information is used to justify the procedure of removing certain proteins/genes/lines.
One very common element in filtering is the observation that very low abundance measures are typically less precise than medium or high abundance values.
Thus, a protein/gene with all abundance measures at the very low end of the global scale may as well just seem to change abundance due to the elevated variance of low abundance measures.
However, most statistical tests are not very well prepared for the elevated variance of low abundance measures.
In consequence, it is common to remove or disqualify such proteins/genes/lines which are at high risk of yielding false positive results.
In the context of proteomics the number of samples with NAs (non-quantified peptides/proteins) for a given protein/peptide represents also an interesting starting point. If almost all values finally compared are a result of (random) imputation, any apparent change in abundance of such proteins/peptides may rather reflect rare stochastic events of NA-imputation.
Please note, that rather aggressive filtering may severely reduce the ability to identify on/off situations which very well may occur in most biological settings.
General filtering can be performed using `presenceFilt()` (from package [wrMisc](https://CRAN.R-project.org/package=wrMisc)).
Other filtering of proteins/peptides/lines based on the annotation (eg for hypothetical proteins etc) may be done using _filterLiColDeList()_ (also from package [wrMisc](https://CRAN.R-project.org/package=wrMisc)).
Initial information for filtering is already collected by the import-functions (_readMaxQuantFile()_, _readProteomeDiscovererFile()_, _readProlineFile()_, _readOpenMSFile()_ etc..).
Then information for filtering can be used by the function _combineMultFilterNAimput()_ which is integrated to _testRobustToNAimputation()_ (see section below) to conveniently include filtering-aspects.
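A minimal sketch (not run) of NA-based filtering is shown below; default thresholds are assumed here, please consult the documentation of `presenceFilt()` for the available arguments.
```{r presenceFiltSketch, echo=TRUE, eval=FALSE}
## Minimal sketch (not run) : filter by presence of (non-NA) values per group,
## using (assumed) default thresholds ; see ?wrMisc::presenceFilt for the arguments
filtNA <- wrMisc::presenceFilt(dataMQ$quant, grp=grp9)
## logical matrix indicating lines passing the filter for each pairwise comparison
head(filtNA)
```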
## Statistical Testing {#StatisticalTesting}
The [t-test](https://en.wikipedia.org/wiki/Student%27s_t-test) remains the main statistical test used, as in many other cases of omics, too.
Statistical testing in the context of proteomics data poses challenges similar to transcriptomics :
Many times the number of replicate-samples is fairly low and the inter-measurement variability quite high.
In some unfortunate cases proteins with rather constant quantities may appear as false positives when searching for proteins whose abundance changes between two groups of samples : If the apparent variability is by chance too low, the respective standard-deviations will be low and a plain t-test may give very enthusiastic p-values.
Besides stringent filtering (previous section of this vignette), the use of shrinkage when estimating the intra-group/replicate variance from the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html) turns out very helpful,
see also [Ritchie et al 2015](https://doi.org/10.1093/nar/gkv007).
In this package the function _eBayes()_ has been used and adapted to proteomics.
The function `testRobustToNAimputation()` allows running multiple cycles of NA-imputation and statistical testing with the aim of providing stable imputation and testing results.
It performs NA-imputation and statistical testing (after repeated imputation) between all groups of samples at the same time (as it would be inefficient to separate these two tasks). The tests underneath apply shrinkage from the empirical Bayes procedure of the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html). In addition, various formats of multiple test correction can be directly added to the results : Benjamini-Hochberg FDR, local false discovery rate (lfdr, using the package [fdrtool](https://CRAN.R-project.org/package=fdrtool), see [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209)), or modified testing by [ROTS](https://bioconductor.org/packages/release/bioc/html/ROTS.html), etc ...
The fact that a single round of NA-imputation may provoke false positives as well as false negatives, made it necessary to combine this (iterative) process of NA-imputation and subsequent testing in one single function.
```{r testRobustToNAimputation_MQ1, echo=TRUE}
## Impute NA-values repeatedly and run statistical testing after each round of imputations
testMQ <- testRobustToNAimputation(dataMQ, gr=grp9)
## Example of the data after repeated NA-imputation
head(testMQ$datImp[,1:6])
```
## Data Exploration With Graphical Support {#DataExploreGraphics}
### PCA {#PCA}
Briefly, principal components analysis ([PCA](https://en.wikipedia.org/wiki/Principal_component_analysis)) searches to decompose the data along all the axes defined by the samples. Then, the axis-combinations capturing the highest degree of correlation are identified.
In principle one could also run PCA along the rows, ie the proteins, but their number is typically so high that the resultant plots get too crowded.
In the context of high throughput experiments, like proteomics, PCA reveals important information about how the different samples are related (ie similar).
This covers of course the real differences between different biological conditions, but also additional bias introduced as (technical) artifacts.
Thus, such plots serve for quality control (in particular to identify outlier-samples, eg due to degraded material) as well as for the biological interpretation.
Normally one could immediately check the normalized data by PCA before running statistical tests.
As stated in other places, PCA can't handle missing values (ie _NA_ ).
Thus, all proteins having one NA in just one sample won't be considered during PCA.
This would mask a significant number of proteins in numerous proteomics experiments.
Thus, it may be preferable to run PCA after NA-imputation.
However, since in this package statistical testing was coupled to the repeated NA-imputation, it may be better to use the NA-imputations made for the statistical testing (in the section above).
Here we'll use the function `plotPCAw()` from the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
```{r PCA1MQ, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
# PCA on all proteins (NA-imputed data)
plotPCAw(testMQ$datImp, sampleGrp=grp9, tit="PCA on Protein Abundances (MaxQuant,NAs imputed)", rowTyName="proteins", useSymb2=0)
```
Please note, the vignette dedicated to spike-in experiments ("UPS-1 spike-in Experiments") presents a slightly different way of making PCA-plots for this specific type of experiment/data-set.
### MA-plot {#MAplot}
MA-plots are mainly used for diagnostic purposes. Basically, an [MA-plot](https://en.wikipedia.org/wiki/MA_plot) displays the log-Fold-Change versus the average abundance.
We'll use the function `MAplotW()` from the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
```{r MAplot1, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE}
# By default this plots the first of all pairwise comparisons
MAplotW(testMQ)
```
#####
Now for the second group of pair-wise comparisons, plus adding names of proteins passing threshold:
```{r MAplot2, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE}
res1 <- NULL
MAplotW(testMQ, useComp=2, namesNBest="passFC")
```
### Volcano-Plot {#VolcanoPlot}
A [Volcano-plot](https://en.wikipedia.org/wiki/Volcano_plot_\(statistics\)) allows to compare the simple fold-change (FC) opposed to the outcome of a statistical test.
Frequently we can observe that some proteins show very small FC but enthusiastic p-values and subsequently enthusiastic FDR-values.
However, such proteins with very small FC generally don't get considered as reliable results, therefore it is common practice to add an additional FC-threshold,
typically a 1.5 or 2 fold-change.
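Since the abundance data are log2-transformed, such FC-thresholds translate directly to constant cut-offs on the log2-scale :
```{r log2FCthresholds, echo=TRUE}
## FC-thresholds expressed on the log2-scale
log2(c(FC1.5=1.5, FC2=2))
```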
The number of proteins retained by pair-wise comparison :
```{r VolcanoPlot1MQ, fig.height=6.5, fig.width=9.5, fig.align="center", echo=TRUE}
## by default the first pairwise comparison is taken, here the second one is chosen via 'useComp'
## using the argument 'namesNBest' we can add names from the annotation
VolcanoPlotW(testMQ, useComp=2, namesNBest="passFDR")
```
Additional Note : Volcano-plots may also help identifying bias in the data, in particular, to the question if normalization gave satisfactory results.
Based on the hypothesis of no global change used for normalization, normally, one would expect about the same number of down-regulated as up-regulated proteins.
In fact, this experiment is somehow unusual since one set of samples got a strong increase in abundance for 48 UPS1 proteins while the other proteins remained constant.
Thus, on the global scale there may be a (small) imbalance of abundances and the global median will reflect this, which can create some bias.
So, in this special case it might be better to perform normalization only based on the yeast proteins (which are assumed as constant),
as has been done in the vignette 'UPS-1 spike-in Experiments', which is entirely dedicated to UPS1 spike-in experiments.
## Reporting Results
Tables with results can either be directly created using _VolcanoPlotW()_ or, as shown below, using the function `extractTestingResults()`.
For example, let's look at the first of the pair-wise comparisons (the Volcano-plot above showed another pair-wise comparison):
The moderated t-test expressed as Benjamini-Hochberg FDR gave `r sum(testMQ$BH[,1] < 0.05,na.rm=TRUE)` proteins with
FDR < 0.05 for the comparison `r colnames(testMQ$BH)[1]`.
Since unfortunately many very low fold-change instances are amongst the results, one should add an additional filter for too low FC values.
This is common practice in most omics analysis when mostly technical replicates are run and/or the number of replicates is rather low.
```{r results1, echo=TRUE}
res1 <- extractTestingResults(testMQ, compNo=1, thrsh=0.05, FCthrs=2)
```
After FC-filtering for 2-fold (ie change of protein abundance to double or half) `r nrow(res1)` proteins remain.
```{r results2, echo=TRUE}
knitr::kable(res1[,-1], caption="5%-FDR (BH) Significant results for 1st pairwise set", align="c")
```
Please note that the column-name 'BH' refers to Benjamini-Hochberg FDR (several other options of multiple testing correction exist, too).
We can see that many UPS1 proteins are, as expected, among the best-ranking differential changes.
However, not all UPS1 proteins do show up in the results as expected, and furthermore, a number of yeast proteins
(however expected to remain constant !) were reported as differential, too.
The function _extractTestingResults()_ also allows to write the data shown above directly to a csv-file.
.
## Further Steps
In case of standard projects one typically would like to find out more about the biological context of the proteins retained at statistical analysis, their function and their interactors.
Such a list of significant proteins from a given project could be tested later on for enrichment of [GO-functions](https://www.uniprot.org/help/gene_ontology) or for their inter-connectivity in PPI networks like [String](https://string-db.org).
There are multiple tools available on [Bioconductor](https://www.bioconductor.org) and [CRAN](https://cran.r-project.org) as well as outside of R to perform such analysis tasks.
In case of UPS1 spike-in experiments the subsequent analysis is different.
Suggestions for in depth-analysis of UPS1 spike-in are shown in the **vignette 'UPS-1 spike-in Experiments'** of [this package](https://CRAN.R-project.org/package=wrProteo).
.
***
# Protein Annotation
In most 'Omics' activities getting additional annotation may sometimes get a bit tricky.
In Proteomics most mass-spectrometry software will use the information provided in the Fasta-file as annotation (typically as provided from UniProt).
But this lacks for example chromosomal location information.
There are many repositories with genome-, gene- and protein-annotation and most of them are linked, but sometimes the links get broken when
data-base updates are not done everywhere or are not followed by new re-matching.
The Fasta-files used initially for mass-spectrometry peak-identification
may not be completely up to date (sometimes gene- or protein-IDs do change or may even disappear) and thus will contribute to a certain percentage of entries which are hard to link.
Globally two families of strategies for adding annotation exist :
a) Strategies using online-based resources for getting the most up-to-date information/annotation (like [biomaRt](https://bioconductor.org/packages/release/bioc/html/biomaRt.html)).
Despite the advantage of most up-to-date information there may be some downsides :
Interrogating databases may require more time to run all queries via internet-connections and this strategy is vulnerable to broken links
(eg linked to the delay of updates between different types of databases that may need to get combined).
Furthermore, the results typically may change a bit when the queries get repeated (in particular when this contains hypothetical peptides, pseudogenes etc).
When combining multiple interconnected resources it may be very tricky to document the precise version of all individual resources used.
b) Strategies based on using (local copies of) defined versions of databases.
Frequently, these databases can get downloaded/installed locally and thus allow faster queries and guarantee of repeatability and comparability to other tools or studies.
A number of databases are available on [Bioconductor](https://www.bioconductor.org) formatted for R.
Besides, the tables from [UCSC](https://genome.ucsc.edu/cgi-bin/hgTables) are another option (which will be used here).
Note, that tracking version-numbers may be much easier using this approach based on defined versions of databases.
And finally, results are 100% reproducible when the same (much easier to document) versions are used.
In the context of adding chromosomal annotation to a list of proteins here the following concept is developed :
Annotation-tables from [UCSC](https://genome.ucsc.edu/cgi-bin/hgTables) are available for a decent number of species and can be downloaded for convenient off-line search.
However, in the context of less common species we realized that the UniProt tables from UCSC often had low yield in final matching.
For this reason we propose the slightly more complicated route that finally provided a much higher success-rate for finding chromosomal locations for a list of UniProt IDs.
First one needs to access/download from [UCSC](https://genome.ucsc.edu/cgi-bin/hgTables) the table corresponding to the species in question (fields _'clade','genome','assembly'_).
For _'group'_ choose 'Genes and Gene Predictions' and for _'track'_ choose 'Ensembl Genes', as table choose 'ensGene'.
In addition, it is possible to select either the entire genome-annotation or user-specified regions.
In terms of 'output-format' one may choose 'GTF' (slightly more condensed, no headers) or 'all fields from selected table'.
The following strategy for adding genomic location data using this package is presented here :
Locate (& download) organism annotation from UCSC, read into R (_readUCSCtable()_ ) -> from R export (non-redundant) 'enst'-IDs (still using _readUCSCtable()_ ),
get corresponding UniProt-IDs at UniProt site, save as file and import result into R (_readUniProtExport()_ ) -> (in R) combine with initial UCSC table (_readUniProtExport()_ ) .
The function `readUCSCtable()` is able to read such files downloaded from UCSC, compressed .gz files can be read, too (like in the example below).
In the example below we'll just look at chromosome 11 of the human genome - to keep this example small.
```{r readUCSC1, echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
gtfFi <- file.path(path1, "UCSC_hg38_chr11extr.gtf.gz")
UcscAnnot1 <- readUCSCtable(gtfFi)
# The Ensembl transcript identifiers and their chromosomal locations :
head(UcscAnnot1)
```
However, this annotation does not provide protein IDs. In order to obtain the corresponding protein IDs an additional step is required :
Here we will use the batch search/conversion tool from [UniProt](https://www.uniprot.org/id-mapping).
In order to do so, we can export directly from _readUCSCtable()_ a small text-file which can be fed into the UniProt batch-search tool.
```{r readUCSC2, echo=TRUE}
# Here we'll redo reading the UCSC table, plus immediately write the file for UniProt conversion
# (in this vignette we write to tempdir() to keep things tidy)
expFi <- file.path(tempdir(),"deUcscForUniProt2.txt")
UcscAnnot1 <- readUCSCtable(gtfFi, exportFileNa=expFi)
```
Now everything is ready to go to [UniProt](https://www.uniprot.org/id-mapping) for retrieving the corresponding UniProt-IDs.
Since we exported Ensembl transcript IDs (ENSTxxx), select converting from 'Ensembl Transcript' to 'UniProtKB'.
Then, when downloading the conversion results, choose tab-separated file format (compression is recommended), this may take several seconds (depending on the size).
It is suggested to rename the downloaded file so one can easily understand its content.
Note, that the function `readUniProtExport()` can also read .gz compressed files.
To continue this vignette we'll use a result which has been downloaded from [UniProt](https://www.uniprot.org/id-mapping) and renamed to 'deUniProt_hg38chr11extr.tab'.
One may also optionally define a specific genomic region of interest using the argument 'targRegion', here the entire chromosome 11 was chosen.
```{r readUniProt1, echo=TRUE}
deUniProtFi <- file.path(path1, "deUniProt_hg38chr11extr.tab")
deUniPr1 <- readUniProtExport(UniP=deUniProtFi, deUcsc=UcscAnnot1, targRegion="chr11:1-135,086,622")
str(deUniPr1)
```
The resulting data.frame (ie the column 'UniProtID') may be used to complement protein annotation after importing mass-spectrometry peak- and protein-identification results.
Obviously, using recent Fasta-files from UniProt for protein-identification will typically give better matching at the end.
You may note that sometimes Ensembl transcript IDs are written as 'enst00000410108' whereas at other places they may be written as 'ENST00000410108.5'.
The function _readUniProtExport()_ switches to a more flexible search mode, stripping off version-numbers and reading all IDs as lower-case, if initial direct matching reveals less than 4 hits.
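The principle of this more flexible matching can be illustrated with base R (for illustration only, using the example IDs from above) :
```{r enstFlexibleMatch, echo=TRUE}
## Illustration of the flexible matching principle : strip version-numbers, ignore case
id1 <- c("ENST00000410108.5", "enst00000410108")
sub("\\.\\d+$", "", tolower(id1))
```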
Finally, it should be added, that of course several other ways of retrieving annotation do exist, too.
For example, as mentioned above, [Bioconductor](https://www.bioconductor.org) offers several packages dedicated to gene- and protein-annotation.
# Appendix
## Acknowledgements
The author would like to acknowledge the support by the [IGBMC](https://www.igbmc.fr) (CNRS UMR 7104, Inserm U 1258, UdS), [CNRS](http://www.cnrs.fr/en), [Université de Strasbourg (UdS)](https://www.unistra.fr) and [Inserm](https://www.inserm.fr).
All colleagues from the [proteomics platform](https://proteomics.igbmc.fr) at the IGBMC work with great commitment to provide high quality mass-spectrometry data (including some of those used here).
The author wishes to thank the [CRAN-staff](https://CRAN.R-project.org) for all their help with new entries and their efforts in maintaining this repository of R-packages.
Furthermore, many very fruitful discussions with colleagues on national and international level have helped to improve the tools presented here.
Thank you for your interest. This package is constantly evolving, new features/functions may get added in the next version.
## Session-Info
For completeness :
\small
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
---
title: "Analyzing Proteomics UPS1 Spike-in Experiments (Example Ramus 2016 Dataset)"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{UPS1 spike-in Experiments}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This vignette complements the _more basic vignette_ **'Getting started with wrProteo'** also from this package and shows in more detail how [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)
_spike-in_ experiments may be analyzed, using this package ([wrProteo](https://CRAN.R-project.org/package=wrProteo)),
[wrMisc](https://CRAN.R-project.org/package=wrMisc), [wrGraph](https://CRAN.R-project.org/package=wrGraph) and
[RColorBrewer](https://CRAN.R-project.org/package=RColorBrewer).
All these packages are available on CRAN.
Furthermore, the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html) will be used internally for its moderated statistical testing.
```{r, include = FALSE}
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
```
```{r install, echo=TRUE, eval=FALSE}
## This is R code, you can run this to redo all analysis presented here.
## If not already installed, you'll have to install wrMisc and wrProteo first.
install.packages("wrMisc")
install.packages("wrProteo")
## These packages are used for the graphics
install.packages("wrGraph")
install.packages("RColorBrewer")
## Installation of limma from Bioconductor
if(!requireNamespace("BiocManager", quietly=TRUE)) install.packages("BiocManager")
BiocManager::install("limma")
## You can also see all vignettes for this package by typing :
browseVignettes("wrProteo")   # ... then select the html output
```
As you will see in the interactive window from _browseVignettes()_, [this package](https://CRAN.R-project.org/package=wrProteo) has 2 vignettes,
a more general introductory vignette (mentioned above) and this UPS-1 dedicated vignette.
Now let's load the packages needed :
```{r setup, echo=TRUE, messages=FALSE, warnings=FALSE}
## Let's assume this is a fresh R-session
library(knitr)
library(wrMisc)
library(wrGraph)
library(wrProteo)
# Version number for wrProteo :
packageVersion("wrProteo")
```
### Experimental Setup For Benchmark Tests
The main aim of the experimental setup using heterologous _spike-in_ experiments is to provide a framework to test identification and quantitation procedures in proteomics.
By mixing known amounts of a collection of human proteins ([UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)) in various concentrations on top of a constant level yeast total protein extract,
one expects to find only the spiked human UPS1 proteins varying between samples.
In terms of ROC curves (see also [ROC on Wikipedia](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)) the _spike-in_ proteins are expected to show up as true positives (TP).
In contrast, all yeast proteins were added in the same quantity to all samples and should thus be observed as constant, ie as true negatives (TN) when looking for proteins changing abundance.
The specific dataset used here (see also next section) is not that recent; thus,
for addressing scientific questions concerning the comparison and choice of quantification software it may be better to use similar but more recent datasets.
The main aim of this vignette is to show the possibilities _how_ such comparisons can be performed using [wrProteo](https://CRAN.R-project.org/package=wrProteo).
### The Ramus Data-Set
The data used in this vignette was published with the article : [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011)
"Benchmarking quantitative label-free LC-MS data processing workflows using a complex spiked proteomic standard dataset" in J Proteomics 2016 Jan 30;132:51-62.
This dataset is available on PRIDE as [PXD001819](https://www.ebi.ac.uk/pride/archive/projects/PXD001819) (and on ProteomeXchange).
Briefly, this experiment aims to evaluate and compare various quantification approaches of the heterologous _spike-in_ [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)
(available from Sigma-Aldrich) in yeast protein extracts as constant matrix. 9 different concentrations of the heterologous _spike-in_ (UPS1) were run in triplicates.
The proteins were initially digested by Trypsin and then analyzed by LC-MS/MS in DDA mode.
As described in more detail in the reference, this dataset was generated using a LTQ-Orbitrap, in the meantime more powerful and precise mass-spectrometers have become available.
Thus, scientific questions about the comparison and choice of quantification software may be better addressed using more recent datasets.
### Meta-Data Describing The Experiment (sdrf)
The project [Proteomics Sample Metadata Format](https://github.com/bigbio/proteomics-sample-metadata) aims to provide
a uniform format for documenting experimental meta-data (sdrf-format).
The meta-data for experiments already integrated can be directly read/accessed from [wrProteo](https://CRAN.R-project.org/package=wrProteo).
Either you download the meta-data as file 'sdrf.tsv' from [Pride/PXD001819](https://www.ebi.ac.uk/pride/archive/projects/PXD001819), or you may read file 'PXD001819.sdrf.tsv' directly from [github/bigbio](https://github.com/bigbio/proteomics-sample-metadata/blob/master/annotated-projects/PXD001819/PXD001819.sdrf.tsv).
```{r metaData1, echo=TRUE}
## Read meta-data from github.com/bigbio/proteomics-metadata-standard/
pxd001819meta <- readSdrf("PXD001819")
## The concentration of the UPS1 spike-in proteins in the samples
if(length(pxd001819meta) >0) {
UPSconc <- sort(unique(as.numeric(wrMisc::trimRedundText(pxd001819meta$characteristics.spiked.compound.)))) # trim to get to 'essential' info
} else {
UPSconc <- c(50, 125, 250, 500, 2500, 5000, 12500, 25000, 50000) # in case access to github failed
}
```
The import function used further on in this vignette can directly download this meta-data if the PXD-accession-number is provided.
### Key Elements And Additional Functions
\small
```{r functions1, echo=TRUE}
## A few elements and functions we'll need later on
methNa <- c("ProteomeDiscoverer","MaxQuant","Proline")
names(methNa) <- c("PD","MQ","PL")
## The accession numbers for the UPS1 proteins
UPS1 <- data.frame(ac=c("P00915", "P00918", "P01031", "P69905", "P68871", "P41159", "P02768", "P62988",
"P04040", "P00167", "P01133", "P02144", "P15559", "P62937", "Q06830", "P63165",
"P00709", "P06732", "P12081", "P61626", "Q15843", "P02753", "P16083", "P63279",
"P01008", "P61769", "P55957", "O76070", "P08263", "P01344", "P01127", "P10599",
"P99999", "P06396", "P09211", "P01112", "P01579", "P02787", "O00762", "P51965",
"P08758", "P02741", "P05413", "P10145", "P02788", "P10636-8", "P00441", "P01375"),
species=rep("Homo sapiens", 48),
name=NA)
```
\small
```{r functions2, echo=TRUE}
## additional functions
replSpecType <- function(x, annCol="SpecType", replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")), silent=TRUE) {
## rename $annot[,"SpecType"] to more specific names
chCol <- annCol[1] %in% colnames(x$annot)
if(chCol) { chCol <- which(colnames(x$annot)==annCol[1])
chIt <- replBy[,1] %in% unique(x$annot[,chCol]) # check items to replace if present
if(any(chIt)) for(i in which(chIt)) { useLi <- which(x$annot[,chCol] %in% replBy[i,1]); x$annot[useLi,chCol] <- replBy[i,2] }
} else if(!silent) message(" replSpecType: 'annCol' not found in x$annot !")
x }
plotConcHist <- function(mat, ref, refColumn=3:4, matCluNa="cluNo", lev=NULL, ylab=NULL, tit=NULL) {
## plot histogram like counts of UPS1 concentrations
if(is.null(tit)) tit <- "Frequency of UPS1 Concentrations Appearing in Cluster"
gr <- unique(mat[,matCluNa])
ref <- ref[,refColumn]
if(length(lev) <2) lev <- sort(unique(as.numeric(as.matrix(ref))))
if(length(ylab) !=1) ylab <- "Frequency"
tbl <- table(factor( as.numeric(ref[which(rownames(ref) %in% rownames(mat)),]), levels=lev))
graphics::barplot(tbl, las=1, beside=TRUE, main=paste(tit,gr), col=grDevices::gray(0.8), ylab=ylab)
}
plotMultRegrPar <- function(dat, methInd, tit=NULL, useColumn=c("logp","slope","medAbund","startFr"), lineGuide=list(v=c(-12,-10),h=c(0.7,0.75),col="grey"), xlim=NULL,ylim=NULL,subTit=NULL) {
## scatter plot logp (x) vs slope (y) for all UPS proteins, symbol by useColumn[4], color by hist of useColumn[3]
## dat (array) UPS1 data
## useColumn (character) 1st as 'logp', 2nd as 'slope', 3rd as median abundance, 4th as starting best regression from this point
fxNa <- "plotMultRegrPar"
#fxNa <- wrMisc::.composeCallName(callFrom,newNa="plotMultRegrPar")
if(length(dim(dat)) !=3) stop("invalid input, expecting as 'dat' array with 3 dimensions (proteins,Softw,regrPar)")
if(any(length(methInd) >1, methInd > dim(dat)[2], !is.numeric(methInd))) stop("invalid 'methInd'")
chCol <- useColumn %in% dimnames(dat)[[3]]
if(any(!chCol)) stop("argument 'useColumn' does not fit to 3rd dim dimnames of 'dat'")
useCol <- colorAccording2(dat[,methInd,useColumn[3]], gradTy="rainbow", revCol=TRUE, nEndOmit=14)
graphics::plot(dat[,methInd,useColumn[1:2]], main=tit, type="n",xlim=xlim,ylim=ylim) #col=1, bg.col=useCol, pch=20+lmPDsum[,"startFr"],
graphics::points(dat[,methInd,useColumn[1:2]], col=1, bg=useCol, pch=20+dat[,methInd,useColumn[4]])
graphics::legend("topright",paste("best starting from ",1:5), text.col=1, pch=21:25, col=1, pt.bg="white", cex=0.9, xjust=0.5, yjust=0.5)
if(length(subTit)==1) graphics::mtext(subTit,cex=0.9)
if(is.list(lineGuide) & length(lineGuide) >0) {if(length(lineGuide$v) >0) graphics::abline(v=lineGuide$v,lty=2,col=lineGuide$col)
if(length(lineGuide$h) >0) graphics::abline(h=lineGuide$h,lty=2,col=lineGuide$col)}
hi1 <- graphics::hist(dat[,methInd,useColumn[3]], plot=FALSE)
wrGraph::legendHist(sort(dat[,methInd,useColumn[3]]), colRamp=useCol[order(dat[,methInd,useColumn[3]])][cumsum(hi1$counts)],
cex=0.5, location="bottomleft", legTit="median raw abundance") #
}
```
\normalsize
## Protein Identification and Initial Quantification
Multiple algorithms and software implementations have been developed for the quantitation of label-free proteomics experiments,
in particular for extracted ion chromatograms (XIC). For background information you may look at
[Wikipedia label-free Proteomics](https://en.wikipedia.org/wiki/Label-free_quantification).
Here, the use of the output for 3 such implementations for extracting peptide/protein quantifications is shown.
These 3 software implementations were run individually using equivalent settings, ie identification based on the same fasta-database,
starting at a single peptide with 1% FDR, MS mass tolerance for ion precursors at 0.7 ppm,
oxidation of methionines and N-terminal acetylation as fixed as well as carbamidomethylation of cysteines as variable modifications.
Since in this context it is crucial to recognize all UPS1 proteins as such, the import-functions make use of the _specPref_ argument, allowing to define custom tags.
Most additional arguments to the various import-functions have been kept common for convenient use and for generating output structured the same way.
Indeed, simply separating proteins by their species origin is not sufficient since common contaminants
like human keratin might get considered by error as UPS1.
### MaxQuant {#ReadMaxQuant}
[MaxQuant](https://www.maxquant.org) is free software provided by the [Max-Planck-Institute](https://www.biochem.mpg.de/en),
see also [Tyanova et al 2016](https://doi.org/10.1038/nprot.2016.136).
Later in this document data from MaxQuant will frequently be abbreviated as **MQ**.
Typically [MaxQuant](https://www.maxquant.org) exports quantitation data on level of consensus-proteins by default to a folder called _txt_ with a file called *"proteinGroups.txt"* .
So in a standard case (when the file name has not been changed manually) it is sufficient to provide the path to this file.
Of course, you can explicitly point to a specific file, as shown below.
With the data presented here MaxQuant version 1.6.10 was run.
Files compressed as .gz can be read, too (like in the example below).
```{r readMaxQuant, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaMQ <- "proteinGroups.txt.gz"
## We need to define the setup of species
specPrefMQ <- list(conta="CON_|LYSC_CHICK", mainSpecies="OS=Saccharomyces cerevisiae", spike=UPS1$ac)
dataMQ <- readMaxQuantFile(path1, file=fiNaMQ, specPref=specPrefMQ, refLi="mainSpe",
sdrf=c("PXD001819","max"), suplAnnotFile=TRUE, plotGraph=FALSE)
```
The data were imported, log2-transformed and median-normalized, the protein annotation was parsed to automatically extract IDs, protein-names and species information.
The species annotation was extracted out of the fasta-headers, as given in the _specPref_ argument (MaxQuant specific setting).
As explained in more detail in the general vignette [wrProteoVignette1](https://CRAN.R-project.org/package=wrProteo),
in this example we use only the proteins annotated as _Saccharomyces cerevisiae_ (ie the constant matrix) for determining the normalization-factors via the argument _refLi_.
If you wish to inspect the graphs for the distribution of abundance values for each sample before and after median-normalization, please set the argument _plotGraph=TRUE_ (default).
Please note, that in the example above we directly added information about the experimental setup from the _sdrf_ repository.
```{r readMaxQuant2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The number of lines and colums
dim(dataMQ$quant)
## A quick summary of some columns of quantitation data
summary(dataMQ$quant[,1:7]) # the first 7 cols
table(dataMQ$annot[,"SpecType"], useNA="always")
```
Now we can summarize the presence of UPS1 proteins after treatment by MaxQuant :
In sum, `r sum(UPS1$ac %in% dataMQ$annot[,1])` UPS1 proteins were found, `r sum(!UPS1$ac %in% dataMQ$annot[,1])` are missing.
### ProteomeDiscoverer {#ReadProteomeDiscoverer}
[ProteomeDiscoverer](https://www.thermofisher.com/order/catalog/product/OPTON-30812) is commercial software from ThermoFisher (www.thermofisher.com).
Later in this document data from ProteomeDiscoverer will frequently be abbreviated as **PD**.
With the data used here, the identification was performed using the XCalibur module of ProteomeDiscoverer version 2.4 .
Quantitation data at the level of consensus-proteins can be exported to tabulated text files, which can be treated by the function shown below.
The resultant data were exported in tabulated format and the file automatically named '\_Proteins.txt_' by ProteomeDiscoverer (the option R-headers was checked, however this option is not mandatory).
Files compressed as .gz can be read, too (like in the example below).
```{r readProteomeDiscoverer1, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaPd <- "pxd001819_PD24_Proteins.txt.gz"
## Next, we define the setup of species
specPrefPD <- list(conta="Bos tauris|Gallus", mainSpecies="Saccharomyces cerevisiae", spike=UPS1$ac)
dataPD <- readProteomeDiscovererFile(file=fiNaPd, path=path1, refLi="mainSpe", specPref=specPrefPD,
sdrf=c("PXD001819","max"), plotGraph=FALSE)
```
The data were imported, log2-transformed and median-normalized, the protein annotation was parsed to automatically extract IDs, protein-names and species information.
Please note, that quantitation data exported from ProteomeDiscoverer frequently have very generic column-names (increasing numbers).
When calling the import-function they can be replaced by more meaningful names either using the argument _sampNa_,
or from reading the default annotation in the file _'InputFiles.txt'_ or, finally, from the sdrf-annotation.
In the example shown above both the default annotation from the file _'InputFiles.txt'_ and the _sdrf_ annotation are available and were integrated into the object produced by the import-function.
The species annotation was extracted as given in the _specPref_ argument.
In this example we use only the proteins annotated as _Saccharomyces cerevisiae_ for determining the normalization-factors via the argument _refLi_.
If you wish to inspect the graphs for the distribution of abundance values for each sample before and after median-normalization, please set the argument _plotGraph=TRUE_ (default).
```{r readProteomeDiscoverer2, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The number of lines and colums
dim(dataPD$quant)
## A quick summary of some columns of quantitation data
summary(dataPD$quant[,1:7]) # the first 7 cols
table(dataPD$annot[,"SpecType"], useNA="always")
```
Now we can summarize the presence of UPS1 proteins after treatment by ProteomeDiscoverer :
In sum, `r sum(UPS1$ac %in% dataPD$annot[,1])` UPS1 proteins were found, `r sum(!UPS1$ac %in% dataPD$annot[,1])` are missing.
### Proline
[Proline](http://www.profiproteomics.fr/proline/) is open-source software provided by the [Profi-consortium](https://www.profiproteomics.fr)
(see also [proline-core on github](https://github.com/profiproteomics/proline-core)), published by [Bouyssie et al 2020](https://doi.org/10.1093/bioinformatics/btaa118).
Later in this document data from Proline will frequently be abbreviated as **PL**.
Protein identification in Proline gets performed by [SearchGUI](http://compomics.github.io/projects/searchgui), see also [Vaudel et al 2015](https://doi.org/10.1002/pmic.201000595).
In this case [X!Tandem](https://www.thegpm.org/TANDEM/) (see also [Duncan et al 2005](https://doi.org/10.1021/pr050058i)) was used as search engine.
Quantitation data at the level of consensus-proteins can be exported from [Proline](http://www.profiproteomics.fr/proline/)
as _.xlsx_ or tabulated text files, both formats can be treated by the import-functions shown below.
Here, Proline version 1.6.1 was used with addition of Percolator (via MS-Angel from the same authors).
```{r readProline, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
path1 <- system.file("extdata", package="wrProteo")
fiNaPl <- "pxd001819_PL.xlsx"
specPrefPL <- list(conta="_conta", mainSpecies="Saccharomyces cerevisiae", spike=UPS1$ac)
dataPL <- readProlineFile(fiNaPl, path=path1, specPref=specPrefPL, normalizeMeth="median", refLi="mainSpe",
sdrf=c("PXD001819","max"), plotGraph=FALSE)
```
The (log2-transformed) data were imported and median-normalized, the protein annotation was parsed to automatically extract IDs, protein-names and species information.
The species annotation was extracted out of the protein annotation columns, as specified with the _specPref_ argument.
As explained in more detail in the general vignette [wrProteoVignette1](https://CRAN.R-project.org/package=wrProteo),
in this example we use only the proteins annotated as _Saccharomyces cerevisiae_ for determining the normalization-factors via the argument _refLi_.
If you wish to inspect the graphs for the distribution of abundance values for each sample before and after median-normalization, please set the argument _plotGraph=TRUE_ (default).
Please note, that in the example above we directly added information about the experimental setup from the _sdrf_ repository.
In addition, we need to correct the quantification column-heads (like 'Levure2ug+ UPS1-100amol') and bring them to a simpler version :
Here, both the default annotation from the _xlsx_ and the sdrf annotation are available and were integrated into the object produced by the import-function.
```{r postTreatmPL, echo=TRUE}
head(colnames(dataPL$raw), 7)
dataPL <- cleanListCoNames(dataPL, rem=c("Levure2ug+ UPS1-"), subst=cbind(c("fmol","mol-"), c("000amol","mol_R")), mathOper="/2")
## let's check the result
head(colnames(dataPL$raw),8)
```
```{r readProlineInfo, fig.height=8, fig.width=9.5, fig.align="center", echo=TRUE}
## The number of lines and colums
dim(dataPL$quant)
## A quick summary of some columns of quantitation data
summary(dataPL$quant[,1:8]) # the first 8 cols
table(dataPL$annot[,"SpecType"], useNA="always")
```
Now we can summarize the presence of UPS1 proteins after treatment by Proline :
In sum, `r sum(UPS1$ac %in% dataPL$annot[,1])` UPS1 proteins were found, `r sum(!UPS1$ac %in% dataPL$annot[,1])` are missing.
### Uniform Re-Arranging Of Data
For easy and proper comparisons we need to make sure all columns are in the same order; however, when using different software this is not immediately the case.
The basic names of the groups have already been figured out using the sample meta-data, notably the sdrf.
```{r rearrange1, echo=TRUE}
## bring all results (MaxQuant,ProteomeDiscoverer, ...) in same ascending order
## as reference we'll use the order from ProteomeDiscoverer, its output is already in a convenient order
sampNa <- colnames(dataPD$quant)
## it is more convenient to re-order columns this way in each project
dataPD <- corColumnOrder(dataPD, sampNames=sampNa) # already in good order
dataMQ <- corColumnOrder(dataMQ, replNames=paste0("UPS1_",sub("amol_", "amol_R", colnames(dataMQ$quant))), sampNames=sampNa)   # incl changed names
dataPL <- corColumnOrder(dataPL, replNames=paste0("UPS1_",colnames(dataPL$quant)), sampNames=sampNa)   # incl changed names
```
At import we made use of the argument _specPref_ (specifying '_mainSpecies_', '_conta_' and '_spike_') which allows building categories by searching keywords in the initial annotation.
In turn, we obtain the labels :
'_mainSpe_' for yeast (ie matrix), '_species2_' for the UPS1 (ie spike) and '_conta_' for contaminants.
Let's replace the first two generic terms by more specific ones (ie '_Yeast_' and '_UPS1_') :
```{r postTreatm1, echo=TRUE}
## Need to rename $annot[,"SpecType"]
dataPD <- replSpecType(dataPD, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
dataMQ <- replSpecType(dataMQ, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
dataPL <- replSpecType(dataPL, replBy=cbind(old=c("mainSpe","species2"), new=c("Yeast","UPS1")))
## Need to address missing ProteinNames (UPS1) due to missing tags in Fasta
dataPD <- replMissingProtNames(dataPD)
dataMQ <- replMissingProtNames(dataMQ)
dataPL <- replMissingProtNames(dataPL)
table(dataPD$annot[,"SpecType"])
## synchronize order of groups
(grp9 <- dataMQ$sampleSetup$level)
names(grp9) <- rep(paste0(UPSconc,"amol"), each=3)
dataPL$sampleSetup$groups <- dataMQ$sampleSetup$groups <- dataPD$sampleSetup$groups <- grp9 # synchronize order of groups
```
```{r postTreatmCheck, echo=TRUE}
## extract names of quantified UPS1-proteins
NamesUpsPD <- dataPD$annot[which(dataPD$annot[,"SpecType"]=="spike"), "Accession"]
NamesUpsMQ <- dataMQ$annot[which(dataMQ$annot[,"SpecType"]=="spike"), "Accession"]
NamesUpsPL <- dataPL$annot[which(dataPL$annot[,"SpecType"]=="spike"), "Accession"]
```
```{r postTreatmTables, echo=TRUE}
tabS <- mergeVectors(PD=table(dataPD$annot[,"SpecType"]), MQ=table(dataMQ$annot[,"SpecType"]), PL=table(dataPL$annot[,"SpecType"]))
tabT <- mergeVectors(PD=table(dataPD$annot[,"Species"]), MQ=table(dataMQ$annot[,"Species"]), PL=table(dataPL$annot[,"Species"]))
tabS[which(is.na(tabS))] <- 0
tabT[which(is.na(tabT))] <- 0
kable(cbind(tabS[,2:1], tabT), caption="Number of proteins identified, by custom tags, species and software")
```
The initial fasta file also contained the yeast strain number; this has been stripped off when using default parameters.
------
## Basic Data Treatment
### Structure of Experiment
The global structure of experiments can be provided as sdrf-file and/or from meta-data stored with the experimental data read.
For convenience, this information about the groups of replicates was already deduced and can be found (for example) in _dataMQ$sampleSetup$sdrf_.
```{r metaData2, echo=TRUE}
kable(cbind(dataMQ$sampleSetup$sdrf[,c(23,7,19,22)], groups=dataMQ$sampleSetup$groups))
```
### Normalization
No additional normalization is needed, all data were already median normalized to the host proteins (ie _Saccharomyces cerevisiae_) after importing the
initial quantification-output using '_readMaxQuantFile()_', '_readProlineFile()_' and '_readProteomeDiscovererFile()_'.
### Presence of NA-values
As mentioned in the general vignette of [this package](https://CRAN.R-project.org/package=wrProteo), 'wrProteoVignette1',
it is important to investigate the nature of NA-values.
In particular, checking the hypothesis that NA-values originate from very low abundance instances is very important for deciding how to treat NA-values further on.
```{r NA_ProteomeDiscoverer, echo=TRUE}
## Let's inspect NA values from ProteomeDiscoverer as graphic
matrixNAinspect(dataPD$quant, gr=grp9, tit="ProteomeDiscoverer")
```
```{r NA_MaxQuant, echo=TRUE}
## Let's inspect NA values from MaxQuant as graphic
matrixNAinspect(dataMQ$quant, gr=grp9, tit="MaxQuant")
```
```{r NA_Proline, echo=TRUE}
## Let's inspect NA values from Proline as graphic
matrixNAinspect(dataPL$quant, gr=grp9, tit="Proline")
```
A key element to understand the nature of NA-values is to investigate their NA-neighbours.
If a given protein has an NA in just one of the 3 replicates, the other two valid quantifications can be considered as NA-neighbours.
In the figures above all NA-neighbours are shown in the histogram and their mode is marked by an arrow.
One can see, that NA-neighbours are predominantly (but not exclusively) part of the lower quantitation values (see also the sketch below).
This supports the hypothesis that NAs occur most frequently with low abundance proteins.
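For illustration of the concept only (this is not the exact extraction used internally), the NA-neighbours of the first group of replicates could be collected like this; the selection of the first group is an arbitrary choice for this sketch.
```{r NAneighbourSketch, echo=TRUE, eval=FALSE}
## Minimal sketch (not run) of the NA-neighbour concept, for the 1st group only :
## collect the valid values from lines where exactly one of the replicates is NA
gr1Cols <- which(grp9 == unique(grp9)[1])              # columns of the 1st group
sub1 <- dataMQ$quant[, gr1Cols]
liWith1NA <- which(rowSums(is.na(sub1)) == 1)          # lines with exactly 1 NA
NAneig1 <- na.omit(as.numeric(sub1[liWith1NA,]))
hist(NAneig1, main="NA-neighbours of the 1st group", xlab="log2 abundance")
```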
### NA-Imputation and Statistical Testing for Changes in Abundance
NA-values represent a challenge for statistical testing. In addition, techniques like PCA don't allow NAs, either.
The number of NAs varies between samples : Indeed, very low concentrations of UPS1 are difficult to get detected and contribute largely to the NAs (as we will see later in more detail).
Since the amount of yeast proteins (ie the matrix in this setup) stays constant across all samples, yeast proteins should always get detected the same way.
```{r nNA1, echo=TRUE}
## Let's look at the number of NAs. Is there an accumulated number in lower UPS1 samples ?
tabSumNA <- rbind(PD=sumNAperGroup(dataPD$raw, grp9), MQ=sumNAperGroup(dataMQ$raw, grp9), PL=sumNAperGroup(dataPL$raw, grp9) )
kable(tabSumNA, caption="Number of NAs per group of samples", align="r")
```
In the section above we investigated the circumstances of NA-instances and provided evidence that NA-values typically represent proteins with low abundance which frequently ended up as non-detectable (NA).
Thus, we hypothesize that (in most cases) NA-values might have been detected in quantities similar to their NA-neighbours.
In consequence, we will model a Normal distribution based on the NA-neighbours and use it for substituting the NAs.
The function `testRobustToNAimputation()` from this package (wrProteo) allows to perform NA-imputation and subsequent statistical testing (after repeated imputation) between all groups of samples (see also the general vignette).
One of the advantages of this implementation is that multiple rounds of imputation are run, so that final results (including pair-wise testing) get stabilized against (rare) stochastic effects. For this reason one may also speak of stabilized NA-imputations.
The statistical tests used underneath make use of the shrinkage-procedure of the empirical Bayes approach as implemented in the Bioconductor package [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), see also [Ritchie et al 2015](https://doi.org/10.1093/nar/gkv007).
In addition, various formats of multiple testing correction can be added to the results : Benjamini-Hochberg FDR (later on referred to as BH or BH-FDR, see [FDR on Wikipedia](https://en.wikipedia.org/wiki/False_discovery_rate), see also [Benjamini and Hochberg 1995](https://mathscinet.ams.org/mathscinet-getitem?mr=1325392)), local false discovery rate (lfdr, using the package [fdrtool](https://CRAN.R-project.org/package=fdrtool), see [Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209)), or modified testing by [ROTS](https://bioconductor.org/packages/release/bioc/html/ROTS.html), etc ... In this vignette we will make use of the BH-FDR.
We are ready to launch the NA-imputation and testing for data from ProteomeDiscoverer.
Please note, that the procedure including repetitive NA-imputations may take a few seconds.
```{r testProteomeDiscoverer, echo=TRUE}
testPD <- testRobustToNAimputation(dataPD, imputMethod="informed") # ProteomeDiscoverer
```
Then for MaxQuant ...
```{r testMaxQuant, echo=TRUE}
testMQ <- testRobustToNAimputation(dataMQ, imputMethod="informed") # MaxQuant , ok
```
And finally for Proline :
```{r testProline, echo=TRUE}
testPL <- testRobustToNAimputation(dataPL, imputMethod="informed") # Proline
```
From these results we'll use i) the NA-imputed version of our datasets for plotting principal components (PCA) and ii) the (stabilized) testing results for counting TP, FP, etc and to construct ROC curves.
Let's add the NA-imputed data to our main object :
```{r testReorganize1, echo=TRUE}
dataPD$datImp <- testPD$datImp # recuperate imputed data to main data-object
dataMQ$datImp <- testMQ$datImp
dataPL$datImp <- testPL$datImp
```
------
## Analysis Using All Proteins Identified (Matrix + UPS1)
In this section we'll consider all proteins identified and quantified in a pair-wise fashion, using the t-tests already run in the previous section.
As mentioned, the experimental setup is very special, since all proteins that are truly changing are known in advance (the UPS1 _spike-in_ proteins).
Tables get constructed by counting based on various thresholds for considering given protein abundances as differential or not.
A traditional 5 percent FDR cut-off is used for Volcano-plots, while ROC-curves allow inspecting the entire range of potential cut-off values.
### Pairwise Testing Summary
A very universal and simple way to analyze data is by checking several pairwise comparisons, in particular, if the experimental setup does not include complete multifactorial plans.
This [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) _spike-in_ experiment has `r ncol(dataPD$quant)` samples organized (according to meta-information) as `r length(UPSconc)` groups.
Thus, one obtains in total `r ncol(testPD$BH)` pair-wise comparisons which will make comparisons very crowded.
The publication by [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011) focussed on 3 pairwise comparisons only.
In this vignette it is shown how all of them can get considered.
Now, we'll construct a table showing all possible pairwise comparisons. Using the function *numPairDeColNames()* we can easily extract the UPS1 concentrations as numeric content and show the (log-)ratio of the pairwise comparisons (column 'log2rat'), the final concentrations (columns 'conc1' and 'conc2', in amol) and the number of differentially abundant proteins passing 5% FDR, using either classical Benjamini-Hochberg FDR (columns 'sig.xx.BH') or lfdr
([Strimmer 2008](https://doi.org/10.1093/bioinformatics/btn209), columns 'sig.xx.lfdr').
```{r pairWise2, echo=TRUE}
## The number of differentially abundant proteins passing 5% FDR (ProteomeDiscoverer and MaxQuant)
signCount <- cbind( sig.PD.BH=colSums(testPD$BH < 0.05, na.rm=TRUE), sig.PD.lfdr=if("lfdr" %in% names(testPD)) colSums(testPD$lfdr < 0.05, na.rm=TRUE),
sig.MQ.BH=colSums(testMQ$BH < 0.05, na.rm=TRUE), sig.MQ.lfdr=if("lfdr" %in% names(testMQ)) colSums(testMQ$lfdr < 0.05, na.rm=TRUE),
sig.PL.BH=colSums(testPL$BH < 0.05, na.rm=TRUE), sig.PL.lfdr=if("lfdr" %in% names(testPL)) colSums(testPL$lfdr < 0.05, na.rm=TRUE) )
table1 <- numPairDeColNames(testPD$BH, stripTxt="amol", sortByAbsRatio=TRUE)
table1 <- cbind(table1, signCount[table1[,1],])
rownames(table1) <- colnames(testMQ$BH)[table1[,1]]
kable(table1, caption="All pairwise comparisons and number of significant proteins", align="c")
```
```{r check2, echo=TRUE}
resMQ1 <- extractTestingResults(testMQ, compNo=1, thrsh=0.05, FCthrs=2)
resPD1 <- extractTestingResults(testPD, compNo=1, thrsh=0.05, FCthrs=2)
resPL1 <- extractTestingResults(testPL, compNo=1, thrsh=0.05, FCthrs=2)
```
You can see that in numerous cases many more than the `r length(UPS1$ac)` UPS1 proteins showed up as significant,
ie yeast proteins supposed to remain constant also showed up in part as 'significantly changing'.
However, some proteins with impressive FDR values have very low log-FC amplitude and will be removed by the fold-change filtering in the following steps.
```{r pairWise3, fig.height=4.5, fig.width=9.5, fig.align="center", echo=TRUE}
par(mar=c(5.5, 4.7, 4, 1))
imageW(table1[,c("sig.PD.BH","sig.MQ.BH","sig.PL.BH" )], col=rev(RColorBrewer::brewer.pal(9,"YlOrRd")),
transp=FALSE, tit="Number of BH.FDR passing proteins by the quantification approaches")
mtext("Dark red for high number signif proteins", cex=0.75)
```
In the original paper by [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011) only 3 pairwise comparisons were further analyzed :
```{r pairWiseSelect2, echo=TRUE}
## Selection in Ramus paper
kable(table1[which(rownames(table1) %in% colnames(testPD$BH)[c(2,21,27)]),], caption="Selected pairwise comparisons (as in Ramus et al)", align="c")
```
Here we'll consider all possible pairwise comparisons, as shown below.
### Volcano Plots
[Volcano-plots](https://en.wikipedia.org/wiki/Volcano_plot_(statistics)) offer additional insight in how statistical test results relate to log-fold-change of pair-wise comparisons.
In addition, we can mark the different protein-groups (or species) by different symbols, see also the general vignette 'wrProteoVignette1' (from this package) and the vignette to the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
Counting the number of proteins passing a classical threshold for differential expression combined with a filter for minimum log-fold-change is a good way to start.
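As a minimal sketch of such counting (not run; the vectors `fdr` and `log2FC` are hypothetical stand-ins for the test results of one pairwise comparison) :
```{r countSignifSketch, echo=TRUE, eval=FALSE}
## minimal sketch : count proteins passing 5% FDR plus a 1.5-fold minimum change
passing <- fdr < 0.05 & abs(log2FC) >= log2(1.5)
sum(passing, na.rm=TRUE)
```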
As mentioned, the dataset from [Ramus et al 2016](https://doi.org/10.1016/j.jprot.2015.11.011)
contains `r length(UPSconc)` different levels of [UPS1](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) concentrations,
in consequence `r ncol(testPD$BH)` pair-wise comparisons are possible.
Again, plotting all possible Volcano plots would give way too crowded figures; instead we'll try to summarize (see ROC curves), cluster into groups and finally plot only a few representative ones.
### ROC for Multiple Pairs
_Receiver Operator Curves_ ([ROC](https://en.wikipedia.org/wiki/Receiver_operating_characteristic)) curves
display _sensitivity_ (True Positive Rate) versus _1-Specificity_ (False Positive Rate).
They are typically used to illustrate and compare the discriminative capacity of a yes/no decision system (here: differential abundance or not),
see eg also the original publication [Hand and Till 2001](https://doi.org/10.1023/A:1010920819831).
The data get constructed by sliding through a panel of threshold-values for the statistical tests instead of just using 0.05.
Due to the experimental setup we know that all yeast proteins should stay constant and only [UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) are expected to change.
For each of these threshold values one counts the number of true positives (TP), false positives (FP) etc, allowing then to calculate _sensitivity_ and _specificity_.
In the case of benchmarking quantitation efforts, ROC curves are used to judge how well heterologous spikes ([UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)) can be recognized as differentially abundant while constant yeast matrix proteins should not get classified as differential.
Finally, ROC curves also let us gain some additional insight into the question which cutoff may be optimal, and whether the commonly used 5-percent FDR cutoff allows getting the best out of the testing system.
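The principle can be sketched in a few lines of base R (not run; `fdr` is a hypothetical vector of test results and `isSpike` a hypothetical logical marking the true UPS1 proteins) :
```{r rocSketch, echo=TRUE, eval=FALSE}
## minimal sketch of a ROC curve : slide through thresholds and count TP/FP
thr  <- sort(unique(c(0, fdr, 1)))                                         # panel of thresholds
sens <- sapply(thr, function(x) sum(fdr <= x & isSpike) / sum(isSpike))    # sensitivity (TPR)
fpr  <- sapply(thr, function(x) sum(fdr <= x & !isSpike) / sum(!isSpike))  # 1 - specificity (FPR)
plot(fpr, sens, type="l", xlab="1 - Specificity", ylab="Sensitivity")
```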
The next step consists in calculating the area under the curve (AUC) for the individual profiles of each pairwise comparison.
Below, these calculations of _summarizeForROC()_ are run in batch.
```{r ROC_main1, echo=TRUE}
## calculate ROC data (to derive AUC) for each pairwise comparison
layout(1)
rocPD <- lapply(table1[,1], function(x) summarizeForROC(testPD, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
rocMQ <- lapply(table1[,1], function(x) summarizeForROC(testMQ, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
rocPL <- lapply(table1[,1], function(x) summarizeForROC(testPL, useComp=x, annotCol="SpecType", spec=c("mainSpecies","spike"), tyThr="BH", plotROC=FALSE,silent=TRUE))
# we still need to add the names for the pair-wise groups:
names(rocPD) <- names(rocMQ) <- names(rocPL) <- rownames(table1)
```
```{r ROC_main2, echo=TRUE}
AucAll <- cbind(ind=table1[match(names(rocPD), rownames(table1)),"index"], clu=NA,
PD=sapply(rocPD, AucROC), MQ=sapply(rocMQ, AucROC), PL=sapply(rocPL, AucROC) )
```
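As an aside, the AUC of any ROC point-series can be approximated via the trapezoid rule. A minimal sketch of the concept (not run, continuing the hypothetical `fpr` and `sens` vectors from the sketch above; this illustrates the idea, not necessarily the exact implementation of _AucROC()_) :
```{r aucTrapezSketch, echo=TRUE, eval=FALSE}
## minimal sketch : trapezoid-rule approximation of the area under a ROC curve
o   <- order(fpr)
auc <- sum(diff(fpr[o]) * (head(sens[o], -1) + tail(sens[o], -1)) / 2)
auc
```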
To provide a quick overview, the AUC values are displayed as a PCA :
```{r ROC_biplot, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
try(biplot(prcomp(AucAll[,names(methNa)]), cex=0.7, main="PCA of AUC from ROC Curves"))
```
On this PCA the three quantification approaches are shown in red.
We can see that AUC values from MaxQuant correlate somewhat less with Proline and ProteomeDiscoverer (red arrows).
The pair-wise comparisons constructed from the different concentration ratios are shown in black.
They form a compact area of mostly wide ratios (one rather high and one low concentration of UPS1 proteins).
Besides, there is a number of more disperse points, typically containing the concentration of 125 and/or 250 amol.
These disperse points do not replicate well and follow their own characteristics, captured by PC2.
Next, the AUC values will be grouped into 5 clusters, so that representative cases can be inspected in detail :
### Grouping of ROC Curves to Display Representative Ones
As mentioned, there are too many pair-wise combinations available for plotting and inspecting all ROC-curves.
So we can try to group similar pairwise comparison AUC values into clusters and then easily display representative examples for each cluster/group.
Again, we (pre)define that we want to obtain 5 groups (like customer ratings from 5 stars down to 1 star); a k-means clustering approach was chosen.
```{r ROC_segm, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
## number of groups for clustering
nGr <- 5
## K-Means clustering
kMAx <- stats::kmeans(standardW(AucAll[,c("PD","MQ","PL")]), nGr)$cluster
table(kMAx)
AucAll[,"clu"] <- kMAx
```
```{r ROC_segm2, echo=TRUE}
AucAll <- reorgByCluNo(AucAll, cluNo=kMAx, useColumn=c("PD","MQ","PL"))
AucAll <- cbind(AucAll, iniInd=table1[match(rownames(AucAll), rownames(table1)), "index"])
colnames(AucAll)[1:(which(colnames(AucAll)=="index")-1)] <- paste("Auc",colnames(AucAll)[1:(which(colnames(AucAll)=="index")-1)], sep=".")
AucAll[,"cluNo"] <- rep(nGr:1, table(AucAll[,"cluNo"])) # make cluNo descending
kMAx <- AucAll[,"cluNo"] # update
table(AucAll[,"cluNo"])
## note : column 'index' is relative to table1, iniInd to ordering inside objects from clustering
```
To summarize the AUC values graphically, the clustered AUC values are plotted, accompanied by their geometric mean:
```{r ROC_profFig, echo=TRUE}
try(profileAsClu(AucAll[,c(1:length(methNa),(length(methNa)+2:3))], clu="cluNo", meanD="geoMean", tit="Pairwise Comparisons as Clustered AUC from ROC Curves",
xlab="Comparison number", ylab="AUC", meLty=1, meLwd=3))
```
From this figure we can clearly see that there are some pairwise comparisons where all initial analysis-software results yield high AUC values,
while other pairwise comparisons have less discriminative power.
Again, now we can select a representative pairwise-comparison for each cluster (from the center of each cluster):
```{r ROC_segmTable, echo=TRUE}
AucRep <- table(AucAll[,"cluNo"])[rank(unique(AucAll[,"cluNo"]))] # representative for each cluster
AucRep <- round(cumsum(AucRep) -AucRep/2 +0.1)
## select representative for each cluster
kable(round(AucAll[AucRep,c("Auc.PD","Auc.MQ","Auc.PL","cluNo")],3), caption="Selected representative for each cluster ", align="c")
```
Now we can check if some experimental UPS1 log-fold-changes have a bias for some clusters.
```{r freqOfFCperClu, echo=TRUE}
ratTab <- sapply(5:1, function(x) { y <- table1[match(rownames(AucAll),rownames(table1)),]
table(factor(signif(y[which(AucAll[,"cluNo"]==x),"log2rat"],1), levels=unique(signif(table1[,"log2rat"],1))) )})
colnames(ratTab) <- paste0("\nclu",5:1,"\nn=",rev(table(kMAx)))
layout(1)
imageW(ratTab, tit="Frequency of rounded log2FC in the 5 clusters", xLab="log2FC (rounded)", col=RColorBrewer::brewer.pal(9,"YlOrRd"),las=1)
mtext("Dark red for enrichment of given pair-wise ratio", cex=0.7)
```
We can see, that the cluster of best ROC-curves (cluster 5) covers practically all UPS1 log-ratios from this experiment without being restricted just to the high ratios.
#### Plotting ROC Curves for the Best Cluster (the '+++++')
```{r ROC_grp5tab, echo=TRUE}
colPanel <- 2:5
gr <- 5
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for best pairwise-comparisons ", align="c")
```
```{r ROC_grp5fig, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu5, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## This requires package 'wrGraph' version 1.2.5 (or higher)
if(packageVersion("wrGraph") >= "1.2.5") {
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for 2nd Best Cluster (the '++++')
```{r ROC_grp4tab, echo=TRUE}
gr <- 4
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '++++' pairwise-comparisons ", align="c")
```
```{r ROC_grp4fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu4, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for the 3rd Best Cluster (the '+++')
```{r ROC_grp3tab, echo=TRUE}
gr <- 3
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '+++' pairwise-comparisons ", align="c")
```
```{r ROC_grp3fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]],rocMQ[[jR]],rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu3, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for the 4th Best Cluster (the '++')
```{r ROC_grp2tab, echo=TRUE}
gr <- 2
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '++' pairwise-comparisons ", align="c")
```
```{r ROC_grp2fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu2, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
#### ROC Curves for the Weakest Cluster 1 (the '+')
```{r ROC_grp1tab, echo=TRUE}
gr <- 1
j <- match(rownames(AucAll)[AucRep[6-gr]], colnames(testPD$t))
## table of all proteins in cluster
useLi <- which(AucAll[,"cluNo"]==gr)
tmp <- cbind(round(as.data.frame(AucAll)[useLi,c("cluNo","Auc.PD","Auc.MQ","Auc.PL")],3),
as.data.frame(table1)[match(names(useLi),rownames(table1)), c(2,5,7,9)])
kable(tmp, caption="AUC details for cluster '+' pairwise-comparisons ", align="c")
```
```{r ROC_grp1fig, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
## frequent concentrations :
layout(matrix(1:2, ncol=1), heights=c(1,2.5))
plotConcHist(mat=tmp, ref=table1)
## representative ROC
jR <- match(rownames(AucAll)[AucRep[6-gr]], names(rocPD))
plotROC(rocPD[[jR]], rocMQ[[jR]], rocPL[[jR]], col=colPanel, methNames=methNa, pointSi=0.8, xlim=c(0,0.45),
txtLoc=c(0.12,0.1,0.033), tit=paste("Cluster",gr," Example: ",names(rocPD)[jR]), legCex=1)
```
```{r VolcanoClu1, fig.height=10, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4,ncol=2))
try(VolcanoPlotW(testPD, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[1], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testMQ, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[2], expFCarrow=TRUE, silent=TRUE),silent=TRUE)
try(VolcanoPlotW(testPL, useComp=j, FCthrs=1.5, FdrThrs=0.05, annColor=c(4,2,3), ProjNa=methNa[3], expFCarrow=TRUE, silent=TRUE),silent=TRUE)}
```
------
## Analysis Focussing on UPS1 Spike-In Proteins Only
We know from the experimental setup that there were 48 [UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1)
present in the commercial mix added to a constant background of yeast-proteins.
The lowest concentrations are extremely challenging and it is no surprise that many of them were not detected at the lowest concentration(s).
In order to choose among the various concentrations of UPS1, let's look at how many NAs are in each group of replicates (ie before NA-imputation),
and in particular at the number of NAs among the UPS1 proteins.
Previously we've looked at the total number of NAs; now let's focus just on the UPS1 proteins.
Obviously, instances of non-quantified UPS1 proteins make comparisons using these samples rather uncertain, since NA-imputation is just an 'educated guess'.
```{r nNA2, echo=TRUE}
tab1 <- rbind(PD=sumNAperGroup(dataPD$raw[which(dataPD$annot[,"SpecType"]=="UPS1"),], grp9),
MQ=sumNAperGroup(dataMQ$raw[which(dataMQ$annot[,"SpecType"]=="UPS1"),], grp9),
PL= sumNAperGroup(dataPL$raw[which(dataPL$annot[,"SpecType"]=="UPS1"),], grp9) )
kable(tab1, caption="The number of NAs in the UPS1 proteins", align="c")
```
One can see that starting from the 5th level of UPS1 concentrations almost all UPS1 proteins were found in nearly all samples.
In consequence we'll avoid systematically using all of them; the choice should depend on the individual protein and the quantification method.
Let's look graphically at the number of NAs in each of the UPS1 proteins along the quantification methods :
```{r nNAfig1, fig.height=3.5, fig.width=9.5, fig.align="center", echo=TRUE}
countRawNA <- function(dat, newOrd=UPS1$ac, relative=FALSE) { # count number of NAs per UPS protein and order as UPS
out <- rowSums(is.na(dat$raw[match(newOrd,rownames(dat$raw)),]))
if(relative) out/nrow(dat$raw) else out }
sumNAperMeth <- cbind(PD=countRawNA(dataPD), MQ=countRawNA(dataMQ), PL=countRawNA(dataPL) )
UPS1na <- sub("_UPS","",dataPL$annot[UPS1$ac,"EntryName"])
par(mar=c(6.8, 3.5, 4, 1))
imageW(sumNAperMeth, rowNa=UPS1na, tit="Number of NAs in UPS proteins", xLab="", yLab="",
transp=FALSE, col=rev(RColorBrewer::brewer.pal(9,"YlOrRd")))
mtext("Dark red for high number of NAs",cex=0.7)
```
Typically the number of NAs is similar when comparing the different quantitation approaches; it tends to be a bit higher with MaxQuant.
One can also see that some UPS1 proteins are easier to (detect and) quantify than others.
We can conclude that the capacity to successfully quantify a given protein depends on its abundance and its composition.
### Similarity by PCA (UPS1 proteins only)
Plotting the [principal components (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) typically allows gaining
an overview of how samples relate to each other.
This type of experiment is special for the fact that the majority of proteins is expected to remain constant (yeast matrix),
while only the [UPS1 proteins](https://www.sigmaaldrich.com/FR/en/product/sigma/ups1) vary.
Since we are primarily interested in the UPS1 proteins, the regular PCA plots are not shown here, but rather the PCA of the lines identified as UPS1.
[Principal component analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) cannot handle NA-values. Either all lines with any NAs have to be excluded, or data after NA-imputation have to be used.
Here, the option of plotting data after NA-imputation was chosen (when filtering to UPS1 lines only, one would lose too many lines, ie proteins).
The plots below are made using the function `plotPCAw()` from the package [wrGraph](https://CRAN.R-project.org/package=wrGraph).
Via indexing we choose only the lines/proteins with the annotation 'spike' (ie UPS1).
#### PCA of UPS1 for ProteomeDiscoverer
```{r PCA2PD, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
try(plotPCAw(testPD$datImp[which(testPD$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on ProteomeDiscoverer, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
```
#### PCA of UPS1 for MaxQuant
```{r PCA2MQ, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
try(plotPCAw(testMQ$datImp[which(testMQ$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on MaxQuant, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
```
#### PCA of UPS1 for Proline
```{r PCA2PL, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
try(plotPCAw(testPL$datImp[which(testPL$annot[,"SpecType"]=="spike"),], sampleGrp=grp9, tit="PCA on Proline, UPS1 only (NAs imputed)", rowTyName="proteins", useSymb2=0, silent=TRUE), silent=TRUE)
```
Based on the PCA plots one can see that the concentrations 125 - 500 amol are very much alike; detecting differences may perform better when these are not combined, as also confirmed by the ROC section later.
In the Screeplot we can see that the first principal component captures almost all variability.
Thus, displaying the 3rd principal component (as done above) adds little information.
### CV of Replicates
In order to have more data available for linear regression modelling, it was decided to use the UPS1 abundance values after NA-imputation for the linear regressions.
Previously it was shown that NA values originate predominantly from absent or very low abundance quantitations, which justifies replacing NA values by low abundance values in a shrinkage-like fashion.
As a general indicator of data-quality and -usability let's look at the intra-replicate variability.
Here we plot all intra-group CVs (defined by UPS1-concentration), either the CVs for all quantified proteins or the UPS1 proteins only.
In the figure below the complete series (including yeast) is shown on the left side, the human UPS1 proteins only on the right side.
Briefly, vioplots show a kernel-estimate for the distribution, in addition, a box-plot is also integrated (see vignette to package [wrGraph](https://CRAN.R-project.org/package=wrGraph)).
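For reference, the coefficient of variation of a group of replicate measures is simply the standard deviation divided by the mean, which is essentially what _rowGrpCV()_ from wrMisc computes per row (protein) and group of columns. A self-contained sketch with made-up values :
```{r cvSketch, echo=TRUE}
## minimal sketch : coefficient of variation (CV) for one group of replicates
x <- c(20.1, 20.5, 19.8)        # made-up abundance values of one protein (3 replicates)
sd(x) / mean(x)                 # CV = standard deviation / mean
```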
```{r intraReplicCV1, fig.height=10, fig.width=12, fig.align="center", echo=TRUE}
## combined plot : all data (left), Ups1 (right)
layout(1:3)
sumNAinPD <- list(length=18)
sumNAinPD[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testPD$datImp, grp9))))
sumNAinPD[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testPD$datImp[which(testPD$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinPD)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinPD)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinPD, halfViolin="pairwise", tit="CV Intra Replicate, ProteomeDiscoverer", cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
sumNAinMQ <- list(length=18)
sumNAinMQ[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testMQ$datImp, grp9))))
sumNAinMQ[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testMQ$datImp[which(testMQ$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinMQ)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9)) # paste(unique(grp9),"all",sep=".")
names(sumNAinMQ)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".") #paste(unique(grp9),"Ups1",sep=".")
try(vioplotW(sumNAinMQ, halfViolin="pairwise", tit="CV intra replicate, MaxQuant",cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
sumNAinPL <- list(length=18)
sumNAinPL[2*(1:length(unique(grp9))) -1] <- as.list(as.data.frame(log2(rowGrpCV(testPL$datImp, grp9))))
sumNAinPL[2*(1:length(unique(grp9))) ] <- as.list(as.data.frame(log2(rowGrpCV(testPL$datImp[which(testPL$annot[,"SpecType"]=="spike"),], grp9))))
names(sumNAinPL)[2*(1:length(unique(grp9))) -1] <- sub("amol","",unique(grp9))
names(sumNAinPL)[2*(1:length(unique(grp9))) ] <- paste(sub("amol","",unique(grp9)),"Ups",sep=".")
try(vioplotW(sumNAinPL, halfViolin="pairwise", tit="CV Intra Replicate, Proline", cexNameSer=0.6))
mtext("left part : all data\nright part: UPS1",adj=0,cex=0.8)
```
The distribution of intra-group CV-values shows (without major surprise) that the highest UPS1 concentrations replicate best.
This phenomenon also correlates with the content of NAs in the original data.
When imputing NA-values it is a challenge to respect the variability of the respective data (the NA-neighbours) before NA-imputation.
Many NA-values can be observed at very low UPS1-doses, where too few initial quantitation values may remain for meaningful comparisons.
Of course, with an elevated content of NAs the mechanism of NA-substitution will also contribute to masking (in part) the true variability.
In consequence, pair-wise comparisons using one of the higher UPS1-concentration groups are expected to have a decent chance to rather specifically reveal a high number of UPS1 proteins.
One can also see that lower concentrations of UPS1 usually have worse CVs (coefficient of variation) in the respective samples.
### Testing All Individual UPS1 Proteins By Linear Regression
First, we construct a container for storing the various measures and results which we will look at later on.
```{r linModel0, echo=TRUE}
## prepare object for storing all results
datUPS1 <- array(NA, dim=c(length(UPS1$ac),length(methNa),7), dimnames=list(UPS1$ac,c("PD","MQ","PL"),
c("sco","nPep","medAbund", "logp","slope","startFr","cluNo")))
```
Now we'll calculate the linear models and extract the slope & p-value for each UPS1 protein.
The functions used also allow plotting the resulting regression results, but plotting each UPS1 protein would make very crowded figures.
Instead, we'll plot representative examples only after clustering the regression-results.
#### Linear Regression for each UPS1 : ProteomeDiscoverer
```{r linModelPD, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE}
lmPD <- list(length=length(NamesUpsPD))
doPl <- FALSE
lmPD[1:length(NamesUpsPD)] <- lapply(NamesUpsPD[1:length(NamesUpsPD)], linModelSelect, dat=dataPD,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmPD) <- NamesUpsPD
```
```{r linModelPD2, echo=TRUE}
## We make a little summary of regression-results (ProteomeDiscoverer)
tmp <- cbind(log10(sapply(lmPD, function(x) x$coef[2,4])), sapply(lmPD, function(x) x$coef[2,1]), sapply(lmPD, function(x) x$startLev))
datUPS1[,1,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmPD)), ]
datUPS1[,1,"medAbund"] <- apply(wrMisc::.scale01(dataPD$datImp)[match(UPS1$ac,rownames(dataPD$datImp)),],1,median,na.rm=TRUE)
```
#### Linear Regression for each UPS1 : MaxQuant
```{r linModelMQ, echo=TRUE}
lmMQ <- list(length=length(NamesUpsMQ))
lmMQ[1:length(NamesUpsMQ)] <- lapply(NamesUpsMQ[1:length(NamesUpsMQ)], linModelSelect, dat=dataMQ,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmMQ) <- NamesUpsMQ
```
```{r linModelMQ2, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE}
## We make a little summary of regression-results (MaxQuant)
tmp <- cbind(log10(sapply(lmMQ, function(x) x$coef[2,4])), sapply(lmMQ, function(x) x$coef[2,1]), sapply(lmMQ, function(x) x$startLev))
datUPS1[,2,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmMQ)), ]
datUPS1[,2,"medAbund"] <- apply(wrMisc::.scale01(dataMQ$datImp)[match(UPS1$ac,rownames(dataMQ$datImp)),],1,median,na.rm=TRUE)
```
#### Linear Regression for each UPS1 : Proline
```{r linModelPL, echo=TRUE}
lmPL <- list(length=length(NamesUpsPL))
lmPL[1:length(NamesUpsPL)] <- lapply(NamesUpsPL[1:length(NamesUpsPL)], linModelSelect, dat=dataPL,
expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=doPl, silent=TRUE)
names(lmPL) <- NamesUpsPL
```
```{r linModelPLsum, fig.height=17, fig.width=9.5, fig.align="center", echo=TRUE}
tmp <- cbind(log10(sapply(lmPL, function(x) x$coef[2,4])), sapply(lmPL, function(x) x$coef[2,1]), sapply(lmPL, function(x) x$startLev))
datUPS1[,3,c("logp","slope","startFr")] <- tmp[match(rownames(datUPS1), names(lmPL)), ]
datUPS1[,3,"medAbund"] <- apply(wrMisc::.scale01(dataPL$datImp)[match(UPS1$ac,rownames(dataPL$datImp)),],1,median,na.rm=TRUE)
```
#### Frequency Of Starting Levels For Regression
To get a general view, let's look at where the regressions typically have their best starting site (ie how many low-concentration points are usually better omitted):
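The idea behind this starting-level choice can be sketched as below (not run; `y` is a hypothetical vector of mean abundances per concentration level and `conc` the corresponding log-concentrations; the actual _linModelSelect()_ works on the full replicate data and may apply additional criteria) :
```{r startLevSketch, echo=TRUE, eval=FALSE}
## minimal sketch : compare regressions starting from increasing concentration levels
## and keep the start giving the best p-value for H0: slope=0
pVal <- sapply(1:5, function(st) {
  fit <- stats::lm(y[st:length(y)] ~ conc[st:length(conc)])
  summary(fit)$coefficients[2, 4] })        # p-value of the slope term
which.min(pVal)                             # best starting level
```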
```{r linModelStartStat, echo=TRUE}
## at which concentration of UPS1 did the best regression start ?
stTab <- sapply(1:5, function(x) apply(datUPS1[,,"startFr"],2,function(y) sum(x==y)))
colnames(stTab) <- paste("lev",1:5,sep="_")
kable(stTab, caption = "Frequency of starting levels for regression")
```
### Global Comparison Of Regression Models
Next, we'll inspect the relation between regression-slopes and p-values (for H0: slope=0) :
```{r linModelPlotAll, fig.height=12, fig.width=9.5, fig.align="center", echo=TRUE}
layout(matrix(1:4,ncol=2))
subTi <- "fill according to median abundance (blue=low - green - red=high)"
xyRa <- apply(datUPS1[,,4:5], 3, range, na.rm=T)
plotMultRegrPar(datUPS1, 1, xlim=xyRa[,1], ylim=xyRa[,2],tit="ProteomeDiscoverer UPS1, p-value vs slope",subTit=subTi) # adj wr 9jan23
plotMultRegrPar(datUPS1, 2, xlim=xyRa[,1], ylim=xyRa[,2],tit="MaxQuant UPS1, p-value vs slope",subTit=subTi)
plotMultRegrPar(datUPS1, 3, xlim=xyRa[,1], ylim=xyRa[,2],tit="Proline UPS1, p-value vs slope",subTit=subTi)
```
We can observe that slope and (log)p-value of the resulting regressions do not necessarily correlate well.
Thus, considering only one of these resultant values may not be sufficient.
### Summarize Linear Regression Results
When judging results for individual UPS1 proteins one may see that both the value of the slope and the p-value (for H0: slope=0) are important to consider.
For example, there are some cases where the quantitations line up well, giving a good p-value, but with slopes < 0.4.
This is definitely not the type of dose-response characteristics we are looking for.
In consequence, let's construct a **combined score** from the components _slope_ and _p-value_ for easier consideration of both elements at once :
```{r combRegrScore1, echo=TRUE}
for(i in 1:(dim(datUPS1)[2])) datUPS1[,i,"sco"] <- -datUPS1[,i,"logp"] - (datUPS1[,i,"slope"] -1)^2 # cut at > 8
```
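To illustrate the behaviour of this score (made-up values) : a slope of exactly 1 leaves just the negated log10 p-value, while deviating slopes get penalized quadratically :
```{r scoreIllustration, echo=TRUE}
## illustration : sco = -log10(p) - (slope - 1)^2
logp <- log10(1e-3)             # = -3
-logp - (1.0 - 1)^2             # slope 1.0 : score 3
-logp - (0.5 - 1)^2             # slope 0.5 : score 2.75
```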
Next, let's bring together all linear-model scores, the number of peptides and the median protein abundance for each of the UPS1 proteins in one object, to facilitate further steps.
```{r combRegrScore2, echo=TRUE}
datUPS1[,1,2] <- rowSums(dataPD$count[match(UPS1$ac,dataPD$annot[,1]),,"NoOfPeptides"], na.rm=TRUE)
datUPS1[,2,2] <- rowSums(dataMQ$count[match(UPS1$ac,dataMQ$annot[,1]),,1], na.rm=TRUE)
datUPS1[,3,2] <- rowSums(dataPL$count[match(UPS1$ac,dataPL$annot[,1]),,"NoOfPeptides"], na.rm=TRUE)
```
Now we can explore the regression score and its relation to other parameters; below this is done graphically.
```{r combRegrScore3, fig.height=6, fig.width=9.5, fig.align="center", echo=TRUE}
layout(matrix(1:4, ncol=2))
par(mar=c(5.5, 2.2, 4, 0.4))
col1 <- RColorBrewer::brewer.pal(9,"YlOrRd")
imageW(datUPS1[,,1], col=col1, tit="Linear regression score", xLab="",yLab="",transp=FALSE)
mtext("red for bad score", cex=0.75)
imageW(log(datUPS1[,,2]), tit="Number of peptides", xLab="",yLab="", col=col1, transp=FALSE)
mtext("dark red for high number of peptides", cex=0.75)
## ratio : regression score vs no of peptides
imageW(datUPS1[,,1]/log(datUPS1[,,2]), col=rev(col1), tit="Regression score / Number of peptides", xLab="",yLab="", transp=FALSE)
mtext("dark red for high (good) lmScore/peptide ratio)", cex=0.75)
## score vs abundance
imageW(datUPS1[,,1]/datUPS1[,,3], col=rev(col1), tit="Regression score / median Abundance", xLab="",yLab="", transp=FALSE)
mtext("dark red for high (good) lmScore/abundance ratio)", cex=0.75)
```
From the heatmap-like plots we can see that some proteins are rather consistently quantified by any of the methods.
Some of the variability may be explained by the number of peptides (in the case of MaxQuant 'razor peptides' were used), see the plot 'Regression score / Number of peptides'.
In contrast, UPS-protein median abundance does not correlate with or explain this phenomenon (see the last plot 'Regression score / median Abundance').
So we cannot support the hypothesis that highly abundant proteins get quantified better.
### Grouping of UPS1 Proteins to Display Representative Proteins
Using the linear regression score defined above we can rank UPS1 proteins and display representative ones in order to avoid crowded and repetitive figures.
Now, we can group the regression scores into clusters and display representative examples for each group.
Here, we (pre)define that we want to obtain 5 groups (like ratings from 1 to 5 stars); a k-means clustering approach was chosen.
```{r combScore1, echo=TRUE}
## number of groups for clustering
nGr <- 5
chFin <- is.finite(datUPS1[,,"sco"])
if(any(!chFin)) datUPS1[,,"sco"][which(!chFin)] <- -1 # just in case..
## clustering using kMeans
kMx <- stats::kmeans(standardW(datUPS1[,,"sco"], byColumn=FALSE), nGr)$cluster
datUPS1[,,"cluNo"] <- matrix(rep(kMx, dim(datUPS1)[2]), nrow=length(kMx))
geoM <- apply(datUPS1[,,"sco"], 1, function(x) prod(x)^(1/length(x))) # geometric mean across analysis soft
geoM2 <- lrbind(by(cbind(geoM,datUPS1[,,"sco"], clu=kMx), kMx, function(x) x[order(x[,1],decreasing=TRUE),])) # organize by clusters
tmp <- tapply(geoM2[,"geoM"], geoM2[,"clu"], median)
geoM2[,"clu"] <- rep(rank(tmp, ties.method="first"), table(kMx))
geoM2 <- geoM2[order(geoM2[,"clu"],geoM2[,"geoM"],decreasing=TRUE),] # order as decreasing median.per.cluster
geoM2[,"clu"] <- rep(1:max(kMx), table(geoM2[,"clu"])[rank(unique(geoM2[,"clu"]))]) # replace cluster-names to increasing
try(profileAsClu(geoM2[,2:4], geoM2[,"clu"], tit="Clustered Regression Results for UPS1 Proteins", ylab="Linear regression score"))
```
```{r combScore2, echo=TRUE}
datUPS1 <- datUPS1[match(rownames(geoM2), rownames(datUPS1)),,] # bring in new order
datUPS1[,,"cluNo"] <- geoM2[,"clu"] # update cluster-names
### prepare annotation of UPS proteins
annUPS1 <- dataPL$annot[match(rownames(datUPS1), dataPL$annot[,1]), c(1,3)]
annUPS1[,2] <- substr(sub("_UPS","",sub("generic_ups\\|[[:alnum:]]+-{0,1}[[:digit:]]\\|","",annUPS1[,2])),1,42)
```
```{r combScore3, echo=TRUE}
## index of representative for each cluster (median position inside cluster)
UPSrep <- tapply(geoM2[,"geoM"], geoM2[,"clu"], function(x) ceiling(length(x)/2)) + c(0, cumsum(table(geoM2[,"clu"]))[-nGr])
```
Previously we organized all UPS1 proteins according to their regression characteristics into 5 clusters and each cluster was ordered for descending scores.
Now we can use the median position within each cluster as representative example for this cluster.
#### Representative UPS1-protein of the Best Group (the '+++++')
```{r regr5star, echo=TRUE}
gr <- 1
useLi <- which(datUPS1[,1,"cluNo"]==gr)
colNa <- c("Protein",paste(colnames(datUPS1), rep(c("slope","logp"), each=ncol(datUPS1)), sep=" "))
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption=paste("Regression details for cluster of the",length(useLi),"best UPS1 proteins "), col.names=colNa, align="l"),silent=TRUE)
```
```{r regrPlot5star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
## Plotting the best regressions; this requires package wrGraph version 1.2.5 (or higher)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 2nd Best Group (the '++++')
```{r regr4star, echo=TRUE}
gr <- 2
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption=paste("Regression details for cluster of the",length(useLi),"2nd best UPS1 proteins "), col.names=colNa, align="l"),silent=TRUE)
```
```{r regrPlot4star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 3rd Group (the '+++')
```{r regr3star, echo=TRUE}
gr <- 3
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 3rd cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
```
```{r regrPlot3star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 4th Group (the '++')
```{r regrPlot2star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
gr <- 4
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 3rd cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
#### Representative UPS1-protein of the 5th (And Last) Group (the '+')
```{r regrPlot1star, fig.height=9, fig.width=9.5, fig.align="center", echo=TRUE}
gr <- 5
useLi <- which(datUPS1[,1,"cluNo"]==gr)
try(kable(cbind(annUPS1[useLi,2], signif(datUPS1[useLi,,"slope"],3), signif(datUPS1[useLi,,"logp"],3)),
caption="Regression details for 5th cluster UPS1 proteins ", col.names=colNa, align="l"),silent=TRUE)
if(packageVersion("wrGraph") >= "1.2.5"){
layout(matrix(1:4, ncol=2))
tit <- paste0(methNa,", ",annUPS1[UPSrep[gr],1])
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPD, tit=tit[1], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataMQ, tit=tit[2], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE)
try(tm <- linModelSelect(annUPS1[UPSrep[gr],1], dat=dataPL, tit=tit[3], expect=names(grp9), startLev=1:5, cexXAxis=0.7, logExpect=TRUE, plotGraph=TRUE, silent=TRUE),silent=TRUE) }
```
## Additional Comments
\normalsize
The choice of the 'best suited' approach to quantify and compare proteomics data is not trivial at all.
Particular attention has to be given to the choice of the numerous 'small' parameters which may have a very strong impact on the final outcome,
as it has been experienced when preparing the data for this vignette or at other places (eg [Chawade et al 2015](https://doi.org/10.1021/pr500665j)).
Thus, knowing and understanding well the software/tools one has chosen is of prime importance !
Of course, this also concerns the protein-identification part/software.
The total number of proteins identified varies considerably between methods; this information may be very important to the user in real-world settings
but is only partly taken into consideration in the comparisons presented.
ROC curves allow us to gain more insight into the impact of cutoff values (alpha) for statistical testing.
Frequently the ideal threshold maximizing sensitivity and specificity lies quite distant from the common 5-percent threshold.
This indicates that the common 5-percent threshold may often not be the 'optimal' compromise for calling differentially abundant proteins.
However, the _optimal_ point varies very much between data-sets, and in a real-world setting with unknown samples this type of analysis is not possible.
As mentioned before, the dataset used in this vignette is not very recent; much better performing mass-spectrometers have been introduced since then.
The main aim of this vignette consists in showing _how to use wrProteo_ with a smaller example (allowing to limit the file-size of this package).
Thus, for more rigorous scientific conclusions the user is encouraged to run the same procedure using data acquired on more recent mass-spectrometers.
## Acknowledgements
The author wants to acknowledge the support by the [IGBMC](https://www.igbmc.fr) (CNRS UMR 7104, Inserm U 1258, UdS),
[CNRS](http://www.cnrs.fr/en), [Université de Strasbourg](https://www.unistra.fr) and [Inserm](https://www.inserm.fr)
and of course all colleagues from the [IGBMC proteomics platform](https://proteomics.igbmc.fr).
The author wishes to thank the [CRAN](https://CRAN.R-project.org) -staff for all their help with new entries and their efforts in maintaining this repository of R-packages.
Furthermore, many very fruitful discussions with colleagues at national and international level have helped to formulate ideas and improve the tools presented here.
Thank you for your interest. This package is constantly evolving; new features/functions may get added to the next version of [this package](https://CRAN.R-project.org/package=wrProteo).
## Session-Info
For completeness :
\small
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
/scratch/gouwar.j/cran-all/cranData/wrProteo/vignettes/wrProteoVignetteUPS1.Rmd
#' Settings for AA fragments
#'
#' This function provides basic settings for which types of fragments may accommodate which types of modifications : $knownMods (which modifications may be considered on which fragment types), $specAAMod (specific AA sites, if applicable), $modChem (atomic composition of the mass changes) and $neutralLossOrGain (modification types treated as neutral loss or gain).
#' For example, here 'p' codes for gain of mass for HPO3 only at S, T and Y residues.
#' Note: $knownMods$Nterm and $knownMods$Cterm are treated as mutually exclusive
#' @param outTy (character) default "all" or any of the list-elements
#' @return list ($knownMods, $specAAMod, $modChem, $neutralLossOrGain)
#' @seealso \code{\link{makeFragments}}, \code{\link{fragmentSeq}}, \code{\link[wrProteo]{massDeFormula}}
#' @examples
#' AAfragSettings()
#' @export
AAfragSettings <- function(outTy="all"){
knownMods <- list(noMod=c(""), Nterm=c("a","b","c"), Cterm=c("x","y","z"), NCterm=c("d","i"),any=c(),
intern=c("a","b","c","f","j","x","y","z"), spcAA=c("p","h","k","o","r","s","m","n","u"), spcNterm=c("g","n"),spcCterm=c("u"))
specAAMod <- list(p=c("T","S","Y"), h=c("S","T","E","D"),k=c("Q","K","R","N"),o=c("M"), r=c("C"),s="S",m=c("R","K"),n=c("Q"),u=c("R"))
## if possible avoid 'intern' and 'NCterm' overlapping, may cause problems in countPotModifAAs() !!
modChem <- matrix(c("","+0H","a","-C-2H-2O", "b","-2H-O","c","-O+N+2H", "x","+C+O-2H","y","","z","-N-2H", "d","-2H-O","i","-C-O",
"f","-2H-O", "g","-C-2H-O-N", "j","-C-O","p","HP3O","h","-2H-O","k","-3H-N", "o","+O",
"r","+O","s","+2C+2H+O","m","+C+2H","n","-3H-N","u","-C-4H-2N-O","q","-3H-P-4O"), ncol=2, byrow=TRUE, dimnames=list(NULL,c("ty","mod")))
neutralLossOrGain <- c("b","d","f","h","k","n","z")
switch(outTy,
knownMods=knownMods,
specAAMod=specAAMod,
modChem=modChem,
neutralLossOrGain=neutralLossOrGain,
all=list(knownMods=knownMods, specAAMod=specAAMod, modChem=modChem,neutralLossOrGain=neutralLossOrGain))}
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/R/AAfragSettings.R
#' Add modifications to peptide mass
#'
#' Adjust/add mass for modifications from 'modTy' to all peptides in 'pepTab' based on count 'cou' of occurances of modifications :
#' Either fixed or variable modifications will be added to the mass of initial peptides from argument \code{papTab}.
#' Terminal ionization (like 'b' or 'y' -fragments) is treated as fixed modification and the resulting masses will correspond to standard mono-protonated ions.
#' Since variable and fixed modification types can't be run in a single instance, the function has to get calles twice, it is recommended to always start with the fixed modfications,
#' In the case of fixed modifications (like defining 'b' or 'y' fragments) neutral peptide masses should be given to add the corresponding mass-shift (and to obtain mono-protonated ions).
#' In case of variable modifications (like 'd' or 'p'), the corresponding ions from the fixed modifications should get furnished to add the corresponding mass-shift,
#' the masses resulting from the initial fixed modifications run can be used.
#' Note, that transforming a neutral precursor M into MH+ is also considered a modification.
#' The results are also correct with obligatory fragments that can't occur the same time (eg x & y ions can't be same time, need to make add'l lines...).
#' This function has a multiprocessor mode, with small data-sets (like the toy example below) there is typcally no gain in performance.
#' @param cou (list) list of matrices with counts for number of modifications per peptide
#' @param pepTab (matrix) table with peptide properties
#' @param combTerm (matrix) table with separate rows for $basMod that are exclusive (ie can't be accumulated, eg x & y ions)
#' @param modTy (character) list of modification types to be considered
#' @param lastIndex (integer) index-1 (ie last index from prev matrix) from which new peptide-variants should start from
#' @param modChem (character) optional modifications
#' @param basVarMod (character) toggle if fixed ('basMod') or variable ('varMod') modificatons should be calculated
#' @param massTy (character) default 'mono'
#' @param knownMods (list) optional custom definition of which modification is N-term, etc (see \code{\link{AAfragSettings}})
#' @param nProc (integer) number of processors in case of multi-processor use (requires Bioconductor package \code{BiocParallel})
#' @param parallDefault (logical) for use of other/previously set \code{register(bpstart())} in case \code{.parCombinateAllAndSum} is called
#' @param silent (logical) suppress messages
#' @param debug (logical) for bug-tracking: more/enhanced messages and intermediate objects written in global name-space
#' @param callFrom (character) allows easier tracking of message(s) produced
#' @return list of $pepTab (table of peptides as single-charge positive ions), $abc ('representative' list of all combinations to add). Main result in $pepTab
#' @seealso \code{\link[wrMisc]{convToNum}}
#' @examples
#' pep1 <- c(pe1="KPEPTI")
#' # The table of possible terminal fragments (for simplicity terminal only)
#' pepTab1 <- makeFragments(pep1, min=3, max=7, internFra=FALSE)
#' # Which fragment may be subject to how many modifications (including ionization by H+)
#' cou1 <- countPotModifAAs(pepTab=pepTab1, modTy=list(basMod=c("b","y")))
#' # Add modifications (here: ionize all peptides by H+)
#' preMa1 <- addMassModif(cou=cou1$cou, pepTab=pepTab1, combTerm=cou1$combTerm,
#' modTy=list(basMod=c("b","y")), basVarMod="basMod")
#' preMa1
#'
#' ## Example including variable modifications
#' modT3 <- list(basMod=c("b","y"),varMod=c("p","h","d"))
#' cou3 <- countPotModifAAs(pepTab=pepTab1, modTy=modT3)
#' ## Now we re-use/inject the results from the fixed modifications
#' preMa3 <- addMassModif(cou=cou3$cou, pepTab=preMa1$pepTab, combTerm=cou1$combTerm,
#' modTy=modT3, basVarMod="varMod")
#' head(preMa3$pepTab,12)
#' @export
addMassModif <- function(cou, pepTab, combTerm, modTy, lastIndex=NULL, modChem=NULL, basVarMod="basMod", massTy="mono", knownMods=NULL, nProc=1, parallDefault=TRUE, silent=FALSE, debug=FALSE, callFrom=NULL){
## adjust/add mass for modifications from 'modTy' to all peptides in 'pepTab' based on count 'cou' :
## also OK with obligatory fragments that can't occur at the same time (eg x & y ions can't be present simultaneously, need to make add'l lines...)
## return list of $pepTab, $basMod, $varMod, $abc ('representative' list of all combinations to add)
## main result in $varMod (includes basMod, has add'l col 'isoMasMod' for warning); if empty (no var modifs done) use $basMod
fxNa <- wrMisc::.composeCallName(callFrom,newNa="addMassModif")
if(is.null(modChem)) modChem <- AAfragSettings(outTy="all")$modChem
msg <- " 'basVarMod' should be either 'basMod' or 'varMod' (length=1) "
if(length(basVarMod) >1) {message(fxNa,"truncating"); basVarMod <- basVarMod[1]}
if(!any(c("basMod","varMod") %in% basVarMod)) stop(msg)
modTy <- checkModTy(modTy,knownMods=knownMods,silent=silent,callFrom=fxNa)
abc <- NULL
dataOK <- FALSE
if(length(cou) <1 | length(pepTab) <1) {
if(!silent) message(fxNa," 'cou' and/or 'pepTab' is/are empty - nothing to do !")
} else if(nrow(pepTab) >0) {
dataOK <- TRUE
if(length(lastIndex) <1) lastIndex <- if("no" %in% colnames(pepTab)) max(pepTab[,"no"], na.rm=TRUE) else nrow(pepTab)
}
if(dataOK) {
## finish defining mass-modifications :
thisIsBasMod <- any(nchar(modTy$basMod) >0) & basVarMod %in% c("basMod","all")
uniqCo1 <- wrMisc::firstOfRepLines(cou[[if(thisIsBasMod) "basMod" else "varMod"]], outTy="all") # unique combination schemes (for easy repeating)
if(debug) {message(" .. xxaddMassModif0 \n")}
couX <- cou[[if(thisIsBasMod) "basMod" else "varMod"]][uniqCo1$ind,] # changed 26oct17
if(!is.matrix(couX)) couX <- matrix(couX, nrow=length(uniqCo1$ind), dimnames=list(
rownames(cou[[if(thisIsBasMod) "basMod" else "varMod"]])[uniqCo1$ind], colnames(cou[[if(thisIsBasMod) "basMod" else "varMod"]]) ))
if(any(couX >0)) {
## remove cols/modifs not encountered
chCol <- !(colSums(couX) <1 & colnames(couX) %in% names(AAfragSettings("specAAMod")))
if(all(!chCol)) {
couX <- 0
abc <- NULL
if(!silent) message(fxNa," nothing to do for mass-modifications")
} else if(any(!chCol)) {couX <- if(sum(chCol) >1) couX[,which(chCol)] else {
matrix(couX[,which(chCol)], ncol=1, dimnames=list(rownames(couX), colnames(couX[which(chCol)]))) }}}
if(debug) {message(" .. xxaddMassModif0b\n")}
## complete/finish for non-AAspec ie terminal modifications
if(any(combTerm >1)) { # continue (add more lines) for obligatory modifs that can't occur at the same time (eg y & z ions)
ii <- if(nrow(combTerm) >2) 2 else 1
for(i in ii:nrow(combTerm)) {
useCol <- which(!combTerm[i,] %in% combTerm[(i-1):1,])
if(length(useCol) >0){
useLi <- sort(unique(unlist(lapply(combTerm[-i,useCol], grep, add2FraNames))))
massMod2 <- wrProteo::massDeFormula(modChem[match(combTerm[i,], modChem[,1]),2], massTy=massTy, silent=TRUE, callFrom=fxNa) # mass modifications (simple)
couX <- cou$basMod[useLi,]
colnames(couX) <- massMod2
massMod2 <- rowSums(matrix(as.numeric(wrMisc::conv01toColNa(couX, pasteCol=FALSE)), nrow=nrow(couX)), na.rm=TRUE) # mass modifications in order of output
colnames(couX) <- combTerm[i,]
tmX <- cbind(no=useLi, modif=wrMisc::.pasteCols(wrMisc::conv01toColNa(couX)), mass=as.numeric(pepTab[useLi,"mass"]) +massMod2)
basMod <- rbind(basMod,tmX) }
} }
## note : dephospho q and loss of water will give same mass !
## prepare mass changes, split as fixed OR var modif
if(any(couX >0)) {
if(any(nchar(modTy$basMod) >0) & basVarMod %in% c("basMod","all")) {
## this is FIXED modif !
if(debug) {message(" .. xxaddMassModif1a - fixed modif \n")}
if(!"mod" %in% colnames(pepTab)) pepTab <- cbind(pepTab, mod=rep("", nrow(pepTab)))
newNa <- wrProteo::massDeFormula(modChem[match(if(is.matrix(couX)) colnames(couX) else names(couX),modChem[,1]),2], massTy=massTy, silent=TRUE, callFrom=fxNa)
chNewN <- newNa==0
if(any(chNewN)) names(newNa)[which(chNewN)] <- ""
if(is.matrix(couX)) colnames(couX) <- newNa else couX <- matrix(couX, nrow=1, dimnames=list(NULL,newNa))
mod <- .multMatByColNa(couX)
mod <- mod[uniqCo1$num]
add2FraNames <- wrMisc::.pasteCols(wrMisc::conv01toColNa(cou[[1]])) # only oblig modifs
protMa <- wrProteo::.atomicMasses()["H",massTy] # need to ionize in pos mode ...
pepTab[,"mass"] <- as.numeric(pepTab[,"mass"]) +mod +protMa
pepTab[,"mod"] <- paste0(pepTab[,"mod"],add2FraNames,sep="")
## need to re-check for iso-masses
chAmb <- pepTab[,"ambig"]=="isoMass"
if(any(wrMisc::naOmit(chAmb))) pepTab[which(chAmb),"ambig"] <- NA
chMa <- duplicated(as.numeric(pepTab[,"mass"]), fromLast=FALSE)
if(any(chMa)) {
chM2 <- duplicated(as.numeric(pepTab[,"mass"]), fromLast=TRUE)
pepTab[which(chMa | chM2),"ambig"] <- "isoMass" }
## better reconstruct full name
supNa <- gsub(" ",".",pepTab[,"modSpec"])
chHeadPo <- nchar(supNa) >0 & substr(supNa,1,1) != "."
if(any(chHeadPo)) supNa[which(chHeadPo)] <- paste0(".",supNa,sep="") # add heading '.' separator if any special modif
rownames(pepTab) <- paste0(pepTab[,"origNa"],".",pepTab[,"beg"],"-",pepTab[,"end"],supNa,".",pepTab[,"mod"],sep="")
} else {
## this is VARIABLE modif !
if(debug) {message(" .. xxaddMassModif1b - variable modif \n")}
chMod <- sum(cou$varMod, na.rm=TRUE)
if(chMod <1) {
## no sites found, nothing to do ..
abc <- NULL
} else {
## cou$varMo2 remove q (since p+q =0 modif); ==> finally use $varMo2
if(length(cou$varMo2) < length(cou$varMod)) {
cou$varMo2 <- cou$varMod
chMo <- colnames(cou$varMod) %in% "q"
if(all(c("p","q") %in% colnames(cou$varMod))) cou$varMo2 <- if(sum(!chMo) >1) cou$varMo2[,which(-chMo)] else {
matrix(cou$varMo2[,which(-chMo)], ncol=1, dimnames=list(rownames(cou$varMod), colnames(cou$varMod)[which(-chMo)]))}}
uniqCo2 <- wrMisc::firstOfRepLines(cou$varMo2, outTy="all", callFrom=fxNa) # unique combination schemes (for easy repeating)
tm2 <- match(colnames(cou$varMo2), modChem[,1])
massModV <- wrProteo::massDeFormula(modChem[tm2,2], massTy=massTy, silent=TRUE, callFrom=fxNa) # variable mass modifications
names(massModV) <- modChem[tm2,1]
nVMod <- unique(sort(cou$varMo2)) # number of types of indiv modifications (across cou$varMod)
nVMod <- nVMod[nVMod >0]
uniqCo <- cou$varMo2[uniqCo2$ind,] # max no of modifications by type/group (lines)
## values not exceeding max no of modifs normally already considered in countPotModifAAs() making cou
if(length(dim(uniqCo)) <2) uniqCo <- matrix(uniqCo, nrow=length(uniqCo2$ind), ncol=ncol(cou$varMo2), dimnames=list(rownames(cou$varMo2)[uniqCo2$ind],colnames(cou$varMo2)))
chPa <- requireNamespace("BiocParallel", quietly=TRUE)
isWin <- "windows" %in% .Platform$OS.type
if(!chPa) { message(fxNa,": package 'BiocParallel' not installed, can't run parallel processing")
nProc <- 1}
## main
if(debug){
msg <- if(nrow(uniqCo)==1) "too few peptides for multi-proc" else { if(nProc <2) "not multi-proc" else "multi-proc"}
message(fxNa," - ",msg) }
## may take more time when nProc >1
abc <- if(nrow(uniqCo)==1) combinateAllAndSum(as.numeric(uniqCo), massModV, notSingle=c("q","p"), callFrom=fxNa, silent=silent) else { # (representative) list of all combinations to add
if(nProc <2) { apply(uniqCo,1, combinateAllAndSum, massModV, notSingle=c("q","p"), callFrom=fxNa, silent=silent)
} else .parCombinateAllAndSum(uniqCo, massModV, nProc=nProc, parRegDefault=parallDefault, callFrom=fxNa)}
if(!is.list(abc)) {abc <- list(abc); names(abc) <- as.character(rownames(uniqCo))}
if(debug) {message(" .. xxaddMassModif3\n")}
## now add/dispatch abc to each peptide concerned (uses abc !)
ab0 <- lapply(abc, function(x) {ch0 <- x %in% 0; if(all(ch0)) NULL else {if(any(ch0)) x[which(!ch0)] else x}}) # remove occurrences of 0 mass-shift
couY <- cou$varMo2[which(rowSums(cou$varMo2) >0),]
if(length(dim(couY)) <2) couY <- matrix(couY, ncol=ncol(cou$varMo2), dimnames=list(rownames(cou$varMo2)[which(rowSums(cou$varMo2) >0)], colnames(cou$varMo2)))
uniqCo3 <- wrMisc::firstOfRepLines(couY, outTy="all", callFrom=fxNa) # unique combination schemes (for easy repeating)
chLe <- sapply(ab0,length) <1
if(all(chLe)) { message(fxNa," no modifications left !!"); return(list(pepTab=pepTab, abc=abc))
} else { if(any(chLe)) ab0 <- ab0[which(!chLe)] }
if(debug) {message(" .. xxaddMassModif3a\n") }
## clean list of types of mass-changes (ab0) for repeating mass-changes , sort by alphabet names to get 'd' displayed instead of 'h'
ab0 <- lapply(ab0, function(x) {if(length(x) >1) {x <- x[order(names(x))]; x[!duplicated(x, fromLast=FALSE)]} else x })
## need to introduce mass-change of modifs & names of modifs to subset of main table
names(ab0) <- NULL # will get composed modif-names otherwise
modNa <- names(unlist(ab0))
if(length(modNa) <1) modNa <- unlist(sapply(abc,function(x) rownames(x)[which(x !=0)])) else {
if(any(nchar(names(modNa)) <1)) modNa <- unlist(sapply(abc, function(x) rownames(x)[which(x !=0)]))}
nRep <- sapply(ab0,length)[uniqCo3$num]
repI <- rep(which(rowSums(cou$varMod) >0), nRep)
pepTa3 <- if(length(repI)==1) matrix(pepTab[repI,], nrow=1, dimnames=list(rownames(pepTab)[repI],colnames(pepTab))) else pepTab[repI,]
addToNa <- unlist(sapply(ab0[uniqCo3$num],names))
if(debug) {message(" .. xxaddMassModif3b\n") }
names(ab0) <- NULL # will get composed modif-names otherwise
ab0 <- unlist(ab0[uniqCo3$num])
pepTa3[,"mass"] <- as.numeric(pepTa3[,"mass"]) + ab0
modColNo <- wrMisc::naOmit(match(c("mod","modif","modSpec"), colnames(pepTa3)))[1] # search for column to use for adding modif-names
if(any(nchar(names(ab0)) <1)) message(fxNa," Trouble ahead !? Some variable modification names don't appear !")
pepTa3[,modColNo] <- paste0(pepTa3[,modColNo], addToNa) # add var modif name to modif column
addSpe <- gsub(" ", ".", pepTa3[,"modSpec"])
chSpe <- nchar(addSpe) >0 & substr(addSpe,1,1) != "."
if(any(chSpe)) addSpe[which(chSpe)] <- paste0(".", addSpe[which(chSpe)]) # add heading '.' if followed by something
chModSpe <- grep("modSpe",colnames(pepTa3))
if(length(chModSpe) >0) colnames(pepTa3)[chModSpe[1]] <- "mod" # reset pepTa3 to basic colnames
rownames(pepTa3) <- paste0(pepTa3[,"origNa"],".",pepTa3[,"beg"],"-",pepTa3[,"end"], addSpe) # add var modif name to rownames
if(debug) {message(" .. xxaddMassModif3e\n") }
## make unique (new) index for var modif
pepTa3[,"no"] <- as.integer(lastIndex) +1 + 1:nrow(pepTa3) # increase index
chIso <- pepTa3[,"ambig"] %in% "isoMass"
if(any(chIso)) pepTa3[which(chIso),"ambig"] <- NA # need to re-check iso-mass once combined ...
if(debug) {message(" .. xxaddMassModif4\n") }
## final fusing identif fixed modif and var modif (& re-check for 'isoMass')
rownames(pepTab) <- paste0(pepTab[,"seqNa"], ".", pepTab[,"modSpec"], sep="")
pepTab <- rbind(pepTab, pepTa3)
chInd <- duplicated(pepTab[,"no"])
if(any(chInd)) message(fxNa," BEWARE ! Some index numbers not unique !!")
chIso <- duplicated(pepTab[,"mass"], fromLast=FALSE)
if(any(chIso)) {
chIs2 <- duplicated(pepTab[,"mass"], fromLast=TRUE)
pepTab[which(chIso | chIs2),"ambig"] <- "isoMass" }
## why does this add a duplicated column named "seqNa" -> remove
if(colnames(pepTab)[ncol(pepTab)]=="seqNa") pepTab <- pepTab[,-ncol(pepTab)] # varModif : remove redundant column 'seqNa'
} } } }
list(pepTab=pepTab, abc=abc)}
#' @export
.multMatByColNa <- function(mat,sumByRow=TRUE,...) {
## multiply values of 'mat' by its colnames (numeric equivalent to conv01toColNa(), which repeats concatenated text)
out <- matrix(as.numeric(mat)*as.numeric(rep(colnames(mat), each=nrow(mat))), nrow=nrow(mat))
if(sumByRow) {out <- rowSums(out); names(out) <- rownames(mat)} else rownames(out) <- rownames(mat)
out }
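## Minimal usage sketch (not run; hypothetical 0/1-count matrix with numeric colnames, eg mass-shifts) :
## m1 <- matrix(c(1,0, 2,1), ncol=2, dimnames=list(c("pep1","pep2"), c("79.97","15.99")))
## .multMatByColNa(m1)      # summed mass-shift per row : pep1 = 79.97 + 2*15.99, pep2 = 15.99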
## ----- end of file : R/addMassModif.R -----
#' Check & complete mixed variable and fixed modifications
#'
#' Check & complete settings for mixed variable and fixed modifications.
#' The final format is a list with $basMod, $varMod and $varMo2
#'
#' @param modTy (character) list of modification types to be considered
#' @param knownMods (character) optional custom list of known modifications, default from \code{AAfragSettings(outTy="all")$knownMods}
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return corrected list of mixed variable and fixed modifications ($basMod, $varMod and $varMo2)
#' @seealso \code{\link{AAfragSettings}}
#' @examples
#' modTy1 <- list(basMod=c("b","y","h"),varMod=c("p","o","q"))
#' checkModTy(modTy1)
#' @export
checkModTy <- function(modTy,knownMods=NULL,silent=TRUE,callFrom=NULL){
## check & complete
fxNa <- wrMisc::.composeCallName(callFrom, newNa="checkModTy")
chMod <- which(names(modTy) %in% c("basMod","varMod","varMo2") & !(sapply(modTy, function(x) is.null(x) | identical(x,""))))
if(length(chMod) <1) stop(" Problem with 'modTy' : either incorrect names or empty !")
# check for repeated
chRep <- lapply(modTy, duplicated)
if(any(unlist(chRep))) for(i in which(sapply(chRep,sum) >0)) {modTy[[i]] <- unique(modTy[[i]])
if(!silent) message(fxNa," correcting duplicated modification-terms to ",wrMisc::pasteC(modTy[[i]]))}
## check for unknown labels
chMod <- lapply(modTy,function(x) x %in% unlist(knownMods))
if(any(unlist(chMod))) for(i in which(sapply(chMod, function(x) sum(!x)) >0)) {
if(!silent) message(fxNa," removing unknown modification-labels ", wrMisc::pasteC(modTy[[i]][which(!chMod[[i]])], quoteC="'"))
modTy[[i]] <- modTy[[i]][which(chMod[[i]])]}
if(all(c("varMod","varMo2") %in% names(modTy) ==c(TRUE,FALSE))) { # if modTy$varMo2 missing -> create new ...
modTy$varMo2 <- modTy$varMod # modTy$varMo2 for variable modifs really counted (ie wo 'q' since same mass as without 'p')
chMod <- modTy$varMo2 %in% "q"
if(!silent) message(fxNa," adding $varMo2 to 'modTy'")
if(any(chMod) & !"p" %in% modTy$basMod) modTy$varMo2 <- modTy$varMo2[which(!chMod)] }
modTy }
#' @export
.checkModTy <- function(modTy,knownMods,phoDePho=c("p","q"),modTyGr=c("basMod","varMod"),silent=FALSE,callFrom=NULL) {
## checking of 'modTy'
## return verified/corrected 'modTy'
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".checkModTy")
chModFx <- function(mod, possMod=knownMods) {
ch1 <- which(mod %in% unlist(possMod))
if(length(ch1) >0) mod[ch1] else "" }
if(length(unlist(modTy)) >0) { # if fragmentation/modification types given, check for known entries
modTyIni <- modTy
if(is.list(modTy)) if(any(modTyGr %in% names(modTy))) { # clean modTy to known modifications only
modTy <- list(c(),c())
names(modTy) <- modTyGr
if(modTyGr[1] %in% names(modTyIni)) {
modTy[[1]] <- chModFx(modTyIni[[modTyGr[1]]])
if(identical(phoDePho %in% modTy[[1]], c(FALSE,TRUE))) {
if(!silent) message(callFrom,"de-phosphorylation without phosphorylation not realistic -> omit")
modTy[[1]] <- modTy[[1]][which(!modTy[[1]] %in% phoDePho[2])] } }
if(modTyGr[2] %in% names(modTyIni)) {
modTy[[2]] <- chModFx(modTyIni[[modTyGr[2]]])
## include de-phosho when phospho in $varMod
if(identical(phoDePho %in% modTy[[2]], c(TRUE,FALSE))) {
if(!silent) message(callFrom,"add de-phosphorylation to optional modifications")
modTy[[2]] <- c(modTy[[2]],phoDePho[2])}
if(identical(phoDePho %in% modTy[[2]], c(FALSE,TRUE))) {
if(!silent) message(callFrom,"add phosphorylation to optional modifications (since de-phospho found)")
modTy[[2]] <- c(phoDePho[1],modTy[[2]])}
modTy$varMo2 <- if(phoDePho[2] %in% modTy[[2]]) modTy[[2]][which(!modTy[[2]] ==phoDePho[2])] else modTy[[2]] #variant: wo de-phospho for single modif
}
} else {modTy <- list(""); names(modTy) <- modTyGr[1];
if(!silent) message(callFrom,"no fragmentation/modification types recognized, calculate as unmodified 'pep'")}}
modTy }
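## Minimal usage sketch (not run; assumes 'knownMods' as from AAfragSettings(outTy="all")$knownMods) :
## knoMo <- AAfragSettings(outTy="all")$knownMods
## .checkModTy(list(basMod=c("b","y"), varMod="p"), knownMods=knoMo)
## # since 'p' (phospho) appears in $varMod, de-phospho 'q' gets added and $varMo2 is created without 'q'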
## ----- end of file : R/checkModTy.R -----
#' Full combinatorial and cumulative values
#'
#' Used for preparing all combinations of non-compulsory, ie variable, mass modifications.
#' Variable modifications may or may not be present. Thus, for a given amino-acid with a variable modification two versions of the molecular weight need to be considered.
#' Most (variable) modifications are linked to a type of amino acid, like serine-residues for phosphorylation.
#' Thus, in this case each instance of the amino acid in question may or may not be modified.
#' So, for example if there are 2 serines, 0, 1 or 2 phosphorylation modifications may be present.
#' For this reason there is the argument \code{nMax}, allowing to stay within biologically relevant ranges (external knowledge) and to reduce complexity significantly.
#' Some modifications are exclusive to others (argument \code{notSingle}) : an (artificially occurring) de-phosphorylation event during fragmentation can only happen if the amino acid was already phosphorylated in the first place.
#'
#'
#' @param nMax (integer or data.frame with 1 line) maximum number of modifications
#' @param modVal (numeric, has to have names !) the change of molecular mass introduced by given modifications (as specified by the name of the value)
#' @param notSingle (character) names of 'modVal' where 1st element of 'notSingle' cannot happen/appear if 2nd element not present (eg de-phospho/phosphorylation)
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return named (concatenated names of modVal) numeric vector
#' @seealso \code{\link[wrMisc]{convToNum}}
#' @examples
#' ## to follow easily the results, hypothetical mass-modification values were chosen
#' mo1 <- c(a=10, b=1, c=0.1, d=0.01); nMa1 <- c(1,2,0,3)
#' combinateAllAndSum(nMa1, mo1)
#' ## # like 'b' for phospho & 'd' for de-phospho (which can't happen without phospho event)
#' combinateAllAndSum(nMa1, mo1, notSingle=c("d","b"))
#' @export
combinateAllAndSum <- function(nMax, modVal, notSingle=NULL, silent=TRUE, callFrom=NULL){
## full combinatorial and cumulative values : all values of 'modVal' up to 'nMax' times in all combinations (and summed at end)
## use for all combinations of non-compulsory mass modifications
## return named (concatenated names of modVal) numeric vector
fxNa <- wrMisc::.composeCallName(callFrom, newNa="combinateAllAndSum")
msg <- "'modVal' should be numeric, 'nMax' integer & of same length"
if(is.data.frame(nMax)) if(ncol(nMax) ==length(modVal)) {
nMaNa <- colnames(nMax)
nMax <- as.integer(nMax)
names(nMax) <- nMaNa }
if(!all(length(nMax) >= length(modVal), is.numeric(nMax), is.numeric(modVal))) stop(msg)
if(length(nMax) > length(modVal)) {nMax <- nMax[which(nMax >0)]; if(length(nMax) != length(modVal)) stop(msg)} # adjust if some nMax==0
if(any(nMax <1)) {modVal <- modVal[which(nMax >0)]; nMax <- nMax[which(nMax >0)] }
if(length(modVal) <1) return(0)
if(sum(nMax >0) >1) {
a3a <- wrMisc::combinatIntTable(nMax, include0=TRUE, asList=FALSE, silent=TRUE, callFrom=fxNa)
if(length(notSingle)==2) {
if(is.character(notSingle)) notSingle <- wrMisc::naOmit(match(notSingle, names(modVal)))
if(!silent) message(fxNa," prohibit more occurances of '",names(modVal)[notSingle[1]],"' than '",names(modVal)[notSingle[2]],"'")
resCo <- which(a3a[,,notSingle[1]] > a3a[,,notSingle[2]])
if(length(resCo) >0) a3a[,,notSingle[1]][resCo] <- 0 # set to 0 (ie combination will become redundant, remove later)
}
useNa <- apply(a3a, c(1,2), function(x) paste(rep(names(modVal), x), collapse="")) # for names
out <- array(rep(modVal, each=prod(dim(a3a)[1:2])), dim=dim(a3a))*a3a # still array
out <- as.numeric(apply(out,c(1,2), sum))
names(out) <- useNa
out <- out[wrMisc::firstOfRepeated(names(out))$indUniq] # problem when 2 modifs give the same mass-change : filtering for unique values would merge them -> need to filter by unique names !
## note: BUT working by names() will allow different type mass changes of same numeric value (if same AA concerned this can't be resolved at this place !)
} else { out <- modVal[which(nMax >0)]*(0:max(nMax))
names(out) <- sapply(0:max(nMax), function(x) paste(rep(names(modVal)[which(nMax >0)],x),collapse=""))
}
out }
#' @export
.parCombinateAllAndSum <- function(uniqCo,massModV,nProc=NULL,firstOfRepeated=NULL,parRegDefault=TRUE,silent=FALSE,callFrom=NULL){
## version for multi-processor execution
fxNa <- wrMisc::.composeCallName(callFrom, newNa=".parCombinateAllAndSum")
chPa <- requireNamespace("BiocParallel", quietly=TRUE)
if(!chPa) stop(fxNa,": package 'BiocParallel' not installed, please install from Bioconductor")
maxNProc <- 12
nCoresAvail <- parallel::detectCores()
nProcs <- if(is.null(nProc)) round(nCoresAvail*0.8) else min(nProc, nCoresAvail) # all cores out of 2, n-1 when 4&6, n-2 at 10&12
nProcs <- as.integer(nProcs)
isWin <- length(grep("ming.32", R.Version()$platform)) > 0
out <- NULL
## need to cut lines into named list for running bplapply() (instead of apply() over initial lines of uniqCo)
uniqCoL <- by(uniqCo, 1:nrow(uniqCo), function(x) {names(x) <- colnames(uniqCo); x}) # cut in lines while keeping colnames
##
fxPar <- function(x,massModV,combinateAllAndSum,fxNa,silent) {
## functions from outside (like combinateAllAndSum()) need to get 'imported' explicitly
combinateAllAndSum(x, massModV, notSingle=c("q","p"), callFrom=fxNa, silent=silent) }
if(!silent) message(fxNa," ready to configure/launch as ",nProcs," processors (nProcs) ")
oldOp <- options() # for restoring from backup
on.exit(options(oldOp))
## based on a comment from M.Morgan : remove quotes within options()
options(MulticoreParam=BiocParallel::MulticoreParam(workers=nProcs))
out <- BiocParallel::bplapply(uniqCoL, fxPar, massModV=massModV, combinateAllAndSum=combinateAllAndSum, fxNa=fxNa, silent=silent)
BiocParallel::bpstop()
out }
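## Minimal usage sketch (not run; requires BiocParallel; illustrative mass-shift values only) :
## uniqCo <- matrix(c(1,2, 0,1), ncol=2, dimnames=list(c("a1","a2"), c("p","h")))  # max counts per modif & line
## massModV <- c(p=79.966, h=-18.011)
## .parCombinateAllAndSum(uniqCo, massModV, nProc=2)  # list with all combined mass-shifts per line of 'uniqCo'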
## ----- end of file : R/combinateAllAndSum.R -----
#' Identify Children/Parent settings as a+b=c
#'
#' This functions helps identifying fragments ('parent') characterized by a start- and end-position, that got split into 2 'children' fragments.
#' So, each one of the new 'children' conserves either the start- or end-site of the parent and the the remaining ends are on consecutive positions.
#' For example if the sequence 'BCDEFG' (parent) gets split into 'BCD' (positions 1-3) and 'EFG' (positions 4-6),
#' this will be identified as a children/parent 'family' which could be represented as 'a+b=c' case.
#' Note : At this point only settings with 2 children are considered, for more complex scenarions one may build trees using \code{\link[wrMisc]{buildTree}} (however, this function does not identify 'parents').
#' In proteomics-applications some start- and end-sites may occur multiple times, representing eg unmodified and modified versions of the same basal peptide-sequence.
#' Such duplicated start- and end-cases are handeled as allowed, a 'child' (characterized by its start- and end-position) may occur multiple times, and the
#' corresponding redundant rownames (eg peptide sequence like 'BCD') will be conserved. However, information reflecting eg different peptide modifications must be stored separately.
#' If redudant start- and end-sites accur with different row-names, repeated start- and end-sites will display \code{NA}.
#'
#' @param fragments (matrix or data.frame) integer values in 1st column, for start site of fragment, and in 2nd column as end-sites of fragments, rownames as IDs
#' @param output (character) choose simply returning results as counts or as list with \code{$counts} and \code{$detailIndex} (list with details showing each child1,child2 & parent)
#' @param silent (logical) suppress messages
#' @param callFrom (character) allows easier tracking of message(s) produced
#' @return either numeric vector with cumulated counts (corresponding to rows of \code{fragments}) or list with $count and $detailIndex (list with indexes refering to non-redundant entries of all a+b=c settings identified)
#' @seealso \code{\link[wrMisc]{simpleFragFig}} for graphical representation,\code{\link{countSameStartEnd}}; for building longer consecutive trees (without identification of 'parent') \code{\link[wrMisc]{buildTree}} and \code{\link{contribToContigPerFrag}}
#' @examples
#' frag3 <- cbind(beg=c(4,2,3,7,13,13,15, 2,9,2,9), end=c(14,6,12,8,18,20,20, 8,12,12,18))
#' rownames(frag3) <- c("K","A","E","B","C","D","F", "H","G","I","J")
#' countChildrenParent(frag3)
#' ## example with duplicate start- and end-position positions
#' frag3c <- cbind(beg=c(4,2,3,7, 7,13, 13,13,15, 2,9,2,9,9),
#' end=c(14,6,12,8, 8,18, 18,20,20, 8,12,12,12,18))
#' rownames(frag3c) <- c("K","A","E", "B","B", "C","C","D","F", "H","G","I","G","J")
#' countChildrenParent(frag3c, out="det")
#'
#' @export
countChildrenParent <- function(fragments,output="count",silent=FALSE,callFrom=NULL) {
fxNa <- wrMisc::.composeCallName(callFrom, newNa="countChildrenParent")
msg <- "expecting matrix (or data.frame) with 2 columns with integer values for start end end-sites !"
if(length(fragments) <1 | length(dim(fragments)) <2) stop(msg)
if(is.data.frame(fragments)) fragments <- as.matrix(fragments)
if(is.null(rownames(fragments))) rownames(fragments) <- 1:nrow(fragments)
## need to remove duplicate entries
chDu <- duplicated(paste0(fragments[,1],fragments[,2]), fromLast=FALSE)
iniP <- rownames(fragments)
if(any(chDu)) {
fragments <- fragments[which(!chDu),]
if(!silent) message(fxNa, "remove ",sum(chDu)," duplicate entries") }
if(!is.numeric(fragments)) { fragments <- matrix(wrMisc::convToNum(fragments,spaceRemove=TRUE,convert=NULL,
callFrom=fxNa, silent=silent), ncol=ncol(fragments), dimnames=dimnames(fragments))}
if(!is.numeric(fragments)) stop(msg)
if(any(is.na(fragments))) { fragments <- fragments[which(rowSums(is.na(fragments)) <1),]
if(!silent) message(fxNa," expecting 'fragments' without NAs ! (removing such lines)")
if(length(dim(fragments)) <2) fragments <- matrix(fragments, ncol=2)}
out <- rep(0,nrow(fragments))
names(out) <- rownames(fragments)
## strategy : parent was broken into 2 children, thus one child must have another continuing child (fragment)
## parents must duplicate either start or end-site of children, use to pre-filter data
## filter for duplicated start sites (later among those passing filter for continuing children/fragments)
chDuSt1 <- duplicated(fragments[,1], fromLast=TRUE)
chDuSt2 <- duplicated(fragments[,1], fromLast=FALSE)
uniSt <- !chDuSt1 & !chDuSt2
names(uniSt) <- rownames(fragments)
chCont3 <- NULL
if(any(!uniSt)) {
## parents do exist, now search for 'continuing other child'
st <- which(!uniSt)
if(length(st) >length(unique(st))) message(fxNa," (potential) problem with repeatedly found start fragm !")
## search for continuing children/fragments (ie after end)
chCont2 <- lapply(st, function(x) {y <- x; names(y) <- rownames(fragments)[x]; c(y, which(fragments[x,2] +1 ==fragments[,1]))})
names(chCont2) <- st
chLe <- sapply(chCont2,length) <2
if(any(chLe)) chCont2 <- chCont2[which(!chLe)]
## search for parent fragment
if(any(!chLe)) {chCont3 <- lapply(chCont2, function(x) wrMisc::naOmit(as.integer(c(x[1:2],
sapply(x[-1], function(y) which(fragments[x[1],1] ==fragments[,1] & fragments[y,2] ==fragments[,2]))))))
chL2 <- sapply(chCont3,length) <3
chCont3 <- if(any(!chL2)) chCont3[which(!chL2)] else NULL
## add names (if full output has been chosen)
if(length(chCont3) >0 & !identical(output,"count")) for(i in 1:length(chCont3)) names(chCont3[[i]]) <- rownames(fragments)[chCont3[[i]]]
}
}
if(length(chCont3) >0) {
tab <- table(unlist(chCont3))
out[as.integer(names(tab))] <- tab
}
## need to reintroduce removed duplicates
if(any(chDu)) {
tmp <- rep(NA,length(iniP))
names(tmp) <- iniP
out <- wrMisc::getValuesByUnique(tmp, out, silent=silent, callFrom=fxNa)
}
if(identical(output,"count")) out else list(count=out, detailIndex=chCont3) }
## ----- end of file : R/countChildrenParent.R -----
#' Make table with counts of potential modification sites
#'
#' Makes table 'cou' with counts of (potential) modification sites based on column 'seq' in matrix 'pepTab'.
#' Note: if multiple N- or C-term modifs, then only the first is shown in resulting table 'cou'.
#'
#' @param pepTab (matrix) peptide sequences, start and end sites, typically result from \code{\link{makeFragments}}
#' @param modTy (list) modifications : $basMod for character vector of fixed modifications and $varMod for variable modifications. For one letter-code see AAfragSettings("modChem")
#' @param maxMod (integer) maximal number of variable modifications to be considered in a given fragment (may increase complexity and RAM consumption)
#' @param specAAMod (list) optional custom list showing which AA to be considered with which (one-letter) modification code (default \code{\link{AAfragSettings}})
#' @param knownMods (list) optional custom list showing which modification appears at what type of location, eg N-terminal, internal ... (default \code{\link{AAfragSettings}})
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @param debug (logical) for bug-tracking: more/enhanced messages and intermediate objects written in global name-space
#' @return list of matrixes $cou and $combTerm, with number of modifications per peptides (line in 'pepTab') for basMod, varMod & varMo2
#' @seealso \code{\link{AAfragSettings}}, \code{\link{makeFragments}}
#' @examples
#' protP2 <- c(mesp="MESPEPTIDES", pepe="PEPEPEP")
#' pepTab1 <- makeFragments(protTab=protP2, minFra=6, internFr=TRUE, massTy="mono")
#' cou1 <- countPotModifAAs(pepTab=pepTab1, modTy=list(basMod=c("b","y"),
#' varMod=c("p","h")), debug=FALSE)
#' modTy2 <- list(basMod=c("b","y","h"), varMod=c("x","p","o","q","e","j"))
#' cou2 <- countPotModifAAs(pepTab=pepTab1, modTy=modTy2)
#' @export
countPotModifAAs <- function(pepTab,modTy,maxMod=c(p=3,h=1,k=1,o=1,m=1,n=1,u=1,r=1,s=1),specAAMod=NULL,knownMods=NULL,silent=FALSE,callFrom=NULL,debug=FALSE){
## make table 'cou' with count of modifications based on column 'seq' in matrix 'pepTab'
## return list of matrixes with number of modifications per peptide (line in 'pepTab') for basMod, varMod & varMo2
## note: if multiple N- or C-term modifs, then only the first is shown in resulting table 'cou'
fxNa <- wrMisc::.composeCallName(callFrom, newNa="countPotModifAAs")
if(nrow(pepTab) <1) return(list(cou=NULL, combTerm=NULL)) else {
restrMod <- c("basMod","varMod")
if(is.null(specAAMod)) specAAMod <- AAfragSettings(outTy="all")$specAAMod
if(is.null(knownMods)) knownMods <- AAfragSettings(outTy="all")$knownMods
if(is.null(maxMod)) maxMod <- c(p=3,h=1,k=1,o=1,m=1,n=1,u=1,r=1,s=1)
## table for converting names of fragment types:
useKnoMo <- cbind(c("Nterm","Cterm","NCterm","intern","any"), c("Nter","Cter","NCter","inter","any"))
modTy <- checkModTy(modTy, knownMods=useKnoMo[1:2,1], silent=silent, callFrom=fxNa)
useModT <- which(names(modTy) %in% restrMod & sapply(modTy, function(x) {if(length(x) >0) any(nchar(x) >0) else FALSE}))
if(debug) {message(" .. xxcountPotModifAAs0\n")}
pepTSup <- cbind(protIndex=as.integer(as.factor(pepTab[,"origNa"])), isTerm=pepTab[,"ty"] %in% c("Nter","Cter","full"))
## multiple obligatory modifs (basMod) may be exclusive (can't be on same fragment) : eg multiple Cterm modifications :
## select single pair (and add results adjusted by additive factor at end)
useGr <- as.list(useKnoMo[,2])
names(useGr) <- useKnoMo[,1]
termMod <- lapply(modTy[useModT], function(x) sapply(knownMods[useKnoMo[,1]], function(y) y %in% x)) # any terminal or internal
chMultNC <- sapply(lapply(modTy[useModT], function(x) sapply(knownMods[useKnoMo[1:4,1]], function(y) y %in% x)),sapply,sum) #which known (exclusive) N or C-term modifs present in modTy
if(any(unlist(termMod[[1]]))) { # terminal modifs exist ..
combTerm <- sapply(termMod[[1]], sum, na.rm=TRUE)
combTerm <- matrix(0+combTerm, nrow=1, dimnames=list(NULL,names(combTerm))) # matrix, 2nd and later lines: which modification(pairs) need to be done later !!
} else combTerm <- matrix(nrow=0, ncol=2, dimnames=list(NULL,c("Nterm","Cterm")))
## reduce/adjust init testing so that no mutually exclusive modifs remain present
if(any(chMultNC[c("Nterm","Cterm"),] >1)) {
modTy <- lapply(modTy,function(x) x[c(which(x %in% knownMods[["Nterm"]])[1],which(x %in% knownMods[["Cterm"]])[1],
which(x %in% unlist(knownMods[-1*match(c("Nterm","Cterm","intern"),names(knownMods))] ))) ] )
if(!silent) message(" ",fxNa," avoid exclusive modifications : adjusting modTy$basMod modifications to ",modTy$basMod," ")
}
if(debug) {message(" .. xxcountPotModifAAs1 \n")}
## make table 'cou' with count of modifications : count no of AA for dependent modifs for basMod/varMo2 (varMo2 wo dephospho -> use to create varMod later)
## NEW CHANGES 2oct19: add col with protein-index !
## 1st step : 'protIndex', 'isTerm' terminal info in pepTSup
## 2nd : make non-redund (?, need index of orig ?)
## how to integrate shared between mult prot ??? (accompanying list -of same length- with prot indexes ?
## make pep seq unique within prot ? (if no terminal/internal mixing ?)
## .. before counting aa spec events per/peptide
cou <- lapply(modTy[useModT],function(x) if(length(x)>0) .countModif(pepTab[,"seq"], modTyp=x, specAAMod, knownMods=knownMods)) # count $basMod (ie wo $varMod)
if(debug) {message(" .. xxcountPotModifAAs2\n")} #,chMod=chMod
## consider max number of optional modifications : (eg max phospho & max de-pho )
if(length(maxMod) >0) {
for(i in names(cou)) {
## complete cou : search and modify which parts contain terminal modif (not yet integrated to cou)
chTerm <- colnames(cou[[i]]) %in% c(knownMods$NCterm)
## mark internal
chInt <- pepTab[,"ty"]=="inter"
chMoTy <- colnames(cou[[i]]) %in% c(knownMods$intern)
if(any(chInt) & any(chMoTy)) {
cou[[i]][which(chInt),which(chMoTy)] <- 1 }
## mark various variants of terminal
if(any(chTerm)) for(k in which(chTerm)) {
ch2 <- pepTSup[,"isTerm"] >0
if(any(ch2)) cou[[i]][which(ch2),k] <- 1 }
chTerm <- colnames(cou[[i]]) %in% c(knownMods$Nterm)
if(any(chTerm)) for(k in which(chTerm)) {
ch2 <- pepTab[,"ty"] =="Nter"
if(any(ch2)) cou[[i]][which(ch2),k] <- 1 }
chTerm <- colnames(cou[[i]]) %in% c(knownMods$Cterm)
if(any(chTerm)) for(k in which(chTerm)) {
ch2 <- pepTab[,"ty"] =="Cter"
if(any(ch2)) cou[[i]][which(ch2),k] <- 1 }
chTerm <- colnames(cou[[i]]) %in% c(knownMods$spcNterm)
if(any(chTerm)) for(k in which(chTerm)) { # spcNterm : set non-terminal to 0
ch2 <- pepTab[,"ty"] !="Nter"
if(any(ch2)) cou[[i]][which(ch2),k] <- 0 }
chTerm <- colnames(cou[[i]]) %in% c(knownMods$spcCterm)
if(any(chTerm)) for(k in which(chTerm)) { # spcCterm; set non-terminal to 0
ch2 <- pepTab[,"ty"] !="Cter"
if(any(ch2)) cou[[i]][which(ch2),k] <- 0 }
## complete cou : correct maxMod
chMaxM <- colnames(cou[[i]]) %in% names(maxMod) # see if any defined type of modif with max number (of modif) to consider is present
if(any(chMaxM)) for(k in which(chMaxM)) { # loop along modifs
chLi <- cou[[i]][,k] > maxMod[colnames(cou[[i]])[k]]
if(any(chLi)) cou[[i]][which(chLi),k] <- maxMod[colnames(cou[[i]])[k]]
}}}
if(debug) {message(" .. xxcountPotModifAAs3\n")} #
if(any(sapply(termMod[[1]][c("Nterm","Cterm","NCterm")], sum) >1)) message(" ",fxNa," NOTE : MULTIPLE terminal modifications for ","(finish by new fx)") # make function to give row&colnames of elements >thrsh
## so far the special AA-linked modifs are counted
## complete cou : search and modify which parts contain terminal modif (not yet integrated to cou) ; limit to fixed modif ?
if(debug) {message(" .. xxcountPotModifAAs3b \n")}
## mutually excluding modifs
whMod <- sapply(modTy, function(x) any(nchar(x) >0))
if(any(whMod)) whMod <- names(modTy)[which(whMod)[1]] else message(fxNa,"don't understand which group of modifications to treat !?!")
## make 'varMo2' (with de-phspho) if given in 'modTy$varMod'
if(all(c("p","q") %in% modTy$varMod,"p" %in% colnames(cou$varMod))) { # for not running de-phospho alone ! (use cou$varMo2 ONLY for combinations)
cou$varMo2 <- cbind(cou$varMod, q=cou$varMod[,"p"])}
## making $varMo2 (including potential de-phospho) is useful but increases memory charge ! avoid !?!
if(debug) {message(" .. xxcountPotModifAAs4 \n")}
list(cou=cou,combTerm=combTerm) }}
#' @export
.countModif <- function(sequ,modTyp,specAAMod,knownMods,detailedCount=FALSE){
## count for all protein 'sequ' the occurrence of modification types defined in list 'modTyp' (only if in names(specAAMod))
## 'modTyp'.. character vector
## 'specAAMod','knownMods' for matching modification type to single letter AA code
## 'detailedCount' .. if TRUE return list of matrixes with counts per 'sequ' (rows) & all elements (AA) of each modTyp
## (otherwise) default return matrix with 'sequ' (rows) and sum of counts per 'modTyp' (cols)
fxNa <- ".countModif : "
if(is.list(modTyp)) {
if(is.list(modTyp[[1]])) message(fxNa," argument 'modTyp' not designed as list of lists (will lose/append layers)")
modTyp <- unlist(modTyp) }
modTyp <- modTyp[which(modTyp %in% unlist(knownMods))]
msg <- " 'modTyp' seems empty -nothing to count !"
if(is.null(modTyp)) stop(fxNa,msg) else {if(all(modTyp== "")) stop(fxNa,msg)}
chModTy <- modTyp %in% names(specAAMod)
exclTy <- if(any(!chModTy)) modTyp[which(!chModTy)] else NULL
addBl <- FALSE
if(all(!chModTy) | length(sequ) <1) count <- matrix(0,nrow=length(sequ), ncol=length(chModTy), dimnames=list(sequ, modTyp)) else {
modTyp <- modTyp[which(chModTy)]
chM <- lapply(modTyp,function(x) {z <- specAAMod[[which(names(specAAMod) %in% x)]]; if(length(z) >0) z else NULL}) # AA-letters to consider for each modTyp
names(chM) <- modTyp
chM <- chM[which(sapply(chM,length) >0)]
if(length(sequ) <2) { sequ <- c(sequ,"")}
if(sequ[length(sequ)]=="" & length(sequ)>1) addBl <- TRUE # remove last sequence (since empty)
if(all(sapply(chM,length)==0)) return(matrix(0, nrow=length(sequ)-addBl, ncol=length(modTyp), dimnames=list(sequ[1:(length(sequ)-addBl)],modTyp)))
chN <- unlist(chM)
names(chN) <- rep(names(chN), sapply(chN,length))
count <- sapply(chN,function(x) .countLET(sequ,x,silent=TRUE))
if(!detailedCount) { tmp <- matrix(nrow=length(sequ), ncol=length(chM), dimnames=list(sequ,names(chM)))
preCol <- 0
for(i in 1:length(chM)) {
tmp[,i] <- if(length(chM[[i]]) <2) count[,preCol+1] else rowSums(count[,preCol+(1:length(chM[[i]]))])
preCol <- preCol +length(chM[[i]])}
count <- if(length(exclTy) >0) cbind(matrix(0, nrow=length(sequ), ncol=length(exclTy), dimnames=list(sequ,exclTy)),tmp) else tmp
} }
if(addBl) count <- matrix(count[1,], nrow=1, dimnames=list(sequ[1],colnames(count)))
count }
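## Minimal usage sketch (not run; 'specAAMod' & 'knownMods' as from AAfragSettings(outTy="all")) :
## sett <- AAfragSettings(outTy="all")
## .countModif(c("PEPTIDES","SESAME"), modTyp=c("p","h"), specAAMod=sett$specAAMod, knownMods=sett$knownMods)
## # matrix with one row per sequence, counting eg potential phospho-sites for 'p'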
#' @export
.countLET <- function(sequ,countCh="K",silent=FALSE){
## return numeric vector of counts for 'countCh' (single element !) in each element of 'seq'
if(length(countCh) >1) {countCh <- countCh[1]
if(!silent) message(" .countLET : trim argument 'countCh' to length 1 !")}
x <- grep(countCh,sequ)
out <- rep(0,length(sequ))
if(length(x) >0) out[x] <- nchar(sequ[x]) - nchar(gsub(countCh,"",sequ[x]))
names(out) <- sequ
out }
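## Minimal usage sketch (not run) :
## .countLET(c("PEPTIDE","KEKE"), countCh="E")    # counts of 'E' per sequence, ie 2 and 2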
## ----- end of file : R/countPotModifAAs.R -----
#' Evaluate selected lines of pepTab (iso-mass) for preferential cutting sites
#'
#' Evaluate selected lines of pepTab (iso-mass) for preferential cutting sites. Such sites are taken by default from \code{.prefFragPattern()} simplified from a publication
#' by the Kelleher group (Haverland 2017, J Am Soc Mass Spectrom) or can be furnished by the user.
#' @param z (matrix) main input, must contain cols specified as seqCol and "no","tailAA","precAA"
#' @param prefFragPat (matrix) specifies preferential fragmentation (which combination of AA to consider cols cTer,nTer,score), default made by \code{.prefFragPattern()}
#' @param seqCol (character) column names for the column containing the sequence to search for preferential cutting sites
#' @param silent (logical) suppress messages
#' @param callFrom (character) allows easier tracking of message(s) produced
#' @return line ID-numbers (pepTab[,"no"]) for those below median score (ie to remove from pepTab) or NULL if nothing to remove due to preferential fragmentation
#' @seealso \code{\link{makeFragments}}
#' @examples
#' peTab <- matrix(c("9","13","14","15", "LPVIAGHEAAG","PVIAGHEAAGI","EKKPFSI","KKPFSIE",
#' "P","L","E","E", "I","V","E","E"),nr=4,dimnames=list(NULL,c("no","seq","precAA","tailAA")))
#' evalIsoFragm(peTab)
#' @export
evalIsoFragm <- function(z,prefFragPat=NULL,seqCol="seq",silent=FALSE,callFrom=NULL){
## evaluate selected lines of pepTab (iso-mass) for preferential cutting sites
## 'z' .. matrix, must contain cols specified as seqCol and "no","tailAA","precAA"
## 'prefFragPat' .. matrix specifying preferential fragmentation (which combination of AA to consider cols cTer,nTer,score)
## return line ID-numbers (pepTab[,"no"]) for those below median score (ie to remove from pepTab) or NULL if nothing to remove due to preferential fragmentation
fxNa <- wrMisc::.composeCallName(callFrom, newNa="evalIsoFragm")
if(is.null(prefFragPat)) prefFragPat <- .prefFragPattern()
z <- as.matrix(z)
nAA <- nchar(z[,seqCol])
chRep <- duplicated(nAA, fromLast=FALSE) | duplicated(nAA, fromLast=TRUE)
if(all(!chRep)) return(NULL) else {
if(any(!chRep)) {z <- z[which(chRep),]; nAA <- nchar(z[,seqCol])} # remove (unexpected case of) single instance of given AA length
chLe <- table(nAA)
if(length(chLe) >1) {
z <- unlist(by(z, nAA, .evalIsoFra, prefFragPat=prefFragPat, seqCol=seqCol))
} else .evalIsoFra(z, prefFragPat=prefFragPat, seqCol=seqCol)
}}
#' @export
.evalIsoFra <- function(x,prefFragPat=NULL,seqCol="seq") {
## evaluate selected lines of pepTab of SAME AA-length AND iso-mass for preferential cutting sites
## return line ID-numbers (pepTab[,"no"]) for those below median score (ie to remove from pepTab)
x <- as.matrix(x)
nAA <- nchar(x[,seqCol])
prefSi <- if(is.null(prefFragPat)) .prefFragPattern() else prefFragPat
sc <- rep(0,nrow(x))
out <- NULL
locSi <- paste0(x[,"precAA"],substr(x[,seqCol],1,1)) %in% paste0(prefSi[,1],prefSi[,2]) # N-terminal fragmentation sites
if(any(locSi)) sc[which(locSi)] <- prefSi[match(paste0(x[which(locSi),"precAA"], substr(x[which(locSi),seqCol],1,1)), paste0(prefSi[,1],prefSi[,2])),3]
nAA <- nchar(x[,seqCol])
locSi <- paste0(substr(x[,seqCol], nAA, nAA), x[,"tailAA"]) %in% paste0(prefSi[,1],prefSi[,2]) # C-terminal fragmentation sites
if(any(locSi)) sc[which(locSi)] <- sc[which(locSi)] + prefSi[match(paste0(substr(x[which(locSi), seqCol],nAA,nAA), x[which(locSi), "tailAA"]), paste0(prefSi[,1],prefSi[,2])), 3]
if(any(sc >0)) {med <- stats::median(sc, na.rm=TRUE) # evaluate scores for N & C-term
goodSc <- which(sc >= if(med ==0) 0.1 else med)
if(length(goodSc) < length(sc)) out <- as.integer(x[-1*goodSc,1])}
out }
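## Minimal usage sketch (not run; iso-mass fragments of same AA-length, columns as in pepTab) :
## x <- cbind(no=c("14","15"), seq=c("EKKPFSI","KKPFSIE"), precAA=c("E","E"), tailAA=c("E","E"))
## .evalIsoFra(x)     # returns ID(s) from column 'no' scoring below the median (here 14), ie lines to remove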
#' @export
.prefFragPattern <- function() {
## return data.frame with pattern of preferential fragmentation sites x|y (1st & 2nd col) and score (3rd col)
## here a simplified version (for the elaborate version see Kelleher group : Haverland 2017, J Am Soc Mass Spectrom)
AA <- wrProteo::AAmass()[1:20] # so far exclude ornithine O & selenocysteine U
pat <- data.frame(cTer=c(rep(c("D","E"), each=length(AA)), names(AA)[c(-4,-6)]), nTer=c(rep(names(AA),2), rep("P",length(AA)-2 )), score=0.5)
## include K, L, V ??
pat[which((pat[,1]=="D" | pat[,1]=="E") & pat[,2]=="P"),3] <- 1
pat }
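## Minimal usage sketch (not run) : inspect the default pattern
## head(.prefFragPattern())    # data.frame with columns cTer, nTer & score (D|x, E|x and x|P sites)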
## ----- end of file : R/evalIsoFragm.R -----
#' Fragment protein or peptide sequence
#'
#' Makes internal/terminal fragments of a SINGLE peptide/protein input (as single letter amino-acid code) and returns list of all possible sequences ($full, $Nter, $Cter, $inter).
#'
#' @param sequ (character, length=1) sequence used for fragmenting, as mono-amino-acid letter code (so that cutting will be performed between all the letters/characters)
#' @param minSize (integer) min number of AA residues for considering peptide fragments
#' @param maxSize (integer) max number of AA residues for considering peptide fragments
#' @param internFragments (logical) logical (return only terminal fragments if 'FALSE')
#' @param separTerm (logical) if 'TRUE', separate N-terminal, C-terminal and internal fragments in list
#' @param keepRedSeqs (logical) if 'FALSE' remove fragments with redundant content (but which may be from different origins in 'sequ'); removing redundant fragments works so far only when Nterm/Cterm/intern are not separated as list
#' @param prefName (logical) alternative name for all fragments (default the sequence itself), avoid separators '.' and '-'
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a list ($full, $Nter, $Cter, $inter) if \code{separTerm=TRUE}, otherwise a named character vector of all fragment sequences
#' @seealso \code{\link{makeFragments}}; \code{\link[wrProteo]{convAASeq2mass}}
#' @examples
#' fragmentSeq("ABCDE")
#' fragmentSeq("ABCDE", minSize=3, internFragments=FALSE)
#' fragmentSeq("ABCDE", minSize=3, internFragments=TRUE)
#'
#' ## Run multiple peptides/proteins
#' twoPep <- cbind(c("a","ABCABCA"), c("e","EFGEFGEF"))
#' apply(twoPep, 2, function(x) fragmentSeq(x[2], mi=3, kee=FALSE, sep=TRUE, pre=x[1]))
#'
#' ## Ubiquitin example
#' P0CG48 <- "MQIFVKTLTGKTITLEVEPSDTIENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVLRLRGG"
#' system.time( fra1 <- (fragmentSeq(P0CG48, mi=5, kee=FALSE))) # < 0.5 sec
#'
#' @export
fragmentSeq <- function(sequ,minSize=3,maxSize=300,internFragments=TRUE,separTerm=FALSE,keepRedSeqs=TRUE,prefName=NULL,silent=FALSE,callFrom=NULL){
## make internal/terminal fragments as list ($full, $Nter, $Cter, $inter) of SINGLE input sequence 'sequ' and return as list
## needs wrMisc::firstOfRepeated()
fxNa <- wrMisc::.composeCallName(callFrom,newNa="fragmentSeq")
tx <- c("argument '","minSize","' shoud be of length 1 (truncating !!)","sequ")
if(length(sequ) >1) {if(!silent) message(fxNa,tx[c(1,4,3)]); sequ <- sequ[1]}
if(length(minSize) <1) {minSize <- 3; if(silent) message(fxNa,"setting 'minSize' to default =3")}
maxSize <- c(minSize, maxSize)
minSize <- min(minSize, na.rm=TRUE)
maxSize <- max(maxSize, na.rm=TRUE)
if(is.null(prefName)) prefName <- if(length(unique(names(sequ)))==length(sequ)) names(sequ) else sequ
if(nchar(sequ) < minSize) {
if(!silent) message(fxNa," sequence given as 'sequ' is already shorter than 'minSize'"); return(NULL)}
cut1 <- .termPepCut(sequ, mi=minSize, ma=maxSize ,sepNC=TRUE, mainName=prefName)
## now cut1 may be list -> force to list !!
if(!is.list(cut1)) cut1 <- list(cut1)
## make internal fragments (run loop to reduce 'sequ' at both ends by 1 unit & re-run terminal fragments )
if(internFragments) { nCha <- nchar(sequ)
frTo <- cbind(from=2:floor(nCha/2), to=(nCha-1):ceiling(1+nCha/2))
frTo <- cbind(frTo,seqc=apply(frTo,1, function(x) substr(sequ,x[1],x[2])))
chLe <- nchar(frTo[,"seqc"]) <minSize
if(!all(chLe)) { if(any(chLe)) frTo <- frTo[which(!chLe),]
cut1$inter <- if(nrow(frTo) >1) {unlist(apply(frTo[,c(3,1)],1, function(x) .termPepCut(x[1], mi=minSize, ma=maxSize, indexOffs=as.numeric(x[2])-1, mainName=prefName,sepNC=FALSE)))
} else .termPepCut(frTo[1,3], mi=minSize, ma=maxSize, indexOffs=as.numeric(frTo[1,1])-1, mainName=prefName, sepNC=FALSE) }}
nFrag <- sum(sapply(cut1,length))
chRed <- unique(unlist(cut1))
if(nFrag > length(chRed) & !silent) message(fxNa,nFrag- length(chRed)," out of ",if(nFrag >10e3)c(" ~",signif(nFrag,4)) else nFrag," fragments not unique")
if(!keepRedSeqs) {
uniq <- wrMisc::naOmit(match(chRed,unique(cut1$Nter)))
if(length(cut1$Nter) >0) {
if(length(uniq) <length(cut1$Nter)) cut1$Nter <- cut1$Nter[uniq] # keep only non-redundant of Nter
if(length(uniq) >0) chRed <- chRed[-1*uniq] }
uniq <- wrMisc::naOmit(match(chRed, unique(cut1$Cter)))
if(length(chRed) >0 & length(cut1$Cter) >0) {
if(length(uniq) <length(cut1$Cter)) cut1$Cter <- cut1$Cter[uniq] # keep only non-redundant of Cter
if(length(uniq) >0) chRed <- chRed[-1*uniq] }
uniq <- wrMisc::naOmit(match(chRed, unique(cut1$inter)))
if(length(chRed) >0 & length(cut1$inter) >0) {
if(length(uniq) <length(cut1$inter)) cut1$inter <- cut1$inter[uniq] } # keep only non-redundant of inter
}
if(separTerm) cut1 else unlist(cut1) }
#' @export
.termPepCut <- function(pe,mi,ma=1000,se1=".",se2="-",mainName=NULL,sepNC=FALSE,indexOffs=NULL) {
## make named character vector of sequential terminal fragments
## 'pe' .. single (!) peptide (character vector, length=1)
## 'mi','ma' .. min/max fragment length, should be <= length(pe) (otherwise the full length of 'pe' is ALWAYS returned !)
## 'se1', 'se2' .. separators for adding numbers to specify partial/fragment locations
## 'sepNC' .. if TRUE, separate fragments from both ends as $Nter & $Cter in list
## 'indexOffs' .. offset to add for custom numbering in names (numeric, length=1), ie '1' will already increase by +1
mi <- min(mi,nchar(pe)) # can't be shorter than 'pe'
if(nchar(pe) <ma) ma <- nchar(pe)
if(is.null(mainName)) mainName <- pe
indexOffs <- if(is.null(indexOffs)) 0 else as.numeric(indexOffs)
names(pe) <- paste(mainName,se1,indexOffs+1,se2,indexOffs+nchar(pe),sep="")
if(mi==nchar(pe) & ma==nchar(pe)) return(if(sepNC) list(full=pe) else pe)
x <- substring(pe,1,mi:min(ma,nchar(pe)-1)) # N-term part
chMa <- nchar(x) > ma
if(any(chMa)) x <- x[which(!chMa)]
y <- if(nchar(pe) > mi) substring(pe, (2:(nchar(pe)-mi+1)), nchar(pe)) else "" # C-term part
chMa <- nchar(y) > ma
if(any(chMa)) y <- y[which(!chMa)]
basInd <- list(xL=1, xU=mi:min(nchar(pe)-1, ma), yL=max(nchar(pe)-ma+1,2):(nchar(pe)-mi+1), yU=nchar(pe))
indexNa <- if(indexOffs==0) basInd else lapply(basInd, function(x) x +indexOffs[1])
names(x) <- paste(mainName,se1,indexNa[[1]],se2,indexNa[[2]],sep="")
if(identical(y,"")) {if(sepNC) list(Nter=x) else x} else {
names(y) <- if(identical(y,"")) "" else paste(mainName,se1,indexNa[[3]],se2,indexNa[[4]],sep="")
fu <- if(nchar(pe) > ma) NULL else pe
if(length(fu)>0) names(fu) <- paste(mainName,se1,1+indexOffs,se2,nchar(fu)+indexOffs,sep="")
if(sepNC) list(full=fu, Nter=x, Cter=y) else c(fu, x, y)}
}
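## Minimal usage sketch (not run) :
## .termPepCut("ABCDEF", mi=3, sepNC=TRUE)
## # list with $full ("ABCDEF"), $Nter ("ABC","ABCD","ABCDE") and $Cter ("BCDEF","CDEF","DEF"),
## # each fragment named like 'ABCDEF.1-3' to trace its location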
#' @export
.CtermPepCut <- function(pe,mi,se1=".",se2="-",mainName=NULL,indexOffs=NULL) {
## make named character vector of sequential terminal fragments
## 'pe' .. single peptide (character vector, length=1)
## 'mi' .. min fragment length
## 'se1', 'se2' .. separators for adding numbers to specify partial/fragment locations
## 'indexOffs' .. offset for custom numbering in names (numeric, length=1)
mi <- min(mi,nchar(pe)) # can't be shorter than 'pe'
y <- if(nchar(pe) > mi) substring(pe,(2:(nchar(pe)-mi+1)), nchar(pe)) else "" # C-term part
namX <- paste(pe,se1,"1",se2,mi:nchar(pe),sep="")
if(is.null(mainName)) mainName <- if(length(names(pe)) >0) names(pe)[1] else pe
basInd <- list(xL=1, xU=mi:nchar(pe), yL=2:(nchar(pe) -mi +1), yU=nchar(pe))
indexNa <- if(is.null(indexOffs)) basInd else lapply(basInd, function(x) x +indexOffs[1])
names(y) <- if(identical(y,"")) "" else paste(mainName,se1,indexNa[[3]],se2,indexNa[[4]],sep="")
y }
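## Minimal usage sketch (not run) :
## .CtermPepCut("ABCDEF", mi=3)    # C-terminal ladder "BCDEF","CDEF","DEF" (named 'ABCDEF.2-6' etc)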
#
#' @export
.NtermPepCut <- function(pe,mi,se1=".",se2="-",mainName=NULL,sepNC=FALSE,indexOffs=NULL) {
## make named character vector of sequential terminal fragments
## 'pe' .. single peptide (character vector, length=1)
## 'mi' .. min fragment length
## 'se1', 'se2' .. separators for adding numbers to specify partial/fragment locations
## 'sepNC' .. if TRUE, separate fragments from both ends as $Nter & $Cter in list
## 'indexOffs' .. offset for custom numbering in names (numeric, length=1)
## bugfix : won't return full-length query any more
mi <- min(mi, nchar(pe)) # can't be shorter than 'pe'
x <- substring(pe, 1, mi:(nchar(pe) -1)) # N-term part
namX <- paste(pe,se1,"1",se2,mi:(nchar(pe) -1), sep="")
if(is.null(mainName)) mainName <- pe
basInd <- list(xL=1, xU=mi:(nchar(pe)-1), yL=2:(nchar(pe)-mi), yU=nchar(pe))
indexNa <- if(is.null(indexOffs)) basInd else lapply(basInd, function(x) x+indexOffs[1])
names(x) <- paste(mainName,se1,indexNa[[1]],se2,indexNa[[2]],sep="")
x }
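## Minimal usage sketch (not run) :
## .NtermPepCut("ABCDEF", mi=3)    # N-terminal ladder "ABC","ABCD","ABCDE" (named 'ABCDEF.1-3' etc)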
## ----- end of file : R/fragmentSeq.R -----
#' Identify Fixed Modifications
#'
#' Identify peptide/protein fragment based on experimental m/z values 'expMass' for given range of aa-length.
#' Internally all possible fragments will be predicted and their mass compared to the experimental values (argument \code{expMass}).
#'
#' @param prot (character) amino-acid sequence of peptide or protein
#' @param expMass (numeric) experimental masses to identify peptides from
#' @param minFragSize (integer) min number of AA residues for considering peptide fragments
#' @param maxFragSize (integer) max number of AA residues for considering peptide fragments
#' @param indexStart (integer) for starting at correct index (if not 1)
#' @param suplPepTab (matrix) additional peptides to be add to theoretical peptides
#' @param internFra (logical) decide whether internal fragments should be considered
#' @param filtChargeCatch (logical) by default removing of all fragments not containing a (polar) charge-catching residue
#' @param maxMod (integer) maximum number of residue modifications to be considered in fragments (values >1 will increase complexity and RAM consumption)
#' @param modTy (character) type of fixed and variable modifications
#' @param specModif (list) supplemental custom fixed or variable modifications (eg Zn++ at given residue)
#' @param knownMods (character) optional custom alternative to \code{AAfragSettings(ou="all")$knownMods}
#' @param identMeas (character) default 'ppm'
#' @param limitIdent (character) threshold for identification in 'identMeas' units
#' @param filtAmbiguous (logical) allows filtering/removing ambiguous results (ie same mass peptides)
#' @param recalibrate (logical or numeric) may be direct recalibration-factor (numeric,length=1), if 'TRUE' fresh determination of 'recalibFact' or 'FALSE' (no action); final recalibration-factor used exported in result as $recalibFact
#' @param chargeCatchFilter (logical) optionally remove all peptides not containing charge-catch AAs (K, R, H, defined via \code{.chargeCatchingAA()} )
#' @param massTy (character) 'mono' or 'average'
#' @param prefFragPat (numeric) pattern for preferential fragmentation (see also Haverland 2017), if \code{NULL} default will be taken (in function \code{evalIsoFragm}) from \code{.prefFragPattern()}
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @param debug (logical) additional messages and objects exported to current session for debugging
#' @return list, ie result of massMatch() on 'pepTab' and 'expMass'
#' @seealso \code{\link{makeFragments}}
#' @examples
#' protP <- c(protP="PEPTIDEKR")
#' obsMassX <- cbind(a=c(199.1077,296.1605,397.2082,510.2922,625.3192),
#' b=c(227.1026,324.1554,425.2031,538.2871,653.3141),
#' x=c(729.2937,600.2511,503.1984,402.1507,289.0666),
#' y=c(703.3145,574.2719,477.2191,376.1714,263.0874))
#' rownames(obsMassX) <- c("E","P","T","I","D") # all 1 & 7 ions not included
#' identP1 <- identifFixedModif(prot=protP, expMass=as.numeric(obsMassX), minFragSize=2,
#' maxFragSize=7,modTy=list(basMod=c("b","y"))) # looks ok
#' identP2 <- identifFixedModif(prot=protP, expMass=as.numeric(obsMassX), minFragSize=2,
#' maxFragSize=7, modTy=list(basMod=c("a","x"), varMod=c("h","o","r","m")))
#' head(identP1$preMa,n=17) # predicted masses incl fixed modif
#' head(identP2$preMa,n=17) # predicted masses incl fixed modif
#' @export
identifFixedModif <- function(prot,expMass,minFragSize=5, maxFragSize=60,indexStart=1,suplPepTab=NULL,internFra=TRUE,filtChargeCatch=TRUE,
maxMod=c(p=3,h=1,k=1,o=1,m=1,n=1,u=1,r=1,s=1),modTy=NULL,specModif=NULL, knownMods=NULL, identMeas="ppm",limitIdent=5,filtAmbiguous=FALSE,
recalibrate=FALSE,chargeCatchFilter=TRUE,massTy="mono",prefFragPat=NULL,silent=FALSE,debug=FALSE,callFrom=NULL){ #
## identify predicted mass based on 'prot' (AA-sequence) compared to 'expMass' for given range of aa-length
## return list, ie result of massMatch() on 'pepTab' and 'expMass'
## need to consider min intensity of experim values ??
fxNa <- wrMisc::.composeCallName(callFrom, newNa="identifFixedModif")
if(is.null(maxFragSize)) maxFragSize <- 400
if(debug) {message(" .. xxidentifFixedModif0 \n")} #
AAmass <- wrProteo::AAmass(massTy=massTy, inPept=TRUE)
mH20 <- wrProteo::massDeFormula("2HO", massTy=massTy, callFrom=fxNa)
docTi <- rep(NA,7)
names(docTi) <- c("ini_identifFixedModif","makeFragments","countPotModifAAs","addMassModif","finUniqCheck","findCloseMatch","recalib")
docTi[1] <- Sys.time() #
recalibFact <- 0
if(length(recalibrate)==1) {
if(any(recalibrate %in% c("F","T","FALSE","TRUE"))) recalibrate <- as.logical(recalibrate) else {
if(is.numeric(recalibrate)) {recalibFact <- recalibrate; recalibrate <- FALSE }} }
##
## basic mass predictions : peptides (wo modification)
massIni <- cbind(na=names(prot), se=prot, ma=wrProteo::convAASeq2mass(prot, seqName=TRUE, callFrom=fxNa))
## make table of peptides (wo considering optional modifications)
if(debug) {message(" .. xxidentifFixedModif1 \n")} #
pepTab <- makeFragments(protTab=massIni, minFragSize=minFragSize,maxFragSize=maxFragSize, internFra=internFra,
knownMods=knownMods, massTy=massTy, prefFragPat=prefFragPat, silent=silent,debug=debug, callFrom=fxNa)
if(!identical(indexStart,1) & !silent) message(fxNa," ** increase pepTab index from ",min(as.integer(pepTab[,"no"]))," by ", indexStart," to ",min(as.integer(pepTab[,"no"])+indexStart-1))
if(!identical(indexStart,1)) pepTab[,"no"] <- as.integer(pepTab[,"no"]) +indexStart -1
if(nrow(pepTab) <4) message(fxNa," NOTE : only ", nrow(pepTab)," initial fragments predicted from makeFragments !!!")
docTi[2] <- Sys.time() # makeFragments() consumes 95-99% of time !!
## need to document number of peptides removed (in nRemPep) !!
## optional filter to remove all peptides wo charge-catching AAs
nRemPep <- 0
if(filtChargeCatch) {
chAA <- .chargeCatchingAA()[,1]
filtCh <- unique(unlist(sapply(chAA, grep, pepTab[,2]))) # lines to keep
if(length(filtCh) < nrow(pepTab)) {
nRemPep <- nrow(pepTab) -length(filtCh)
if(!silent) message(fxNa," removing ",nRemPep," out of ",nrow(pepTab)," (initial) peptides not containing any charge-catching residues")
pepTab <- pepTab[filtCh,]
} }
## add custom/specific single location mass modifications (eg bound ions)
if(debug) {message(" .. xxidentifFixedModif2 - ready for single location mass modifications (bound ions) \n")}
if(length(specModif) >0) pepTab <- .singleSpecModif(pepTab, specModif, callFrom=fxNa, silent=silent, debug=debug)
if(length(suplPepTab) >0) {if(ncol(pepTab)==ncol(suplPepTab)) pepTab <- rbind(suplPepTab, pepTab) else message(fxNa," Problem with incompatible 'suplPepTab', ignoring !")}
if(debug) {message(" .. xxidentifFixedModif3 ++\n")}
if(chargeCatchFilter) {
chaLi <- unique(unlist(sapply(.chargeCatchingAA()[,1], function(x) grep(x, pepTab[,"seq"])) ))
if(length(chaLi) >0) {
if(length(chaLi) <nrow(pepTab)) pepTab <- pepTab[chaLi,]
} else {
message(fxNa," PROBLEM : NO peptides remaining when filtering for peptides containing one of ",nrow(.chargeCatchingAA())," charge-catching AAs !! (keep only 1st & 2nd)")
pepTab <- pepTab[1:2,]} }
## fixed modifications .. (later consider variable modifs only when fixed modifs were found in experimental data)
modTb <- list(basMod=checkModTy(modTy)$basMod, varMod=NULL, varMo2=NULL) # copy for treating fixed modif only
cou <- countPotModifAAs(pepTab=pepTab, modTy=modTb, maxMod=maxMod, silent=silent, debug=debug, callFrom=fxNa)
docTi[3] <- Sys.time() #
preMa <- addMassModif(cou=cou$cou, pepTab=pepTab, combTerm=cou$combTerm, modTy=modTb, basVarMod="basMod", silent=silent, debug=debug, callFrom=fxNa) # wo $varMod
chColN <- colnames(preMa[[1]])=="mass"
if(any(chColN)) colnames(preMa[[1]])[which(colnames(preMa[[1]])=="mass")] <- "finMass"
docTi[4] <- Sys.time() #
if(debug) {message(" .. xxidentifFixedModif4\n")}
preMa <- preMa$pepTab
docTi[5] <- Sys.time() #
## compare 'preMa' & 'expMass' : findCloseMatch(),.compareByPPM(),.compareByDiff()
## now filter experim values to go within range of predicted (no sense in testing even further ..)
expMass <- as.numeric(expMass) + recalibFact
names(expMass) <- 1:length(expMass) #
preMaRa <- range(as.numeric(preMa[,"finMass"]), na.rm=TRUE) +c(-1,1)
chExpM <- expMass > preMaRa[1] & expMass < preMaRa[2]
if(all(!chExpM)) { if(!silent) message(fxNa," no hits found !!")
return(list(massMatch=list(), preMa=preMa, pepTab=pepTab, recalibFact=recalibFact, recalibData=NULL, docTi=docTi))
} else {
if(!silent) message(fxNa,sum(chExpM)," out of ",length(chExpM)," experim masses in range of ",nrow(preMa)," predicted (max ",signif(preMaRa[2]-0.5,3),")")
if(any(!chExpM)) expMass <- expMass[which(chExpM)]
## 1st run of identification (and later address/identify fixed modifications) :
## last 'complete' table of fragments before 1st run of matching : xxFrag5$preMa
## massMatch1 is simple list with index-names of matches close enough & mass values
predMa <- as.numeric(preMa[,"finMass"])
names(predMa) <- preMa[,"no"]
## also limit expMass to stay within range of predicted (and adjust names)
massMatch1 <- wrMisc::findCloseMatch(x=predMa, y=expMass, compTy=identMeas, limit=limitIdent, sortMatch=FALSE, callFrom=fxNa, silent=silent) # 'sortMatch'=FALSE to avoid inverting x & y
## 'x' corresponds to a line of preMa, the name of each match refers to expMass
if(!silent) message(fxNa," 1st pass: compare ",nrow(preMa)," predicted (incl ",sum(!is.na(preMa[,"ambig"]))," ambiguous : ",
wrMisc::pasteC(unique(wrMisc::naOmit(preMa[,"ambig"]))),"\n with ",length(expMass)," input (measured) masses, found " ,length(massMatch1),
" groups of matches to experimental masses (total=",if(length(massMatch1)>0) sum(sapply(massMatch1,length)) else 0,"))")
if(length(massMatch1) <1) message("\n ** NO matches found !! **\n to masses like ",paste0(utils::head(preMa[,"finMass"]),collapse=" "),"...\n")
if(debug) {message(" .. xxFrag6\n")}
docTi[6] <- Sys.time() #
## problem : so far need also to export full pepTab for 2nd round search -> gains in RAM diminish ... ==> need to add test for var modif
##
## RECALIBRATE
dif <- NULL
if(length(massMatch1) <21) {
if(recalibrate) {recalibrate <- FALSE
if(!silent) message(fxNa," ",length(massMatch1)," matches are insufficient for determining a calibration factor") }
recalibFact <- 0; dif <- NULL}
if(recalibrate) {
chLe1 <- sapply(massMatch1,length)==1
if(sum(chLe1) > length(chLe1)/1.5 & sum(chLe1) >20) { # > 66.7% single hit and >20 pep
msg <- c(fxNa," RECALIBRATION : based on diff of ",sum(chLe1)," single hits : ")
if("diff" %in% identMeas) {
dif <- unlist(massMatch1[which(chLe1)]) # data finally used for recalibration
} else { # extract diff in case of ppm
expMaN <- massMatch1[which(chLe1)]
names(expMaN) <- NULL
expMaN <- names(unlist(expMaN)) # names of expMass to use
dif <- as.numeric(preMa[match(names(massMatch1[which(chLe1)]), preMa[,"no"]), "finMass"]) -expMass[expMaN]
}
} else {
chLe2 <- sapply(massMatch1,length) <4 # now also consider up to 3 matches
if(sum(chLe2) < length(chLe2)/10) {recalibFact <- 0; dif <- 0;
if(!silent) message("\n++++",fxNa," TROUBLE finding right data for recalibration ? (too few data below 4 matches)\n")
} else {
if(sum(chLe2) <15) { chLe2 <- sapply(massMatch1, length) <11
if(!silent) message(fxNa," opening recalibration to all sets of with up to 10 hits")
}
msg <- c(fxNa," RECALIBRATION : based on median diff of ",sum(chLe2)," muti-hits (below 4 hits): ")
if("diff" %in% identMeas) {
dif <- sapply(massMatch1[which(chLe2)], function(x) {xz <- abs(x); x[which(xz==min(xz))]})
} else {
match3 <- match3N <- massMatch1[which(chLe2)]
names(match3N) <- NULL
match3N <- names(unlist(match3N))
dif <- preMa[match(rep(names(match3), sapply(match3,length)), preMa[,"no"]), "finMass"] -expMass[match3N]}
} }
    recalibFact <- wrMisc::stableMode(dif, method="binning", silent=silent, callFrom=fxNa)      # mode of the mass-differences serves as recalibration factor
if(!silent) message(msg, signif(recalibFact,3)) }
docTi[7] <- Sys.time()
list(massMatch=massMatch1, preMa=preMa, pepTab=pepTab, recalibFact=recalibFact, recalibData=dif, docTi=docTi) }}
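## Minimal sketch (illustration only, hypothetical values) of the recalibration idea used above :
## the systematic offset between predicted and measured masses is estimated as the
## mode of their differences, which is then added to the measured masses.
# predMa <- 400 + sort(runif(30)) *800               # predicted masses (assumed)
# expMa <- predMa -0.0012 + rnorm(30, 0, 2e-4)       # 'measured' masses with a constant bias
# recalibFact <- wrMisc::stableMode(predMa - expMa, method="binning")
# head(expMa + recalibFact)                          # recalibrated measured masses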
#' @export
.singleSpecModif <- function(pepTab,specModif,nMaxMod=1,massTy="mono",callFrom=NULL,silent=FALSE,debug=FALSE) { #massIni
## 'pepTab' matrix of fragments (cols 'no','seq','orig','ty','seqNa','beg','end','precAA','tailAA','ambig','mass')
  ## note : at this level pepTab is typically for neutral peptides, thus comparison to ions will seem 1 H+ too low (or 1 e- too high)
## 'specModif' .. list with elements 'modOrigin' (sequence), 'modPos' (position within sequence), 'modMass' (digits, ie mass to add),
  ##   'modName' (name of modif), 'modFixed' (fixed or variable, logical)
## 'nMaxMod' .. (numeric) max number a given modification may occur
fxNa <- wrMisc::.composeCallName(callFrom,newNa=".singleSpecModif")
if(debug) silent <- FALSE
.extrSpcFeat <- function(x,spc,no=1) if(spc %in% names(x)) x[[which(names(x)==spc)]] else x[[no]]
if(debug) {message(" .. xxsingleSpecModif00 \n")}
if(length(specModif) >0) { if(!is.list(specModif)) specModif <- as.list(specModif)
if(length(specModif) <5) specModif <- NULL else {
nEl <- sapply(specModif,length)
    if(any(nEl > 1) & any(nEl==1)) { for(i in which(nEl==1)) specModif[[i]] <- rep(specModif[[i]], max(nEl,na.rm=TRUE))
      if(!silent) message(fxNa," .. augmenting ",wrMisc::pasteC(names(specModif)[which(nEl==1)])," to length ",max(nEl,na.rm=TRUE)) }
if(debug) {message(" .. xxsingleSpecModif0 \n")}
modOrigin <- .extrSpcFeat(specModif, spc="modOrigin", no=1)
modPos <- .extrSpcFeat(specModif, spc="modPos", no=2)
modMass <- as.numeric(.extrSpcFeat(specModif, spc="modMass", no=3))
modName <- .extrSpcFeat(specModif, spc="modName", no=4)
modFixed <- as.logical(.extrSpcFeat(specModif, spc="modFixed", no=5))}
msg <- " .. inconsistent length of args to 'specModif', ignoring 'specModif'"
if(length(unique(sapply(specModif[1:5], length))) >1) {if(!silent) message(fxNa,msg); specModif <- NULL}
nEl2 <- c(modOrigin=length(modOrigin), modPos=length(modPos), modMass=length(modMass), modName=length(modName), modFixed=length(modFixed))
if(length(unique(nEl2)) >1) {
unexpLe <- table(nEl2)
unexpLe <- which(nEl2==as.numeric(names(unexpLe)[which(unexpLe==min(unexpLe,na.rm=TRUE))]))
message(fxNa," Problem : 'specModif' part ",wrMisc::pasteC(names(unexpLe))," of length ",unexpLe," won't fit to rest !!") }
if(debug) {message(" .. xxsingleSpecModif1 \n")}
if(!is.numeric(modMass)) {if(!silent) message(fxNa," .. invalid 'modMass'"); specModif <- NULL}
if(!is.logical(modFixed)) {if(!silent) message(fxNa," .. invalid 'modFixed'"); specModif <- NULL}
}
oblNames <- c("orig","beg","end","mass","no","seqNa","ty","modSpec") # check 'pepTab' for these obligatory names
chNames <- match(oblNames,colnames(pepTab))
if(any(is.na(chNames))) stop(" Can't find obligatory colnames ",wrMisc::pasteC(oblNames[is.na(chNames)],quoteC="'")," in input 'pepTab'")
## main
if(length(modMass) >0) { # which protein/peptide [modOrigin], AAposition [modPos], deltaMass [modMass], name [modName], fixedModif [modFixed], (short name)
if(debug) {message(" .. xxsingleSpecModif1a \n")}
## 1st step: try to match names of proteins 'modOrigin' to those from pepTab (ie to which proteins modifications apply)
modOrigInd <- lapply(modOrigin,function(x) which(pepTab[,"origNa"]==x)) #now index to pepTab #which(pepTab[,"origNa"]==modOrigin)
naChe <- sapply(modOrigInd,function(x) (all(is.na(x)) | length(x) <1)) # find NA or empty list (no matches found)
if(all(naChe)) { ## can't find ANY of names given in modOrigin .. more elaborate search, possibly only half of name given
pepTaNa <- unique(pepTab[,"origNa"]) # want to find match these with modOrigin
pepTaNa <- matrix(unlist(sapply(pepTaNa, strsplit, "\\.")), ncol=2, byrow=TRUE) # will cause problem if any of pepTab[,"origNa"] do NOT contain '\\.' !!
chNa2 <- apply(pepTaNa, 1, function(x) sapply(modOrigin, function(y) y %in% x)) # each col represents one of unique pepTab[,"origNa"]
if(length(dim(chNa2)) <2) chNa2 <- matrix(chNa2, nrow=nrow(pepTaNa), dimnames=list(modOrigin,NULL))
      colnames(chNa2) <- paste(pepTaNa[,1], pepTaNa[,2], sep=".")
## want to find modOrigin in pepTab[,"origNa"]
      ## oppose unique(rownames(chNa2)) [from specModif] to colnames(chNa2) [from pepTab]
for(i in 1:length(unique(modOrigin))) {
## check for full match
j <- unique(modOrigin)[i]
if(any(chNa2[j,])) {modOrigin[which(modOrigin==j)] <- rep(colnames(chNa2)[which(chNa2[j,])], sum(modOrigin==j))
} else {
trimI <- wrMisc::.trimFromStart(unique(modOrigin))[i]
gr2 <- grep(trimI,colnames(chNa2))
if(!silent) message(fxNa," matching '",j,"' by grep to ",wrMisc::pasteC(colnames(chNa2)[gr2], quoteC="'"))
if(length(gr2) >0) modOrigin[which(rownames(chNa2) %in% j)] <- rep(colnames(chNa2)[gr2], sum(rownames(chNa2) %in% j))
} }
## find "ecAD.E","ecAD.S" in "ecADhE.P00327","ecADhS.P00328"
## now find "ecAD_E","ecAD_S" in "ecAD_E.P00327","ecAD_S.P00328"
modOrigInd <- lapply(modOrigin, grep, pepTab[,"origNa"]) # now index to pepTab
    naChe <- if(all(sapply(modOrigInd, length) >0)) sapply(modOrigInd, function(x) (all(is.na(x)) | length(x) <1)) else TRUE    # reset: find NA or empty list (no matches found)
}
if(debug) {message(" .. xxsingleSpecModif1b \n")}
##
    ## APPLY MODIFICATIONS to the corresponding fragments (of proteins concerned) : loop along valid (non-NA) modifications
nIter <- 1 # not really used any more !
if(any(!naChe)) {
tmp <- sapply(modName, rep, nMaxMod) # prepare repeated abbreviation for spec modif for search by grep (specModif$modName)
      tmp <- if(length(dim(tmp)) <2) matrix(tmp, ncol=nMaxMod) else t(tmp)     # one row per modification, repeated nMaxMod times
maxModNa <- apply(tmp, 1, paste, collapse="\\.") # max concatenated repeat-names of each modif
for(i in which(!naChe)) { # run loop along modifications ..
modPep <- which(as.integer(pepTab[,"beg"]) <= modPos[i] & as.integer(pepTab[,"end"]) >= modPos[i] & pepTab[,"origNa"]==modOrigin[i]) #index rel to pepTab
names(modPep) <- pepTab[modPep,"no"]
if(debug & i==which(!naChe)[1]) {message(" .. xxsingleSpecModif2a \n")}
if(debug & i==which(!naChe)[2]) {message(" .. xxsingleSpecModif2b \n")}
if(length(modPep) >0) { # ie current modif indeed occurs somewhere..
        chMaxMod <- grep(paste0("\\.", maxModNa[i], "\\."), pepTab[modPep,"seqNa"])      # check if current modif already performed (eg prev loop)
        if(length(chMaxMod) > 0) {      # modification already present, check against max number allowed
if(nMaxMod ==1) modPep <- modPep[-1*chMaxMod] else {
## need to count no modifs already present
            nRep <- (nchar(pepTab[modPep,"seqNa"]) -nchar(gsub(paste0("\\.", maxModNa[i], "\\."), "", pepTab[modPep,"seqNa"]))) / (2 +nchar(maxModNa[i]))
            chRep <- nRep < nMaxMod      # not yet at max number of this modification
modPep <- if(any(chRep)) modPep[which(chRep)] else NULL }}}
if(length(modPep) >0) { # valid cases of current modif
## now duplicate concerned lines of pepTab to new lines at end - if variable modif (ie keep orig at end)
          nPepT0 <- nrow(pepTab)      # initial number of peptides (if only fixed modif)
if(!modFixed[i]) {
pepTab <- rbind(pepTab, pepTab[modPep,]) # varModif: add new lines for keeping orig peptides
if(!silent) message(fxNa,modName[i],"/",modOrigin[i],"/",modPos[i]," variable modif: augmenting by ",length(modPep)," to ",nrow(pepTab)," peptides")
pepTab[nPepT0+(1:length(modPep)),"no"] <- max(nPepT0, as.integer(pepTab[,"no"]), na.rm=TRUE) +(1:length(modPep)) # increase no
} else if(!silent) message(fxNa,modName[i],"/",modOrigin[i],"/",modPos[i]," fixed modif ",length(modPep)," peptides OK within range for modif")
chNo <- duplicated(pepTab[,"no"])
if(any(chNo) & !silent) message(fxNa," +++ DUPLICATED 'no' in i=",i)
pepTab[modPep,"mass"] <- as.numeric(pepTab[modPep,"mass"]) + modMass[i] # add the current modification mass
          ## new col in pepTab initially filled with ""
pepTab[modPep,"modSpec"] <- paste0(sub("NA","", pepTab[modPep,"modSpec"]), modName[i]) # add modif name
pepTab[modPep,"seqNa"] <- paste0(gsub("[[:digit:]]+-[[:digit:]]+$","", pepTab[modPep,"seqNa"]), # add modif name to 'seqNa' with '.'
modName[i],".",pepTab[modPep,"beg"],"-", pepTab[modPep,"end"],sep="")
nIter <- nIter +1 }
} }
pepTab <- pepTab[order(pepTab[,"orig"], as.integer(pepTab[,"beg"]), as.integer(pepTab[,"no"]), as.integer(pepTab[,"end"])),] # re-order (1st by orig then by 'beg' & 'end')
chNo <- duplicated(pepTab[,"no"])
if(any(chNo)) message(fxNa," Trouble ahead: Problem with non-unique fragment numbers") }
pepTab }
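## Minimal sketch (illustration only, all values hypothetical) of the 'specModif' format
## expected by .singleSpecModif() : a list of 5 parallel elements, single values get recycled.
# specModif1 <- list(modOrigin="myProt.P12345",    # protein name as in pepTab[,"origNa"]
#   modPos=7,                                      # position within the sequence
#   modMass=63.929,                                # mass shift to add (hypothetical, eg a metal adduct)
#   modName="Zn",                                  # short label used in 'seqNa' and 'modSpec'
#   modFixed=FALSE)                                # variable modif : unmodified peptides are kept, too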
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/R/identifFixedModif.R
|
#' Make terminal and internal fragments from proteins
#'
#' Makes terminal and internal fragments based on protein-sequence and presents them as a matrix including heading and/or tailing amino-acid and the theoretical molecular mass of all fragments.
#' As the number of theoretically possible fragments increases with the size of the peptide/protein treated, it is recommended to set arguments like \code{maxFragSize} to
#' realistic values for the type of mass spectrometer used, since efficient filtering will reduce considerably the amount of memory (RAM) needed and will improve overall performance.
#'
#' @param protTab (character or matrix) named vector of protein-sequences to fragment, or matrix (character) with lines for initial proteins/peptides, cols as name/sequence/mass
#' @param minFragSize (integer) minimum number of amino-acids for being considered
#' @param maxFragSize (integer) maximum number of amino-acids for being considered
#' @param internFra (logical) toggle if internal fragments will be produced or not
#' @param knownMods (character) optional custom alternative to \code{AAfragSettings(outTy="all")$knownMods}
#' @param redRedundSeq (logical) reduce redundant sequences to 1st appearance in all further treatments
#' @param prefFragPat (matrix) for preferential fragmentation rules (see also \code{.prefFragPattern})
#' @param remNonConfPrefFragm (logical) allows removing (peptide-)fragments not conforming with preferential fragmentation rules (using \code{evalIsoFragm})
#' @param ambigLab (character) text-labels for ambiguities (first for duplicated sequences second for iso-mass)
#' @param massTy (character) default 'mono' for mono-isotopic masses (alterative 'average')
#' @param specModif (list) supplemental custom fixed or variable modifications (eg Zn++ at given residue)
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @param debug (logical) for bug-tracking: more/enhanced messages
#' @return matrix with fragment sequence, mass, start- and end-position, heading and tailing AA (or NA if terminal fragment)
#' @seealso \code{\link{makeFragments}}; \code{\link{evalIsoFragm}}, from package \href{https://CRAN.R-project.org/package=wrProteo}{wrProteo} \code{\link[wrProteo]{convAASeq2mass}}, \code{\link[wrProteo]{AAmass}}, \code{\link[wrProteo]{massDeFormula}}
#' @examples
#' protP <- c(protP="PEPTIDE")
#' pepT1 <- makeFragments(protTab=protP, minFragSize=2, maxFragSize=9, internFra=TRUE)
#' tail(pepT1)
#' @export
makeFragments <- function(protTab, minFragSize=6, maxFragSize=300, internFra=TRUE, knownMods=NULL, redRedundSeq=FALSE, prefFragPat=NULL,
remNonConfPrefFragm=TRUE, ambigLab=c(duplSequence="duplSequence",isoMass="isoMass"), massTy="mono",specModif=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
fxNa <- wrMisc::.composeCallName(callFrom, newNa="makeFragments")
docTi <- rep(Sys.time(),7) #
msg <- "expecting matrix with 3 columns (name,sequence,mass) and >=1 line"
if(debug) silent <- FALSE
if(length(dim(protTab)) !=2) {
#if(is.null(names(protTab))) names(protTab) <- protTab
    if(is.null(names(protTab))) names(protTab) <- paste0("p", 1:length(protTab))      # for checking
protTab <- matrix(c(names(protTab), protTab, rep(NA, length(protTab))), ncol=3)
}
if(is.null(rownames(protTab))) rownames(protTab) <- protTab[,2]
if(is.null(knownMods)) knownMods <- AAfragSettings(outTy="all")$knownMods #
names(docTi) <- c("ini","fragmentSeq",".exNamesTyDeList","convAASeq2mass","findRepeated")
docTi[2] <- Sys.time()
pep2 <- apply(protTab,1,function(x) fragmentSeq(x[2], minSize=minFragSize, maxSize=maxFragSize, internFragments=internFra,
separTerm=TRUE, keepRedSeqs=TRUE, prefName=x[1], callFrom=fxNa, silent=silent)) # takes ~18% time !
names(pep2) <- protTab[,1]
docTi[3] <- Sys.time()
if(debug) {message(" .. xxmakeFragments00 \n")}
  ## note: default fragmentSeq may already remove redundant sequences, ie here set keepRedSeqs=TRUE to allow adding prefix !
  ## organise into table of frags for all proteins, indicate if full, Nterm ... protOrig & position ... seq
pepTab <- .exNamesTyDeList(pep2, fullSeq=protTab[,2]) # cols c("seq","orig","origNa","ty","seqNa","beg","end","precAA","tailAA","mass")
docTi[3] <- Sys.time()
## determine duplicated sequences, determine mass (but can't separate yet to nonredundant set, otherwise problem with preferential fragmentation sites)
duplS1 <- duplicated(pepTab[,"seq"], fromLast=FALSE)
if(any(duplS1)) {
duplS2 <- duplicated(pepTab[,"seq"], fromLast=TRUE)
pepTab[which(duplS1 | duplS2),"ambig"] <- ambigLab[1] # "duplSequence"
pepMa <- wrProteo::convAASeq2mass(pepTab[which(!duplS1),"seq"], massTy=massTy, callFrom=fxNa) - wrProteo::.atomicMasses()["e",massTy] # also subtract 1 electron mass for making (single charge) ions
pepTab[which(!duplS1),"mass"] <- pepMa
## propagate mass ..
pepTab[which(duplS1),"mass"] <- pepTab[which(duplS2)[match(pepTab[which(duplS1),"seq"], pepTab[which(duplS2),"seq"])],"mass"] # reduce search space of match
} else {
pepTab[,"mass"] <- wrProteo::convAASeq2mass(pepTab[,"seq"], massTy=massTy, callFrom=fxNa) - wrProteo::.atomicMasses()["e",massTy] # also subtract 1 electron mass for making (single charge) ions
}
if(debug) {message(" .. xxmakeFragments1 \n")}
## determine precAA AND tailAA
heaPo <- as.integer(pepTab[,"beg"])
chHe <- heaPo >1
if(any(chHe)) {chHe <- which(chHe); pepTab[chHe,"precAA"] <- substr(pepTab[chHe,"orig"], heaPo[chHe]-1, heaPo[chHe]-1)}
taiPo <- as.numeric(pepTab[,"end"])
chTa <- taiPo < nchar(pepTab[,"orig"])
if(any(chTa)) {chTa <- which(chTa); pepTab[chTa,"tailAA"] <- substr(pepTab[chTa,"orig"], taiPo[chTa]+1, taiPo[chTa]+1)}
if(debug) {message(" .. xxmakeFragments2 \n")}
## find iso-fragments (later: choose preferential cleavage sites xD.xx, xE.xx, xx.Px, need heading&tailing AA)
pepTab <- pepTab[order(as.numeric(pepTab[,"mass"])),]
chMa <- duplicated(pepTab[,"mass"], fromLast=FALSE) # wo 1st instance
if(any(chMa)) { # redundant iso-masses exist, look for preferential cleavage
chM2 <- chMa | duplicated(pepTab[,"mass"], fromLast=TRUE) # all iso masses
    chM3 <- is.na(pepTab[which(chM2),"ambig"])        # check for lines to mark as isoMass (same mass but not yet marked as 'duplSequence')
if(any(chM3)) pepTab[which(chM2)[which(chM3)],"ambig"] <- ambigLab[2] #"isoMass"
    chM5 <- which(chM2 & pepTab[,"ty"] =="inter")     # consider for preferential cleavage (same mass & internal fragm), still too much due to duplicated seq
if(length(chM5) >0) {
pTa <- pepTab[chM5,c("no","origNa","seq","precAA","tailAA","mass","beg")] #c("no","origNa","seq","precAA","tailAA","beg","end","mass")
      pTa <- wrMisc::sortBy2CategorAnd1IntCol(pTa, categCol=c("origNa","mass"), numCol="beg", findNeighb=TRUE, decreasing=FALSE, callFrom=fxNa)   # add col "neiGr"
chNA <- is.na(pTa[,"neiGr"])
if(debug) {message(" .. xxmakeFragments3")}
## remove fragments not expected due to preferential fragmentation sites
if(!all(chNA) & remNonConfPrefFragm) { # lines to inspect exist
if(any(chNA)) pTa <- pTa[which(!chNA),]
badLi <- as.integer(unlist(by(pTa,pTa[,"neiGr"],function(y) {y <- as.matrix(y); if(nrow(y) >1) evalIsoFragm(y, prefFragPat=prefFragPat, callFrom=fxNa)})))
if(length(badLi) >0) { pepTab <- pepTab[-1*match(badLi,pepTab[,"no"]),]
if(!silent) message(fxNa," due to preferential fragmentation sites discard ",length(badLi)," fragments, ",nrow(pepTab)," remain")}}}}
pepTab <- cbind(pepTab,modSpec=rep("",nrow(pepTab))) # needed for documenting specific modifications in .singleSpecModif()
pepTab }
#' @export
.exNamesTyDeList <- function(x,subLiNames=c("full","Nter","Cter","inter"),inclNo=TRUE, fullSeq=NULL,
outCol=c("seq","orig","origNa","ty","seqNa","beg","end","precAA","tailAA","ambig","mass"),silent=FALSE,callFrom=NULL) { #"ambigTy"
  ## function to extract all information from pep2 (list of lists with peptides) & organise by groups in output as matrix (all full, all Nter,...)
  ## 'x' .. list of lists with character vectors of sequences with names that can be parsed eg 'x.1-7' to extract 'beg' & 'end', otherwise ALL output will be NA (+message from extractLast2numericParts())
## 'inclNo' .. add 1st col with number
## 'fullSeq' .. to reinject full sequence which may not be used in names of 'x' and not be in x[[1]][["full"]]
## NOTE : cols 'precAA' & 'tailAA' won't be filled since orig (parent) sequence for fragments not known with input of function
## similar for cols 'ambig' & 'mass'
fxNa <- wrMisc::.composeCallName(callFrom,newNa=".exNamesTyDeList")
chLe <- sapply(x,length) <1
if(any(chLe)) {if(all(chLe)) stop("'x' is empty !") else x <- x[which(!chLe)]}
out <- matrix(NA, nrow=sum(sapply(x,function(y) sum(sapply(y,length)))), ncol=length(outCol), dimnames=list(NULL,outCol))
iniNa <- names(x)
names(x) <- NULL
fullSequ <- unlist(sapply(x,function(y) y$full))
  if(length(fullSequ) < length(x)) fullSequ <- if(is.null(fullSeq)) iniNa else fullSeq     # try to find/read the full sequence, otherwise fall back to names of 'x'
  staLi <- 1            # row-counter for filling 'out'
subLiNames <- subLiNames[subLiNames %in% unlist(lapply(x,names))]
for(i in 1:length(subLiNames)) {
tm <- lapply(x,function(x) x[[subLiNames[i]]])
chLe <- sapply(tm,length) >0
if(any(chLe)) {
if(any(!chLe)) tm <- tm[which(chLe)]
if(length(tm) >0) {
protNa <- rep(iniNa[which(chLe)], sapply(tm,length))
fullSe <- rep(fullSequ[which(chLe)], sapply(tm,length))
tm <- unlist(tm)
tm <- cbind(seq=tm, orig=fullSe, origNa=protNa, ty=rep(subLiNames[i],length(tm)), seqNa=names(tm), wrMisc::extractLast2numericParts(names(tm)))
colnames(tm)[ncol(tm)+(-1:0)] <- c("beg","end")
addCol <- (!outCol %in% colnames(tm))
if(sum(addCol) >0) tm <- cbind(tm, matrix(NA, nrow=nrow(tm), ncol=sum(addCol), dimnames=list(NULL,outCol[which(addCol)])))
if(!identical(colnames(tm),outCol)) {tm <- tm[,wrMisc::naOmit(match(outCol, colnames(tm)))]
if(!silent) message(fxNa,": reduce cols to match argument 'outCol'")}
        out[staLi:(staLi +nrow(tm)-1), 1:ncol(tm)] <- tm
        staLi <- staLi + nrow(tm) }}}
if(inclNo) cbind(no=as.character(1:nrow(out)),out) else out }
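## Minimal sketch (illustration only) of the name convention parsed above : fragment names
## like 'pepK.2-7' carry start/end positions which wrMisc::extractLast2numericParts()
## recovers as two numeric columns (used to fill 'beg' and 'end').
# wrMisc::extractLast2numericParts(c("pepK.2-7", "pepK.3-9"))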
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/R/makeFragments.R
|
#' Plot the number of theoretical random fragments
#'
#' This simple function allows plotting the expected number of theoretical fragments from random fragmentation of peptides/proteins (in mass spectrometry).
#' Here, only the pure fragmentation is considered and all fragment-sizes are included (ie, no gating).
#' For simplicity, possible (variable) modifications like loss of neutrals, etc, are not considered.
#'
#' @param x (integer) length (in amino-acids) of input peptides/proteins to be considered
#' @param tit (character) custom title
#' @param xlab (character) custom x-axis label
#' @param ylab (character) custom y-axis label
#' @param col (character or integer) custom colors
#' @param log (character) define which axis should be log (use "xy" for drawing both x- and y-axis as log-scale)
#' @param mark (matrix or data.frame) first column for text and second column for the x-coordinate where it should be displayed along the top border of the figure
#' @param cexMark (numeric) cex expansion-factor for text from argument \code{mark}
#' @return figure only
#' @seealso \code{\link{AAfragSettings}}
#' @examples
#' marks <- data.frame(name=c("Ubiquitin\n76aa", "Glutamate dehydrogenase 1\n501aa"),
#' length=c(76,501))
#' plotNTheor(x=20:750, log="", mark=marks)
#' @export
## here simple function to plot the number of theoretical fragments, assume just b- & y- fragments
plotNTheor <- function(x,tit="Number of term and intern fragm",xlab="Number of aa",ylab="",col=2:3,log="",mark=NULL,cexMark=0.75) {
## plot number of theoretical fragments
nTerm <- function(x) (x-1)*2 # terminal fragments
nInte <- function(x) sapply(x, function(y) sum((y-2):1)) # internal fragments
  graphics::plot(x, nTerm(x) + nInte(x), type="l", las=1, xlab=xlab, ylab="", log=log, main=tit, col=col[2])
  graphics::mtext(if(nchar(ylab) >0) ylab else "Number of Fragments", side=2, line=5)
graphics::lines(x, nTerm(x), lty=2, col=col[1])
if(length(mark) >0) {graphics::abline(v=as.numeric(mark[,2]), lty=2, col=grDevices::grey(0.8))
graphics::mtext(mark[,1], at=as.numeric(mark[,2]), side=3, line=-1.6 -(1:nrow(mark)), cex=cexMark)}
}
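## Quick check (illustration only) of the counting formulas used above : a linear chain
## of n amino-acids yields 2*(n-1) terminal fragments and (n-2)*(n-1)/2 internal fragments.
# n <- 10
# 2*(n -1)           # 18 terminal fragments
# sum((n -2):1)      # 36 internal fragments, same as (n-2)*(n-1)/2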
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/R/plotNTheor.R
|
#' Scoring of charge catching potential for peptides
#'
#' Make score based on cumulative search for AA with given potential to catch charge (H+, or optionally any charge).
#' Note : with the current cumulative scoring, large peptides may get privileged.
#'
#' @param resTab (matrix or data.frame) matrix or data.frame of results for SINGLE protein (here only the column specified with argument 'pepCol' will be used)
#' @param pepCol (character) column name of 'resTab' containing the peptide sequence to be scored
#' @param scale01 (logical) linear rescale output to maximum 1.0
#' @param chargeMode (character) this value may be 'pos' (default) for the positively charged amino-acids K,R and H or,
#' if this argument has any other value, than all charged amino-acids (K,R,H, S,T,N,Q, D,E, W and Y) will be considered.
#' @param silent (logical) suppress messages
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return numeric vector with score for each peptide of resTab (even if \code{scale01=TRUE} minimum may be >0 if all peptides do contain charge-catching AAs)
#' @seealso \code{\link{fragmentSeq}}
#' @examples
#' resTa <- matrix(c(1:4,"PEPTID","PEPTIK","PEPTRK","AGV"), ncol=2,
#' dimnames=list(NULL,c("predInd","seq")))
#' scoreChargeCatch(resTa)
#'
#' @export
scoreChargeCatch <- function(resTab, pepCol="seq", scale01=TRUE, chargeMode="pos", silent=FALSE, callFrom=NULL) {
## Scoring of charge catching potential for peptides
##
fxNa <- wrMisc::.composeCallName(callFrom, newNa="scoreChargeCatch")
chatchCha <- sapply(.chargeCatchingAA(chargeMode=chargeMode)[,1], grep, resTab[,pepCol])
chLi <- sapply(chatchCha,length) <1
pepSco <- rep(0,nrow(resTab))
names(pepSco) <- rownames(resTab)
if(all(chLi)) {
if(!silent) message(fxNa,"no charge catching AAs found in peptides")
} else {
if(any(chLi)) chatchCha <- chatchCha[which(!chLi)]
    for(i in 1:length(chatchCha)) {
      sco <- as.numeric(.chargeCatchingAA(chargeMode=chargeMode)[,2][which(names(chatchCha)[i] ==.chargeCatchingAA(chargeMode=chargeMode)[,1])])
      ## is an additive function the right way ? (long peptides will be privileged)
      pepSco[chatchCha[[i]]] <- pepSco[chatchCha[[i]]] +sco
}}
if(scale01) {
maxSc <- max(pepSco,na.rm=TRUE)
if(maxSc >0) pepSco <- round(pepSco/maxSc,3) }
pepSco }
#' @export
.chargeCatchingAA <- function(chargeMode="pos"){
## produce matrix with values for capacity of catching (extra) charges
chargeCatching <- if(identical(as.character(chargeMode),"pos")) {
cbind(AA=c("K","R","H"), sco=rep(c(1),c(3)))
} else {
cbind(AA=c("D","E", "S","T","N","Q", "K","R","H", "W","Y"), sco=rep(c(1,1,0.7,0.7),c(2,4,3,2)))}
chargeCatching }
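## Minimal sketch (illustration only) of the scoring table used above :
# .chargeCatchingAA()          # default 'pos' : K, R and H, each scoring 1
# .chargeCatchingAA("all")     # any other value : charged/polar AAs with weights 1 or 0.7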
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/R/scoreChargeCatch.R
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
## ----setup1, echo=TRUE, warnings=FALSE----------------------------------------
library(wrMisc)
library(wrProteo)
library(wrTopDownFrag)
## ----AAfragSettings1, echo=TRUE-----------------------------------------------
## common settings
str(AAfragSettings())
## ----massDeFormula1, echo=TRUE------------------------------------------------
# Standard way to obtain the (monoisotopic) mass of water (H2O) or a phosphorylation (PO3)
# Note that the number of times an atom appears must be written in front of it (no number means one occurrence)
massDeFormula(c("2HO", "P3O"))
# Underneath this runs (for H2O):
2*.atomicMasses()["H","mono"] +.atomicMasses()["O","mono"] # H2O
## ----convAASeq2mass1, echo=TRUE-----------------------------------------------
# Let's define two small amino-acid sequences
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT", pepC2="PECEPTRT")
# The sequence converted to mass
convAASeq2mass(protP)
## ----nFragm1, out.width="150%", out.heigth="80%", echo=TRUE-------------------
marks <- data.frame(name=c("Ubiquitin\n76aa","Glutamate dehydrogenase 1\n501aa"),length=c(76,501))
layout(matrix(1:2,ncol=2))
plotNTheor(x=20:750, log="", mark=marks)
plotNTheor(x=20:750, log="xy", mark=marks)
mtext("log/log scale", cex=0.8, line=0.1)
## ----fragmentSeq1, echo=TRUE--------------------------------------------------
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT")
## Basic output
fragmentSeq(protP[1], minSize=3, internFragments=TRUE, pref="pepK")
## ----makeFragments1, echo=TRUE------------------------------------------------
## Elaborate output
protP2 <- cbind(na=names(protP), se=protP, ma=wrProteo::convAASeq2mass(protP,seqName=TRUE))
pepT1 <- makeFragments(protTab=protP2, minFragSize=3, maxFragSize=9, internFra=TRUE)
## ----makeFragments2, echo=TRUE------------------------------------------------
head(pepT1)
dim(pepT1)
## The repartition between types of fragments :
table(pepT1[,"ty"])
## Types of ambiguities encountered
table(pepT1[,"ambig"])
## ----toyData1, echo=TRUE------------------------------------------------------
# The toy peptide/protein sequence
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT")
obsMass1 <- cbind(a=c(424.2554,525.3031,638.3872,753.4141,909.5152,1006.5680,1135.6106),
b=c(452.2504,553.2980,666.3821,781.4090,937.5102,1034.5629,1163.6055),
x=c(524.2463,639.2733,752.3573,853.4050,950.4578,1079.5004,1176.5531),
y=c(498.2671,613.2940,726.3781,827.4258,924.4785,1053.5211,1150.5739),
bdH=c(434.2398,535.2875,648.3715,763.3985,919.4996,1016.5524,1145.5949),
ydH=c(480.2565,1132.5633,595.2835,708.3675,809.4152,906.4680,1035.5106),
bi=c(498.2307,583.3198,583.3198,611.3148,680.3726,712.3624,712.3624),
bidH=c(662.3620,694.3519,694.3519,791.4046,791.4046,791.4046,888.4574),
bidN=c(663.3461,695.3359,695.3359,792.3886,792.3886,792.3886,889.4414),
ai=c(652.3777,684.3675,684.3675,781.4203,781.4203,781.4203,878.4730) )
rownames(obsMass1) <- c("P","T","I","D","R","P","E") # only for N-term (a & b)
## This example contains several iso-mass cases
length(obsMass1)
length(unique(as.numeric(obsMass1)))
## ----toyData2, echo=TRUE------------------------------------------------------
## have same mass
## 480.2201 DRPE-H2O & y4-H2O
## 583.3198 PTIDR & TIDRP ; 565.3093 PTIDR-H2O & TIDRP-H2O
## 809.4152 y7-H2O & EPTIDRP
## 684.3675 TIDRPE-CO & EPTIDR-CO ; 694.3519 EPTIDR-H2O & TIDRPE-H2O
## 712.3624 TIDRPE & EPTIDR
## ----toyData3, echo=TRUE------------------------------------------------------
# Now we'll add some random noise
set.seed(2020)
obsMass2 <- as.numeric(obsMass1)
obsMass2 <- obsMass2 + runif(length(obsMass2), min=-2e-4, max=2e-4)
## ----identifFixedModif1, echo=TRUE--------------------------------------------
identP1 <- identifFixedModif(prot=protP[1], expMass=obsMass2, minFragSize=3,
maxFragSize=7, modTy=list(basMod=c("b","y"))) # should find 10term +10inter
## This function returns a list
str(identP1)
## $massMatch identifies each group of matches between predicted and measured masses
## ----sessionInfo, echo=FALSE--------------------------------------------------
sessionInfo()
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/inst/doc/wrTopDownFragVignette1.R
|
---
title: "Getting started with wrTopDownFrag"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrTopDownFragVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains tools for use in [TopDown Proteomics](https://en.wikipedia.org/wiki/Top-down_proteomics).
Proteomics refers to the technique of identifying all proteins in a given sample. Typically this is done by using
[mass spectrometry](https://en.wikipedia.org/wiki/Mass_spectrometry).
This technique primarily returns 'm/z' (mass over charge) measures; in most cases these can be resolved to molecular masses.
Since mass spectrometry is rather well suited to identifying small molecules, it has become common practice to first digest proteins into smaller units (ie peptides),
which can be easily identified by mass spectrometry. This technique is referred to as __"bottom-up" proteomics__.
Further high energy fragmentation of proteins or peptides directly within the mass spectrometer also greatly improves the identification rate (MS-MS or MS2).
To overcome some of the drawbacks associated with the bottom-up approach, more recent developments of mass spectrometers allow the
identification of full length proteins (from samples with few proteins). This approach is called __"top-down proteomics"__ (see also
[Chen et al, 2018](https://doi.org/10.1021/acs.analchem.7b04747), [Skinner et al, 2018](https://doi.org/10.1038/nchembio.2515) or
[Li et al, 2018](https://doi.org/10.1038/nchem.2908)).
In this context high energy random fragmentation of proteins/peptides within the mass spectrometer plays an important role, too.
This approach produces fragments containing one of the original start/end-sites (terminal fragments) and, depending on the energy settings, further internal fragments.
Of course, larger parent proteins/peptides will give even more complex patterns of internal fragments.
The pattern of resulting fragments for a given precursor protein/peptide allows better identification and
provides further valuable information about the (3-dimensional) conformation of the initial proteins (see also [Haverland et al, 2017](https://doi.org/10.1007/s13361-017-1635-x)).
This project got started to help analyzing internal fragments from [FT-ICR mass-spectrometry](https://en.wikipedia.org/wiki/Fourier-transform_ion_cyclotron_resonance).
When this project began, no or only very limited tools were available for this task (this situation has been changing since 2019).
Since there is already software available for transforming initial lists of m/z values into monoisotopic values, the aim was to continue further to the identification of m/z peaks after a deconvolution step to assign the most likely peptide/protein sequence.
Please refer eg to [Wikipedia: monoisotopic mass](https://en.wikipedia.org/wiki/Monoisotopic_mass) for details on molecular mass and deconvolution.
Initial developments were performed based on data from [FT-ICR mass-spectrometry](https://en.wikipedia.org/wiki/Fourier-transform_ion_cyclotron_resonance), but the overall concept may be applied to any kind of mass-spectrometry data.
When developing this package particular attention was paid to the fact that entire proteins may very well still carry embedded ions in catalytic cores or other rare modifications.
With the tools presented here, the link between parent- and child-fragments was not further taken into account, since we did not expect 100 percent pure parent species entering the fragmentation step.
In summary, this package aims to provide tools for the identification of proteins from monoisotopic m/z lists, and in particular,
to consider and identify all possible internal fragments resulting from fragmentation performed during mass-spectrometry analysis.
To get started, we need to load the packages [wrMisc](https://CRAN.R-project.org/package=wrMisc) and
[wrProteo](https://CRAN.R-project.org/package=wrProteo), available from [CRAN](https://cran.r-project.org/).
And of course we need to load this package.
```{r, include = FALSE}
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
```
```{r setup1, echo=TRUE, warnings=FALSE}
library(wrMisc)
library(wrProteo)
library(wrTopDownFrag)
```
Further information about the package versions used for making this vignette can be found in the appendix 'Session-info'.
For manipulating peptide/protein sequences we will use functions for working with one-letter code amino-acid sequences provided by package [wrProteo](https://CRAN.R-project.org/package=wrProteo).
## Nomenclature
This describes how chemical modifications on amino-acids (like oxygenation) are abbreviated and what exact chemical modification each abbreviation refers to.
In terms of nomenclature we'll stick to these abbreviations :
```{r AAfragSettings1, echo=TRUE}
## common settings
str(AAfragSettings())
```
Here we can see that 'a','b' and 'c'-ions are grouped as 'Nterm' or that the phosphorylation modification 'p' is taken into consideration at 'S','T' or 'Y' amino-acid residues.
The section __$modChem__ describes/defines exactly how many molecules will be added or removed with the various modifications available in this package.
Molecular (mono-isotopic) masses of the atoms used here are taken from the package [wrProteo](https://CRAN.R-project.org/package=wrProteo); initially they were taken from [Unimod](http://www.unimod.org/masses.html). They can be easily updated if, in the future, (mono-isotopic) molecular masses are determined with higher precision (ie more digits).
## Obtaining Molecular Mass For Chemical Structures (using wrProteo)
The package [wrProteo](https://CRAN.R-project.org/package=wrProteo) is used to convert summed chemical formulas into molecular mass.
```{r massDeFormula1, echo=TRUE}
# Standard way to obtain the (monoisotopic) mass of water (H2O) or a phosphorylation (PO3)
# Note that the number of times an atom appears must be written in front of it (no number means one occurrence)
massDeFormula(c("2HO", "P3O"))
# Underneath this runs (for H2O):
2*.atomicMasses()["H","mono"] +.atomicMasses()["O","mono"] # H2O
```
Atomic masses can be calculated either as 'average mass' or 'monoisotopic mass' ([Wikipedia: monoisotopic mass](https://en.wikipedia.org/wiki/Monoisotopic_mass)); the latter is commonly used in mass-spectrometry and will be used by default in this package.
### Molecular mass of peptides and proteins
At this level we can compute the expected mass of uncharged proteins/peptides as defined by their sequence.
Of course, protein isomers (ie same total composition but different sequence) get the same mass.
```{r convAASeq2mass1, echo=TRUE}
# Let's define two small amino-acid sequences
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT", pepC2="PECEPTRT")
# The sequence converted to mass
convAASeq2mass(protP)
```
As mentioned, this package assumes that experimental values have already been deconvoluted, ie that only the mono-charged peak is provided and isotopic patterns have been reduced to the main representative isotope.
In line with this assumption, default predictions are mono-isotopic masses.
## Fragmenting a Peptide/Protein -Sequence
With 'Fragmentation' techniques we refer to techniques like shooting electrons or IR-waves that break larger molecules into smaller molecules (of different composition).
In order to check if fragmentation yields random cleavage or rather a directed distribution of cleavage sites,
it is necessary to predict all possible cleavage sites.
The complexity of this simple task increases roughly quadratically with protein size.
At this level, the complexity increases so much, that only a few (longer) full length proteins can be treated at once within a reasonable amount of time.
With large proteins (more than 600 aa length) this may consume considerable amounts of RAM.
When designing this package care has been taken to run infrastructure-intensive steps as efficiently as possible, but working with complex samples/proteomes is still beyond technical limits.
Here is a very simplified view of the theoretical number of terminal and internal fragments.
Fixed modifications do not change the number of expected fragments.
Note that this simplification does not include variable modifications (see also the next section).
```{r nFragm1, out.width="150%", out.heigth="80%", echo=TRUE}
marks <- data.frame(name=c("Ubiquitin\n76aa","Glutamate dehydrogenase 1\n501aa"),length=c(76,501))
layout(matrix(1:2,ncol=2))
plotNTheor(x=20:750, log="", mark=marks)
plotNTheor(x=20:750, log="xy", mark=marks)
mtext("log/log scale", cex=0.8, line=0.1)
```
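The counts plotted above follow simple closed forms, derived from the counting used in \code{plotNTheor()} : a linear chain of n amino-acids gives 2*(n-1) terminal and (n-2)*(n-1)/2 internal fragments. As a quick numeric check for Ubiquitin (76 aa) :
```{r nFragm2, echo=TRUE}
n <- 76
c(terminal=2*(n -1), internal=(n -2)*(n -1)/2)
```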
### Fragmenting a protein sequence
Random cleavage for a sample collection of proteins can be obtained using the functions \code{fragmentSeq()} or \code{makeFragments()}
```{r fragmentSeq1, echo=TRUE}
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT")
## Basic output
fragmentSeq(protP[1], minSize=3, internFragments=TRUE, pref="pepK")
```
```{r makeFragments1, echo=TRUE}
## Elaborate output
protP2 <- cbind(na=names(protP), se=protP, ma=wrProteo::convAASeq2mass(protP,seqName=TRUE))
pepT1 <- makeFragments(protTab=protP2, minFragSize=3, maxFragSize=9, internFra=TRUE)
```
```{r makeFragments2, echo=TRUE}
head(pepT1)
dim(pepT1)
## The repartition between types of fragments :
table(pepT1[,"ty"])
## Types of ambiguities encountered
table(pepT1[,"ambig"])
```
Even such a small example already gives 61 possible peptides (without counting modifications).
Of course, it is quite common to obtain a high degree of ambiguity with short peptides, since they are less likely to be unique.
## Fixed And Variable Modifications
In real-world biology protein modifications are common.
Conceptually one can distinguish two cases : With 'fixed modifications' it is presumed that all protein molecules of a given species (ie sequence)
do carry exactly the same modification(s). Alternatively one may suggest that only a portion of the molecules for a given protein-species carry a given modification.
Fixed modifications do not increase the search space, since a given value corresponding to the change of mass gets added or subtracted.
In the case of variable modifications the search space increases, since the modified as well as the unmodified mass will be considered when comparing to experimental masses. In particular, when multiple amino-acids on the same protein may get modified alternatively, this increases the search space further.
Then one may consider multiple modifications, for example 0 to 2 out of 5 Serine residues in the protein sequence may carry a phosphorylation ...
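To illustrate this combinatorial growth with a simple count (plain R, independent of this package's functions) :
```{r varModCount1, echo=TRUE}
## number of ways to place 0, 1 or 2 phosphorylations on 5 Serine residues
choose(5, 0:2)
## total number of mass-variants to consider per fragment
sum(choose(5, 0:2))
```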
In order to reduce the apparent complexity, several conceptual compromises have been taken.
*) The presence of charged amino-acids has been used to dismiss all fragments not containing a charged amino-acid;
the default has been set to positive charges (K, H and R).
*) When identifying variable modifications, the non-modified isoform must be identified first to take the variable modification into account.
## Basic Identification Including Fixed Modifications
Now we are ready to compare a list of experimental m/z values to a set of protein sequences ...
### Tolerance
In mass spectrometry it is common to use relative tolerance-limits, expressed as 'ppm', when it comes to identification.
This means that peaks not having any predicted peptides/ions in a predefined ppm-range will be omitted.
When the search space gets very crowded, ie when many peptide-fragments are predicted, there is of course a considerable risk
that some predicted fragments/ions are so close that multiple predicted peptides/ions have to be considered in a given ppm-range.
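Since ppm is a relative measure, the corresponding absolute mass-window grows with the mass considered. A quick illustration for a tolerance of 5 ppm (the default used further below) :
```{r ppmToler1, echo=TRUE}
## absolute tolerance (in Da) corresponding to 5 ppm, for a few masses
mass <- c(500, 1000, 5000)
cbind(mass=mass, tolDa=mass *5e-6)
```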
### Identification Example
First let's make a little toy example using a hypothetical protein/peptide sequence. The molecular masses below have been predicted using the [Prospector tool](http://prospector.ucsf.edu/prospector/cgi-bin/msform.cgi?form=msproduct) from UCSF. We'll then add a little random noise to simulate experimental data. The table below is not exhaustive for all potentially occurring fragments, as experimental data would not be expected to be complete either.
As modifications to test, some cases of loss of water (H2O) and loss of ammonia (NH3) were prepared.
```{r toyData1, echo=TRUE}
# The toy peptide/protein sequence
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT")
obsMass1 <- cbind(a=c(424.2554,525.3031,638.3872,753.4141,909.5152,1006.5680,1135.6106),
b=c(452.2504,553.2980,666.3821,781.4090,937.5102,1034.5629,1163.6055),
x=c(524.2463,639.2733,752.3573,853.4050,950.4578,1079.5004,1176.5531),
y=c(498.2671,613.2940,726.3781,827.4258,924.4785,1053.5211,1150.5739),
bdH=c(434.2398,535.2875,648.3715,763.3985,919.4996,1016.5524,1145.5949),
ydH=c(480.2565,1132.5633,595.2835,708.3675,809.4152,906.4680,1035.5106),
bi=c(498.2307,583.3198,583.3198,611.3148,680.3726,712.3624,712.3624),
bidH=c(662.3620,694.3519,694.3519,791.4046,791.4046,791.4046,888.4574),
bidN=c(663.3461,695.3359,695.3359,792.3886,792.3886,792.3886,889.4414),
ai=c(652.3777,684.3675,684.3675,781.4203,781.4203,781.4203,878.4730) )
rownames(obsMass1) <- c("P","T","I","D","R","P","E") # only for N-term (a & b)
## This example contains several iso-mass cases
length(obsMass1)
length(unique(as.numeric(obsMass1)))
```
Iso-peptides will show up with the same mass :
```{r toyData2, echo=TRUE}
## have same mass
## 480.2201 DRPE-H2O & y4-H2O
## 583.3198 PTIDR & TIDRP ; 565.3093 PTIDR-H2O & TIDRP-H2O
## 809.4152 y7-H2O & EPTIDRP
## 684.3675 TIDRPE-CO & EPTIDR-CO ; 694.3519 EPTIDR-H2O & TIDRPE-H2O
## 712.3624 TIDRPE & EPTIDR
```
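This iso-mass behaviour can be verified directly, eg for 'PTIDR' and 'TIDRP' (a quick check using wrProteo) :
```{r toyData2b, echo=TRUE}
## both peptides contain exactly the same amino-acids, thus identical masses
convAASeq2mass(c(PTIDR="PTIDR", TIDRP="TIDRP"))
```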
```{r toyData3, echo=TRUE}
# Now we'll add some random noise
set.seed(2020)
obsMass2 <- as.numeric(obsMass1)
obsMass2 <- obsMass2 + runif(length(obsMass2), min=-2e-4, max=2e-4)
```
Now let's use these numbers as experimental values and compare them to all theoretical values :
For example, the peptide-sequences 'PTIDR' and 'TIDRP' may be derived from our first toy-sequence and contain exactly the same atoms and thus will have iso-masses.
Without any further information it is impossible to know which one of them is/was truly present in a given sample.
Thus, the corresponding mass will be called an ambiguous identification.
```{r identifFixedModif1, echo=TRUE}
identP1 <- identifFixedModif(prot=protP[1], expMass=obsMass2, minFragSize=3,
maxFragSize=7, modTy=list(basMod=c("b","y"))) # should find 10term +10inter
## This function returns a list
str(identP1)
## $massMatch identifies each group of matches between predicted and measured masses
```
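The names of the elements of \code{$massMatch} refer to the predicted fragments, while the names inside each element point to the experimental input masses. A quick look at the first groups of matches :
```{r identifFixedModif2, echo=TRUE}
## predicted fragment (element name) matched to measured mass(es) (inner names)
head(identP1$massMatch, 3)
```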
However, due to real-world imprecision during the process of measuring m/z, we used here a default tolerance of 5 ppm.
This brings us to one of the reasons why fragmentation is so important in proteomics :
Without fragmentation, mass spectrometry of entire proteins would struggle to resolve most naturally occurring iso-variants or even related proteins.
Thanks to fragmentation, numerous overlapping fragments will occur, which finally allow reconstructing most parts of the original protein.
Thank you for your interest in this package.
This package is still under development; new functions will be added in the next version.
## Acknowledgements
This package would not have been possible without the very dedicated and hard work of my collaborator Huilin Li at Sun Yat-Sen University in China.
The author wants to acknowledge the support by the [IGBMC](http://www.igbmc.fr/) (CNRS UMR 7104, Inserm U 1258, UdS), the [proteomics platform of the IGBMC](http://proteomics.igbmc.fr/fr/), [CNRS](http://www.cnrs.fr/), [IGBMC](http://www.igbmc.fr/), [Université de Strasbourg](https://www.unistra.fr) and [Inserm](https://www.inserm.fr/).
## Appendix: Session-Info
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/inst/doc/wrTopDownFragVignette1.Rmd
|
---
title: "Getting started with wrTopDownFrag"
author: Wolfgang Raffelsberger
date: '`r Sys.Date()`'
output:
knitr:::html_vignette:
toc: true
fig_caption: yes
pdf_document:
highlight: null
number_sections: no
vignette: >
%\VignetteIndexEntry{wrProteoVignette1}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
## Introduction
This package contains tools for the use in [TopDown Proteomics](https://en.wikipedia.org/wiki/Top-down_proteomics).
Proteomics is referred to the technique of idenifying all proteins in a given sample. Typically this is done by using
[mass spectrometry](https://en.wikipedia.org/wiki/Mass_spectrometry).
This technqiue returns primarily 'm/z' (mass over charge) measures, in most cases this can be resolved to molecular masses.
Since mass spectrometry is rather well suited to identifying small molecules, it has become common practice to first digest proteins into smaller units (ie peptides),
which can be easily identified by mass spetrometry. This technique is referred to as __"bottom-up" proteomics__.
Further high energy fragmentation of proteins or peptides directly within the mass spetrometer also helps very much improving the identification rate (MS-MS or MS2).
To overcome some of the drawbacks associated with the bottom-up approach, more recent developments of mass spectrometers allow the
identification of full length proteins (from samples with few proteins). This approach is called __"top-down proteomics"__ (see also
[Chen et al, 2018](https://doi.org/10.1021/acs.analchem.7b04747), [Skinner et al, 2018](https://doi.org/10.1038/nchembio.2515) or
[Li et al, 2018](https://doi.org/10.1038/nchem.2908)).
In this context high energy random fragmentation of proteins/peptides within the mass spectrometer plays an important role, too.
This approach produces fragments conainting on of the original start/end-sites (terminal fragments) and, depending on the energy settings, furher internal fragments.
Of course, larger parent proteins/peptides will give even more complex patterns of internal fragments.
The pattern of resulting fragments for a given precursor protein/peptide allows better identification and
provides further valuable information about the (3-dimensional) conformation of the initial proteins (see also [Haverland et al, 2017](https://doi.org/10.1007/s13361-017-1635-x)).
This project got started to help analyzing internal fragments from [FT-ICR mass-spectrometry](https://en.wikipedia.org/wiki/Fourier-transform_ion_cyclotron_resonance).
At the time of beginning none or only very limited tools were available for this task (this situation is changing since 2019).
Since there is already software available for transforming initial lists of m/z values into monoisotopic values, the aim was to continue further to the identification of m/z peaks after a deconvolution step to assign the most likely peptide/proptein sequence.
Please refer eg to [Wikipedia: monoisotopic mass](https://en.wikipedia.org/wiki/Monoisotopic_mass) for details on molecular mass and deconvolution.
Initial developments were performed based on data from [FT-ICR mass-spectrometry](https://en.wikipedia.org/wiki/Fourier-transform_ion_cyclotron_resonance), but the overal concept may be applied to any kind of mass-spectrometry data.
When developing this package particular attention was brought to the fact that entire proteins may very well still carry embedded ions in catalytic cores or other rare modifications.
With the tools pesented here, the link between parent- and children fragments was not further taken into account since we did not expect 100 percent pure parent species entering the fragmentation step.
In summary, this package aims to provide tools for the identification of proteins from monoisotopic m/z lists, and in particular,
to consider and identify all possible internal fragments resulting from fragmentation performed during mass-spectromery analysis.
To get started, we need to load the packages [wrMisc](https://CRAN.R-project.org/package=wrMisc) and
[wrProteo](https://CRAN.R-project.org/package=wrProteo), available from [CRAN](https://cran.r-project.org/).
And of course we need to charge this package.
```{r, include = FALSE}
knitr::opts_chunk$set(collapse=TRUE, comment = "#>")
```
```{r setup1, echo=TRUE, warnings=FALSE}
library(wrMisc)
library(wrProteo)
library(wrTopDownFrag)
```
Further information about the package versions used for making this vignette can be found in the appendix 'Session-info'.
For manipulating peptide/protein sequences we will use functions for working with one-letter code amino-acid sequences provided by package [wrProteo](https://CRAN.R-project.org/package=wrProteo).
## Nomenclature
This describes how chemical modifications on amino-acids (like oxygenation) are abbreviated and what exact chemical modification it referrs to.
In term of nomenclature we'll stick to these abbreviations :
```{r AAfragSettings1, echo=TRUE}
## common settings
str(AAfragSettings())
```
Here we can see that 'a','b' and 'c'-ions are grouped as 'Nterm' or that the phosporylation modification 'p' is taken into consideration at 'S','T' or 'Y' amino-acid residues.
The section __$modChem__ describes/defines exactely how many molecules will be added or removed with the various modifications available in this package.
Molecular (mono-isotopic) masses of the atomes used here are taken the package [wrProteo](https://CRAN.R-project.org/package=wrProteo), intially they were taken from [Unimod](http://www.unimod.org/masses.html). They can be easily updated, if in the future, (mono-isotopic) molecular masses will be determined with higher precision (ie more digits).
## Obtaining Molecular Mass For Chemical Structures (using wrProteo)
The package [wrProteo](https://CRAN.R-project.org/package=wrProteo) is used to convert summed chemical formulas into molecular mass.
```{r massDeFormula1, echo=TRUE}
# Standard way to obtain the (monoisotopic) mass of water (H20) or a phosphorylation (PO3)
# Note that the number a molecule appears must be written in front of the molecule (no number means one occurance)
massDeFormula(c("2HO", "P3O"))
# Undereith this runs (for H20):
2*.atomicMasses()["H","mono"] +.atomicMasses()["O","mono"] # H2O
```
Atomic masses can be calulated either as 'average mass' or 'monoisotopic mass' ([Wikipedia: monoisotopic mass](https://en.wikipedia.org/wiki/Monoisotopic_mass)), the latter is commonly used in mass-spectrometry and will be used by default in this package.
### Molecular mass of peptides and proteins
At this level we can compute the expected mass of uncharged proteins/peptides as defined by their size.
Of couse, protein isomers (ie same total composition but different sequence) get the same mass.
```{r convAASeq2mass1, echo=TRUE}
# Let's define two small amino-acid sequences
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT", pepC2="PECEPTRT")
# The sequence converted to mass
convAASeq2mass(protP)
```
As mentinned, this package assumes that experimental values have already been deconvoluted, ie that only the mono-charged peak provided and isotopic patterns have been reduced to the main representative isotope.
In line with this assumption, default predictions are mono-isotopic masses.
## Fragmenting a Peptide/Protein -Sequence
With 'Fragmentation' techniques we refer to technqiues like shooting electrons or IR-waves allowing to break larger molecules into smaller molecules (of different composition).
In order to check if fragmentation yields random cleavage or raher directed distribution of cleavage sites,
it is necessary to predict all possible cleavage sites.
The complexity of this simple task increases about exponentially with protein size.
At this level, the complexity increases so much, that only a few (longer) full length proteins can be treated at once within a reasonable amount of time.
With large proteins (more than 600 aa length) this may consume considerable amounts of RAM.
When designing this package care has been taken to run infractructure intensive as efficent as possible, but working with complex samples/proteomes it is still beyond technical limits.
Here a very simplified view on the theoretical number of terminal and internal fragments.
Fixed modifications do not change the number of expected fragments
Note, that this simplification does not include variable modifications (see also the next section).
```{r nFragm1, out.width="150%", out.heigth="80%", echo=TRUE}
marks <- data.frame(name=c("Ubiquitin\n76aa","Glutamate dehydrogenase 1\n501aa"),length=c(76,501))
layout(matrix(1:2,ncol=2))
plotNTheor(x=20:750, log="", mark=marks)
plotNTheor(x=20:750, log="xy", mark=marks)
mtext("log/log scale", cex=0.8, line=0.1)
```
### Fragmenting a protein sequence
Random cleavege for a sample collection of proteins can be obtained using the functions \code{fragmentSeq()} or \code{makeFragments()}
```{r fragmentSeq1, echo=TRUE}
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT")
## Basic output
fragmentSeq(protP[1], minSize=3, internFragments=TRUE, pref="pepK")
```
```{r makeFragments1, echo=TRUE}
## Elaborate output
protP2 <- cbind(na=names(protP), se=protP, ma=wrProteo::convAASeq2mass(protP,seqName=TRUE))
pepT1 <- makeFragments(protTab=protP2, minFragSize=3, maxFragSize=9, internFra=TRUE)
```
```{r makeFragments2, echo=TRUE}
head(pepT1)
dim(pepT1)
## The repartition between types of fragments :
table(pepT1[,"ty"])
## Types of ambiguities encountered
table(pepT1[,"ambig"])
```
Even such a small example gives already 61 possible peptides (without counting modfications).
Of course, it is quite common to obatin a high degree of ambiguities with short peptides since they are less likely unqique.
## Fixed And Variable Modifications
In real-world biology protein modifcations are common.
Conceptually one can distuinguish two cases : With 'fixed modificatons' it is presumed that all protein moleculs of a given species (ie sequence)
do carry exactely the same modification(s). Alteratively one may suggest that only a portion of the molecules for a given protein-species carry a given modification.
Fixed modifications do not increase the search space, since a given value corresponding to the change of mass gets added or subtracted.
In the case of varaible modifications this increases the search space since the modificed as well as the unmodified mass will be considered when comparig to experimental masses. In particular, when multiple amino-acids on the same protein may get modified alternatively this increases the search space.
Then one may consider multiple modifications, for example 0 to 2 out of 5 Serine residues in the protein sequence may carry a phosporylation ...
In order to reduce the apparent complexity, several conceptual compromises have been taken.
*) The presence of charged amino-acids has been used to dismiss all fragments not containing a charged amino-acid,
default has been set to positive charge (K,H and R).
*) When identifying variable modifications, the non-modified isoform must be identified first to take the variable modification in account.
## Basic Identification Including Fixed Modifications
Now we are ready to compare a list of experimental m/z values to a set of protein sequences ...
### Tolerance
In mass spectrometry it is common to use relative tolerance-limits when it comes to identification as 'ppm'.
This means that peaks not having any predicted peptides/ions in a predefined ppm-range will be omitted.
When the search space gets very crowded, ie when many peptide-fragments are predicted, there is of course a considerable risk
that some predicted fragments/ions are so close that multiple predicted peptides/ions have to be consiered in a a given ppm-range.
### Identification Example
First let's make a little toy example using a hypothetic protein/peptide sequence. The molecular masses below have been predicted using the [Prospector tool](http://prospector.ucsf.edu/prospector/cgi-bin/msform.cgi?form=msproduct) from UCSF. We'll add than a litle bit of random noise as a simultation for experimental data. The table below is not exhaustive for all potentially occurring fragments, as experimental data would not be expected to be complete either.
As modifications to test some cases of loss of water (H20) and loss of ammonia (NH3) were prepared.
```{r toyData1, echo=TRUE}
# The toy peptide/protein sequnce
protP <- c(pepK="KPEPTIDRPEP", pepC="CEPEPTRT")
obsMass1 <- cbind(a=c(424.2554,525.3031,638.3872,753.4141,909.5152,1006.5680,1135.6106),
b=c(452.2504,553.2980,666.3821,781.4090,937.5102,1034.5629,1163.6055),
x=c(524.2463,639.2733,752.3573,853.4050,950.4578,1079.5004,1176.5531),
y=c(498.2671,613.2940,726.3781,827.4258,924.4785,1053.5211,1150.5739),
bdH=c(434.2398,535.2875,648.3715,763.3985,919.4996,1016.5524,1145.5949),
ydH=c(480.2565,1132.5633,595.2835,708.3675,809.4152,906.4680,1035.5106),
bi=c(498.2307,583.3198,583.3198,611.3148,680.3726,712.3624,712.3624),
bidH=c(662.3620,694.3519,694.3519,791.4046,791.4046,791.4046,888.4574),
bidN=c(663.3461,695.3359,695.3359,792.3886,792.3886,792.3886,889.4414),
ai=c(652.3777,684.3675,684.3675,781.4203,781.4203,781.4203,878.4730) )
rownames(obsMass1) <- c("P","T","I","D","R","P","E") # only for N-term (a & b)
## This example contains several iso-mass cases
length(obsMass1)
length(unique(as.numeric(obsMass1)))
```
Iso-peptides will show up with the same mass :
```{r toyData2, echo=TRUE}
## have same mass
## 480.2201 DRPE-H2O & y4-H2O
## 583.3198 PTIDR & TIDRP ; 565.3093 PTIDR-H2O & TIDRP-H2O
## 809.4152 y7-H2O & EPTIDRP
## 684.3675 TIDRPE-CO & EPTIDR-CO ; 694.3519 EPTIDR-H2O & TIDRPE-H2O
## 712.3624 TIDRPE & EPTIDR
```
```{r toyData3, echo=TRUE}
# Now we'll add some random noise
set.seed(2020)
obsMass2 <- as.numeric(obsMass1)
obsMass2 <- obsMass2 + runif(length(obsMass2), min=-2e-4, max=2e-4)
```
Now let's use these numbers as experimental values and compare them to all theoretical values :
For example, the peptide-sequences 'PTIDR' and 'TIDRP' may be derived from our first toy-sequence; they contain exactly the same atoms and thus have iso-masses.
Without any further information it is impossible to know which one of them is/was truly present in a given sample.
Thus, the corresponding mass will be called an ambiguous identification.
```{r identifFixedModif1, echo=TRUE}
identP1 <- identifFixedModif(prot=protP[1], expMass=obsMass2, minFragSize=3,
maxFragSize=7, modTy=list(basMod=c("b","y"))) # should find 10term +10inter
## This function returns a list
str(identP1)
## $masMatch identifies each
```
However, due to real-world imprecision when measuring m/z, a default tolerance of 5 ppm was used here.
This brings us to one of the reasons why fragmentation is so important in proteomics :
without fragmentation, mass spectrometry of entire proteins struggles to resolve most naturally occurring iso-variants or even related proteins.
Thanks to fragmentation, numerous overlapping fragments occur, which finally allows reconstructing most parts of the original protein.
Thank you for your interest in this package.
This package is still under development; new functions will be added in future versions.
## Acknowledgements
This package would not have been possible without the very dedicated hard work of my collaborator Huilin Li at Sun Yat-Sen University in China.
The author wants to acknowledge the support by the [IGBMC](http://www.igbmc.fr/) (CNRS UMR 7104, Inserm U 1258, UdS), the [proteomics platform of the IGBMC](http://proteomics.igbmc.fr/fr/), [CNRS](http://www.cnrs.fr/), [IGBMC](http://www.igbmc.fr/), [Université de Strasbourg](https://www.unistra.fr) and [Inserm](https://www.inserm.fr/).
## Appendix: Session-Info
```{r sessionInfo, echo=FALSE}
sessionInfo()
```
|
/scratch/gouwar.j/cran-all/cranData/wrTopDownFrag/vignettes/wrTopDownFragVignette1.Rmd
|
#' Identify Constant Features of an Object
#'
#' Identifies constant features of an object. Generic, with method for data.frame.
#'
#' @export
#' @keywords internal
#' @family constant
#' @param x object
#' @param ... passed arguments
constant <- function(x,...)UseMethod('constant')
#' Identify Constant Features of a Data Frame
#'
#' Returns columns of a data.frame whose values do not vary within subsets
#' defined by columns named in \dots. Defaults to groups(x) if none are supplied;
#' for ungrouped data without \dots, columns constant across the whole data frame are returned.
#'
#'
#' @export
#' @family constant
#' @param x object
#' @param ... optional grouping columns (named arguments are ignored)
#' @return data.frame (should be same class as x)
#' @import dplyr
#' @importFrom tidyr gather
#' @importFrom rlang f_rhs syms as_string
#' @importFrom magrittr %<>%
#' @examples
#' library(dplyr)
#' constant(Theoph) # data frame with 0 columns and 1 row
#' constant(Theoph, Subject) # Subject Wt Dose Study
#' Theoph$Study <- 1
#' constant(Theoph) # Study
#' constant(Theoph, Study) # Study
#' constant(Theoph, Study, Subject) # Subject Wt Dose Study
#' Theoph <- group_by(Theoph, Subject)
#' constant(Theoph) # Subject Wt Dose Study
#' constant(Theoph, Study) # Study
#' foo <- data.frame(x = 1)
#' foo <- group_by(foo, x)
#' class(foo) <- c('foo', class(foo))
#' stopifnot(identical(class(foo), class(constant(foo))))
constant.data.frame <- function(x,...){
theClass <- class(x)
# determine the legitimate un-named arguments
args <- quos(...)
args <- lapply(args,f_rhs)
vars <- args[names(args) == '']
vars <- sapply(vars, as.character)
if(!length(vars)) vars <- character(0) # else was named list
stopifnot(all(vars %in% names(x)))
tars <- setdiff(names(x),vars) # target vars to summarize
# reconcile group_vars() with supplied groups (vars)
# we consciously avoid group_by(),
# which can change the class of the object
# instead, we invoke the explicit grouping
# mechanism mutate(.by)
# however, mutate(grouped_df, .by = ) is illegal.
# thus, any reconciliation between vars and group_vars()
# needs to defer to group_vars()
# conflict only exists when x is grouped_df
# (or less restrictively, when group_vars has length?)
# AND vars has length (implying attempted over-ride)
# least restrictive reconciliation is to re-group with vars
grouped <- inherits(x, 'grouped_df')
if(grouped && length(vars)) x %<>% group_by(across(all_of(vars)))
# regardless above, tars cannot include group_vars
tars %<>% setdiff(group_vars(x))
# capture names of columns where within-cell values
# are singular for all cells as defined by groups
y <- data.frame() # placeholder
if( grouped) y <- mutate(x, across(all_of(tars), ~length(unique(.x)))) # already grouped
if(!grouped) y <- mutate(x, across(all_of(tars), ~length(unique(.x))), .by = all_of(vars))
# test for singularities across groups, if any
y %<>% distinct %>% select(group_cols() | all_of(vars) | where(~ all(.x == 1)))
nms <- names(y)
# recover the order of these as in x
nms <- intersect(names(x), nms)
# limit x to just these columns
x %<>% select(all_of(nms))
# find distinct combinations of values
# but distinct() drops "decorated" from "decorated", "grouped_df"
# unique() does not
x %<>% distinct # per help file, columns not modified since ... is empty.
class(x) <- theClass
return(x)
}
# #' Identify Constant Features of a Grouped Data Frame
# #'
# #' Returns columns of a grouped_df whose values do not vary within subsets defined by groups.
# #' If any grouping arguments (dots) are supplied, existing groups are over-ridden.
# #'
# #' @export
# #' @family constant
# #' @param x object
# #' @param ... grouping columns
# #' @return grouped data.frame
# #' @import dplyr
# constant.grouped_df <- function(x,...){
# args <- quos(...)
# args <- lapply(args,f_rhs)
# vars <- args[names(args) == '']
# other <- args[names(args) != '']
# vars <- sapply(vars, as.character)
# groups <- vars
# if(!length(vars)) groups <- unlist(groups(x))
# x <- ungroup(x)
# y <- constant(x, !!!groups)
# y <- group_by(y, !!!groups)
# y
# }
|
/scratch/gouwar.j/cran-all/cranData/wrangle/R/constant.R
|
#' Join Data Safely
#'
#' Joins data safely. Generic, with method for data.frame.
#' @keywords internal
#' @param x object of dispatch
#' @param ... arguments to methods
#' @seealso \code{\link{safe_join.data.frame}}
#' @family safe_join
#' @export
#' @examples
#' example(safe_join.data.frame)
safe_join <- function(x, ...)UseMethod('safe_join')
#' Join Data Frames Safely
#'
#' Joins data frames safely. I.e., a left join that
#' cannot alter row order or number. Supports the
#' case where you only intend to augment existing
#' rows with additional columns and are expecting
#' singular matches. Gives an error if row order
#' or number would have been altered by a left join.
#' @param x data.frame
#' @param y data.frame
#' @param ... passed to dplyr::left_join
#' @family safe_join
#' @export
#' @importFrom dplyr left_join
#' @examples
#' library(magrittr)
#' x <- data.frame(code = c('a','b','c'), value = c(1:3))
#' y <- data.frame(code = c('a','b','c'), roman = c('I','II','III'))
#' x %>% safe_join(y)
#' try(
#' x %>% safe_join(rbind(y,y))
#' )
safe_join.data.frame <- function(x, y, ...){
x$safe_join <- 1:nrow(x)
before <- x$safe_join
z <- left_join(x, y, ...)
after <- z$safe_join
stopifnot(identical(length(before), length(after)))
stopifnot(all(before == after))
z$safe_join <- NULL
z
}
|
/scratch/gouwar.j/cran-all/cranData/wrangle/R/safejoin.R
|
globalVariables(c('static_','original_'))
#' Arrange by groups.
#'
#' As of 0.5, dplyr::arrange ignores groups. This function gives the old behavior as a method for generic base::sort. Borrowed from Ax3man at https://github.com/hadley/dplyr/issues/1206.
#' @param x grouped_df
#' @param decreasing logical (ignored)
#' @param ... further sort criteria
#' @import dplyr magrittr
#' @importFrom tidyr spread
#' @export
#' @family sort
#' @return grouped_df
#' @examples
#' library(dplyr)
#' head(sort(group_by(Theoph, Subject, Time)))
sort.grouped_df <- function(x, decreasing = FALSE, ...) {
x <- group_by(x, ..., .add = TRUE)
x <- dplyr::arrange(x, .by_group = TRUE )
x
}
#' Sort column subsets.
#'
#' Sort column subsets.
#' @param x data.frame
#' @param ... columns to sort
#' @export
#' @family util
#' @keywords internal
#' @return grouped_df
detect <- function(x,...) x %>%
ungroup %>%
transmute(...) %>%
group_by(across(everything())) %>%
sort
#' Show unique combinations of items in specified columns
#'
#' Shows unique combinations of items in specified columns (unquoted).
#' @param x data.frame
#' @param ... columns to show
#' @export
#' @family util
#' @return grouped_df
#' @examples
#' itemize(mtcars, cyl, gear, carb)
itemize <- function(x,...)x %>% detect(...) %>% unique
#' Count unique combinations of items in specified columns.
#'
#' Counts unique combinations of items in specified columns (unquoted).
#' @param x data.frame
#' @param ... columns to show
#' @export
#' @family util
#' @return grouped_df
#' @examples
#' enumerate(mtcars, cyl, gear, carb)
enumerate <- function(x,...)x %>% detect(...) %>% summarise(count=n())
# #' Fetch the key.
# #'
# #' Fetches the key of an object.
# #' @param x object of dispatch
# #' @param ... other arguments
# #' @family key
# #' @keywords internal
# #' @export
# #'
# key <- function(x,...)UseMethod('key')
#' Calculate naGroups.
#'
#' Calculates naGroups.
#' @param x object of dispatch
#' @param ... other arguments
#' @export
#' @family naGroups
#' @keywords internal
naGroups <- function(x,...)UseMethod('naGroups')
#' Calculate dupGroups.
#'
#' Calculates dupGroups.
#' @param x object of dispatch
#' @param ... other arguments
#' @export
#' @family dupGroups
#' @keywords internal
dupGroups <- function(x,...)UseMethod('dupGroups')
#' Report status.
#'
#' Reports the status of an object.
#' @param x object of dispatch
#' @param ... other arguments
#' @export
#' @family status
#' @keywords internal
#' @examples
#' library(dplyr)
#' status(group_by(Theoph, Subject, Time))
status <- function(x,...)UseMethod('status')
#' Show unsorted elements.
#'
#' Shows unsorted elements.
#' @param x object of dispatch
#' @param ... other arguments
#' @seealso \code{\link{unsorted.data.frame}}
#' @export
#' @family unsorted
#' @keywords internal
unsorted <- function(x,...)UseMethod('unsorted')
#' Show misplaced elements.
#'
#' Shows misplaced elements.
#' @param x object of dispatch
#' @param ... other arguments
#' @export
#' @family unsorted
#' @keywords internal
misplaced <- function(x,...)UseMethod('misplaced')
#' Index records whose relative positions would change if sorted.
#'
#' Indexes records whose relative positions would change if sorted, i.e. records that would not have the same nearest neighbors (before and after). unsorted() returns the records corresponding to this index.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family unsorted
#' @seealso \code{\link{na}} \code{\link{dup}}
#' @return logical with length nrow(x)
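#' @examples
#' # a minimal sketch: rows 3 and 4 are swapped with respect to Time,
#' # so their neighborhoods (and that of their shared neighbor) change on sorting
#' misplaced(data.frame(Time = c(1, 2, 4, 3)), Time)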
#' @importFrom dplyr arrange
misplaced.data.frame <- function(x,...){
args <- quos(...)
args <- lapply(args,f_rhs)
vars <- args[names(args) == '']
vars <- sapply(vars, as.character)
if(!length(vars)) vars <- character(0) # else was named list
stopifnot(all(vars %in% names(x)))
if(length(vars)) x %<>% group_by(across(all_of(vars)))
x$original_ <- as.double(seq_len(nrow(x)))
x$leads_ <- lead(x$original_, default = Inf)
x$lags_ <- lag(x$original_, default = -Inf)
x %<>% arrange(.by_group = TRUE) # does nothing if no groups present
x$now_leads_ <- lead(x$original_, default = Inf)
x$now_lags_ <- lag(x$original_, default = -Inf)
x$static_ <- with(x, leads_ == now_leads_ & lags_ == now_lags_)
x %<>% arrange(original_)
return(!x$static_)
}
#' Extract records whose relative positions would change if sorted.
#'
#' Extracts records whose relative positions would change if sorted, i.e. records that would not have the same nearest neighbors (before and after). misplaced() returns the index that extracts these records.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family unsorted
#' @seealso \code{\link{na}} \code{\link{dup}}
#' @return data.frame, possibly grouped_df
#' @importFrom dplyr arrange
unsorted.data.frame <- function(x,...)x[misplaced(x, ...), , drop = FALSE]
#' Index records with NA values of grouping variables.
#'
#' Indexes records with NA values of grouping variables.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family naGroups
#' @return logical
naGroups.data.frame <- function(x, ...){
args <- quos(...)
args <- lapply(args,f_rhs)
vars <- args[names(args) == '']
vars <- sapply(vars, as.character)
if(!length(vars)) vars <- character(0) # else was named list
stopifnot(all(vars %in% names(x)))
if(length(vars)) x %<>% group_by(across(all_of(vars)))
key <- group_vars(x)
if (!all(key %in% names(x)))
stop("nonexistent groups(s)")
if (nrow(x) == 0)
return(logical(0))
if(!length(key)){ # i.e. no groups, therefore no NA groups
return(rep(FALSE, nrow(x)))
}
y <- sapply(key, function(k) is.na(x[[k]]))
if (nrow(x) == 1)
dim(y) <- c(1, length(y))
as.logical(apply(y, 1, sum))
}
#' Index records with duplicate or duplicated values of grouping variables.
#'
#' Indexes records with duplicate or duplicated values of grouping variables. If b follows a and is the same, then b is a duplicate, a is duplicated, and both are shown.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @return grouped_df
#' @export
#' @family dupGroups
#' @return logical
dupGroups.data.frame <- function(x, ...){
args <- quos(...)
args <- lapply(args,f_rhs)
vars <- args[names(args) == '']
vars <- sapply(vars, as.character)
if(!length(vars)) vars <- character(0) # else was named list
stopifnot(all(vars %in% names(x)))
if(length(vars)) x %<>% group_by(across(all_of(vars)))
# if there are no groups, then none are duplicated
if(!length(group_vars(x))) return(rep(FALSE, nrow(x)))
x %<>% select(group_cols())
# https://www.statology.org/dplyr-find-duplicates/
# duplicated(y) | duplicated(y, fromLast = TRUE)
x %<>% group_by(across(everything()))
x %<>% mutate(wrangle_dup_ = n() > 1)
x$wrangle_dup_
}
#' Report status with respect to grouping variables.
#'
#' Reports status with respect to grouping variables.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family status
#' @return returns x invisibly (as originally grouped)
#' @examples
#' library(dplyr)
#' status(Theoph)
#' status(Theoph, Subject)
#' status(group_by(Theoph, Subject, Time))
#' @seealso
#' \code{\link{na}}
#' \code{\link{dup}}
#' \code{\link{unsorted}}
#' \code{\link{informative}}
#' \code{\link{ignore}}
#' \code{\link{itemize}}
#' \code{\link{enumerate}}
#' \code{\link{sort.grouped_df}}
status.data.frame <- function (x, ...)
{
o <- x
# determine the legitimate un-named arguments
args <- quos(...)
args <- lapply(args,f_rhs)
vars <- args[names(args) == '']
vars <- sapply(vars, as.character)
if(!length(vars)) vars <- character(0) # else was named list
stopifnot(all(vars %in% names(x)))
if(length(vars)) x %<>% group_by(across(all_of(vars)))
cat("Source: local data frame ", dplyr::dim_desc(x), "\n", sep = "")
cat("Groups: ", group_vars(x), "\n", sep = " ")
cat("NAs: ", sum(naGroups(x)), "\n", sep = "")
cat("duplicates: ", sum(dupGroups(x)), "\n", sep = "")
cat("unsorted: ", sum(misplaced(x)), "\n", sep = "")
cat("\n")
invisible(o)
}
#' Show na elements.
#'
#' Shows na elements.
#' @param x object of dispatch
#' @param ... other arguments
#' @seealso \code{\link{na.data.frame}} \code{\link{dup}} \code{\link{weak}} \code{\link{unsorted}}
#' @export
#' @keywords internal
#' @family na
na <- function(x, ...)UseMethod('na')
#' Show duplicate or duplicated elements.
#'
#' Shows duplicate or duplicated elements.
#' @param x object of dispatch
#' @param ... other arguments
#' @seealso \code{\link{dup.data.frame}} \code{\link{na}} \code{\link{weak}} \code{\link{unsorted}}
#' @export
#' @keywords internal
#' @family dup
dup <- function(x,...)UseMethod('dup')
#' Show na, duplicate, or duplicated elements.
#'
#' Shows na, duplicate, or duplicated elements.
#' @param x object of dispatch
#' @param ... other arguments
#' @seealso \code{\link{weak.data.frame}}
#' @export
#' @keywords internal
#' @family weak
weak <- function(x,...)UseMethod('weak')
#' Show records with NA values of grouping variables.
#'
#' Shows records with NA values of grouping variables.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family na
#' @return data.frame
na.data.frame <- function(x,...)x[naGroups(x, ...), , drop = FALSE]
#' Show records with duplicate or duplicated values of grouping variables.
#'
#' Shows records with duplicate or duplicated values of grouping variables.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family dup
#' @return data.frame
#' @examples
#' library(dplyr)
#' dupGroups(mtcars)
#' dupGroups(group_by(mtcars, mpg))
#' dup(group_by(mtcars, mpg))
dup.data.frame <- function(x, ...) x[dupGroups(x, ...), , drop = FALSE]
#' Show records with NA, duplicate or duplicated values of grouping variables.
#'
#' Shows records with NA, duplicate or duplicated values of grouping variables.
#' @param x data.frame
#' @param ... optional grouping columns (named arguments are ignored)
#' @export
#' @family weak
#' @return data.frame
weak.data.frame <- function(x,...)x[naGroups(x, ...) | dupGroups(x, ...), , drop = FALSE]
singular <- function(x,...)length(unique(x)) == 1
#' Find unique records for subset of columns with one unique value.
#'
#' Finds unique records for subset of columns with one unique value.
#' @param x data.frame
#' @param ... ignored
#' @export
#' @family util
#' @return data.frame
static <- function(x,...){
  # summarise_each()/funs()/select_() are defunct or deprecated in current dplyr;
  # the across()/all_of() equivalents below keep the original behavior
  s <- x %>% summarise(across(everything(), singular))
  nms <- names(s)[sapply(s,function(col)all(col == TRUE))]
  x <- select(x, all_of(nms))
x <- x %>% unique
x
}
#' Drop columns in x that are present in y.
#'
#' Drops columns in x that are present in y.
#' @param x data.frame
#' @param y data.frame
#' @param ... ignored
#' @export
#' @family ignore
#' @return data.frame
ignore <- function(x,y,...){
x[,! names(x) %in% names(y), drop=FALSE]
}
#' Drop columns in x that are entirely NA.
#'
#' Drops columns in x that are entirely NA.
#' @param x object of dispatch
#' @param ... passed
#' @seealso \code{\link{informative.data.frame}}
#' @examples
#' head(Theoph)
#' Theoph$Dose <- NA
#' head(informative(Theoph))
#' @export
#' @family informative
informative <- function(x,...)UseMethod('informative')
#' Drop columns in x that are entirely NA.
#'
#' Drops columns in x that are entirely NA.
#' @param x data.frame
#' @param ... ignored
#' @export
#' @family informative
#' @return data.frame
informative.data.frame <- function(x,...)x[,sapply(x,function(col)any(!is.na(col))),drop=FALSE]
|
/scratch/gouwar.j/cran-all/cranData/wrangle/R/wrangle.R
|
utils::globalVariables(c("rawdata","stratum"))
#' Automatic rounding to a reasonable length, based on largest number
#'
#' \code{roundR} takes a vector or matrix of numbers and returns rounded values
#' with selected precision and various formatting options.
#'
#' @param roundin A vector or matrix of numbers.
#' @param smooth A logical specifying if you want rounding before the dot
#' (e.g. 12345 to 12300).
#' @param level A number specifying number of relevant digits to keep.
#' @param textout A logical if output is converted to text.
#' @param drop0 A logical if trailing zeros should be dropped.
#' @param .german A logical if german numbers should be reported.
#' @param .bigmark A logical if big.mark is to be shown, mark itself
#' depends on parameter .german.
#'
#' @return vector of type character (default) or numeric,
#' depending on parameter textout.
#'
#' @examples
#' roundR(1.23456, level = 3)
#' roundR(1.23456, level = 3, .german = TRUE)
#' roundR(1234.56, level = 2, smooth = TRUE)
#' @export
roundR <- function(roundin, level = 2, smooth = FALSE,
textout = TRUE, drop0 = FALSE, .german = FALSE, .bigmark = FALSE) {
if (.german) {
textout <- TRUE
}
decimalmark <- ifelse(.german, ",", ".")
bigmark <- ifelse(.german, ".", ",")
if (!.bigmark) {
bigmark <- ""
}
if (!is.matrix(roundin)) {
roundin <- matrix(roundin)
}
roundin <- as.numeric(roundin)
roundout <- roundin
roundlevel <- 0
roundlevel <- max(
0,
level - floor(
log10(
max(abs(roundin), na.rm = TRUE)
) + 1
)
)
if(is.infinite(roundlevel)) {
roundlevel <- level
}
roundout[which(!is.na(roundout))] <-
round(roundin[which(!is.na(roundin))], roundlevel)
if (smooth & max(abs(roundout), na.rm = TRUE) != 0) {
roundout[which(!is.na(roundout))] <-
round(
roundin[which(!is.na(roundin))] /
10^ceiling(log10(max(abs(roundin), na.rm = TRUE)) - level)
) *
10^ceiling(log10(max(abs(roundin), na.rm = TRUE)) - level)
}
if (textout) {
roundout[which(!is.na(roundout))] <-
formatC(roundout[which(!is.na(roundout))],
format = "f",
digits = roundlevel, drop0trailing = drop0,
big.mark = bigmark,
decimal.mark = decimalmark
)
}
return(roundout)
}
#' Convert significance levels to symbols
#'
#' \code{markSign} returns the symbol associated with a significance level.
#'
#' @param SignIn A single p-value.
#' @param plabel A translation table, predefined with the usual symbols.
#'
#' @returns factor with label as defined in plabel.
#'
#' @examples
#' markSign(0.012)
#' @export
markSign <- function(SignIn, plabel = c("n.s.", "+", "*", "**", "***")) {
SignIn <- as.numeric(SignIn)
SignOut <- cut(SignIn,
breaks = c(-Inf, .001, .01, .05, .1, 1),
labels = rev(plabel)
)
return(SignOut)
}
#' Re-format p-values, avoiding rounding to 0 and adding surprisal if requested
#'
#' \code{formatP} simplifies p-values by rounding to the maximum of p or a
#' predefined level. Optionally < or = can be added, as well as
#' symbols according to significance level.
#'
#' @param pIn A numeric vector or matrix with p-values.
#' @param ndigits Number of digits (default=3).
#' @param textout Cast output to character (default=TRUE)?
#' @param pretext Should = or < be added before p (default=FALSE)?
#' @param mark Should significance level be added after p (default=FALSE)?
#' @param german_num change dot (default) to comma?
#' @param add.surprisal Add surprisal aka Shannon information to p-value (default=FALSE)?
#' @param sprecision Rounding level for surprisal (default=1).
#'
#' @returns vector or matrix (depending on type of pIn) with type character (default) or numeric,
#' depending on parameter textout
#'
#' @examples
#' formatP(0.012345)
#' formatP(0.012345, add.surprisal = TRUE)
#' formatP(0.012345, ndigits = 4)
#' formatP(0.000122345, ndigits = 3, pretext = TRUE)
#' @export
formatP <- function(pIn, ndigits = 3, textout = TRUE, pretext = FALSE,
mark = FALSE, german_num = FALSE,
add.surprisal = FALSE, sprecision = 1) {
decimal.mark <- ifelse(german_num, ",", ".")
pIn_is_matrix <- is.matrix(pIn)
if(pIn_is_matrix){
pIn <- apply(pIn,c(1,2),as.numeric)
} else{
pIn <- as.numeric(pIn)
}
formatp <- NA_character_
if (length(na.omit(pIn))>0) {
if (!pIn_is_matrix) {
pIn <- matrix(pIn)
}
formatp <- apply(
X = pIn, MARGIN = c(1, 2), max,
10**(-ndigits), na.rm = FALSE
) |>
apply(MARGIN = c(1, 2), round, ndigits) |>
apply(
MARGIN = c(1, 2),
formatC, format = "f",
digits = ndigits, drop0trailing = FALSE,
decimal.mark = decimal.mark
)
if (pretext) {
for (row_i in 1:nrow(pIn)) {
for (col_i in 1:ncol(pIn)) {
formatp[row_i, col_i] <- paste(
ifelse(pIn[row_i, col_i] < 10**(-ndigits),
"<", "="
),
formatp[row_i, col_i]
)
}
}
}
if (mark) {
formatp <- matrix(
paste(
formatp,
apply(gsub("[\\<\\=]", "", formatp), c(1, 2), markSign)
),
ncol = ncol(pIn)
)
}
if(add.surprisal){
s <- apply(pIn,MARGIN = c(1,2),surprisal, precision = sprecision)
if(german_num){
s <- str_replace(s,'\\.',',')
}
formatp <- paste0(formatp,', s = ',s)
}
if (textout == FALSE & pretext == FALSE & add.surprisal == FALSE) {
formatp <- apply(formatp, MARGIN = c(1, 2), as.numeric)
}
if(!pIn_is_matrix){
formatp <- as.vector(formatp)
}
}
return(formatp)
}
#' Find numeric index and names of columns based on patterns
#'
#' @description
#' `r lifecycle::badge('superseded')`
#'
#' Function [ColSeeker] extends this by adding class-checks.
#'
#' \code{FindVars} looks up colnames (by default for data-frame rawdata)
#' based on parts of names, using regular expressions. Be warned that
#' special characters as e.g. `[` `(` need to be escaped or replaced by `.`
#' Exclusion rules may be specified as well.
#'
#' @param varnames Vector of patterns to look for.
#' @param allnames Vector of values to detect patterns in; by default: colnames(rawdata).
#' @param exact Partial matching or exact only (adding ^ and $)?
#' @param exclude Vector of pattern to exclude from found names.
#' @param casesensitive Logical if case is respected in matching (default FALSE: a<>A)
#' @param fixed Logical, match as is, argument is passed to [grep()].
#' @param return_symbols Should names be reported as symbols additionally? (Default FALSE)
#'
#' @export
#' @return A list with index, names, backticked names, and symbols
#' @examples
#' FindVars(varnames = c("^c", "g"), allnames = colnames(mtcars))
#' FindVars(varnames = c("^c", "g"), allnames = colnames(mtcars), exclude = "r")
## rawdata <- mtcars
## FindVars(varnames = c("^c", "g"))
FindVars <- function(varnames, allnames = colnames(rawdata),
exact = FALSE, exclude = NA, casesensitive = TRUE,
fixed = FALSE, return_symbols=FALSE) {
# if (is.null(allnames)) {
# allnames <- colnames(get("rawdata"))
# }
if (fixed) {
exact <- FALSE
}
allnames_tmp <- allnames
if (!casesensitive) {
varnames <- tolower(varnames)
allnames_tmp <- tolower(allnames)
exclude <- tolower(exclude)
}
vars <- numeric()
evars <- numeric()
if (exact) {
for (i in 1:length(varnames)) {
vars <- c(vars, grep(paste0("^", varnames[i], "$"), allnames_tmp))
}
vars <- unique(vars)
} else {
for (i in 1:length(varnames)) {
vars <- c(vars, grep(varnames[i], allnames_tmp,
fixed = fixed
))
}
vars <- sort(unique(vars))
if (any(!is.na(exclude))) {
for (i in 1:length(exclude))
{
evars <- c(evars, grep(exclude[i], allnames_tmp))
}
evars <- unique(na.omit(match(
sort(unique(evars)), vars
)))
if (length(evars) > 0) {
vars <- vars[-evars]
}
}
vars <- unique(vars)
}
if(return_symbols) {
return_list <- list(
index = vars,
names = allnames[vars],
bticked = bt(allnames[vars]),
symbols = rlang::syms(allnames[vars]),
count = length(vars))
} else {
return_list <- list(
index = vars,
names = allnames[vars],
bticked = bt(allnames[vars]),
count = length(vars))
}
return(return_list)
}
#' Find numeric index and names of columns based on type and patterns
#'
#' \code{ColSeeker} looks up colnames (by default for tibble rawdata)
#' based on type and parts of names, using regular expressions.
#' Be warned that special characters as e.g. `[` `(` need to be escaped or replaced by `.`
#' Exclusion rules may be specified as well.
#'
#' @param data tibble or data.frame, where columns are to be found; by default rawdata
#' @param namepattern Vector of patterns to look for.
#' @param varclass Vector, only columns of defined class(es) are returned
#' @param exclude Vector of patterns to exclude from found names.
#' @param excludeclass Vector, exclude columns of specified class(es)
#' @param casesensitive Logical if case is respected in matching (default FALSE: a<>A)
#' @param returnclass Logical if classes should be included in output
#'
#' @export
#' @return A list with index, names, and backticked names, optionally the classes as well
#' @examples
#' ColSeeker(data = mtcars, namepattern = c("^c", "g"))
#' ColSeeker(data = mtcars, namepattern = c("^c", "g"), exclude = "r")
## rawdata <- mtcars
## ColSeeker(namepattern = c("^c", "g"), varclass="numeric")
ColSeeker <- function(data=rawdata,
namepattern = '.',
varclass = NULL,
exclude = NULL,
excludeclass = NULL,
casesensitive = TRUE,
returnclass = FALSE) {
allclasses <- sapply(sapply(data,class),paste,collapse = '+')
# allclasses <- allclasses[which(allclasses!='ordered')]
allnames_tmp <- allnames <- colnames(data)
if (!casesensitive) {
namepattern <- tolower(namepattern)
allnames_tmp <- tolower(allnames)
if(!is.null(exclude)) {
exclude <- tolower(exclude)
}
}
vars <- numeric()
evars <- numeric()
for (i in 1:length(namepattern)) {
vars <- c(vars, grep(namepattern[i], allnames_tmp,
fixed = FALSE
))
}
vars <- sort(unique(vars))
if (!is.null(exclude)) {
for (i in 1:length(exclude))
{
evars <- c(evars, grep(exclude[i], allnames_tmp))
}
evars <- unique(na.omit(match(
sort(unique(evars)), vars
)))
if (length(evars) > 0) {
vars <- vars[-evars]
}
}
vars <- unique(vars)
if(!is.null(varclass)){
vars_typed <- NULL
for(type_i in seq_along(varclass)){
vars_typed <- c(vars_typed,
which(grepl(pattern = varclass[type_i], allclasses)))
}
vars <- vars[which(vars %in% vars_typed)]
}
if(!is.null(excludeclass)){
vars_typed <- NULL
for(type_i in seq_along(excludeclass)){
vars_typed <- c(vars_typed,
which(grepl(excludeclass[type_i],allclasses)))
}
vars <- vars[-which(vars %in% vars_typed)]
}
if(returnclass){
return_list <- list(
index = vars,
names = allnames[vars],
bticked = bt(allnames[vars]),
count = length(vars),
varclass = allclasses[vars])
} else {
return_list <- list(
index = vars,
names = allnames[vars],
bticked = bt(allnames[vars]),
count = length(vars))
}
return(return_list)
}
#' Enhanced [kable] with definable number of rows and/or columns for splitting
#'
#' @description
#' `r lifecycle::badge('superseded')`
#'
#' package flextable is a more powerful alternative
#'
#' \code{print_kable} formats and prints tibbles/df's in markdown with splitting
#' into sub-tables with repeated caption and header.
#'
#' @param t table to print.
#' @param nrows number of rows (30) before splitting.
#' @param ncols number of columns (100) before splitting.
#' @param caption header.
#' @param ... Further arguments passed to [kable].
#' @return No return value, called for side effects.
#'
#' @examples
#' \dontrun{
#' print_kable(mtcars, caption = "test")
#' }
#' @export
print_kable <- function(t, nrows = 30, caption = "",
ncols = 100, ...) {
lifecycle::deprecate_warn(when = '0.8.0',
what = 'print_kable()',
with = 'flextable::flextable()') # require(knitr)
for (block_i in 1:ceiling(nrow(t) / nrows)) {
for (col_i in 1:ceiling((ncol(t) - 1) / ncols)) {
if (block_i + col_i > 2) {
cat("\\newpage\n\n")
}
print(
knitr::kable(
t[
(1 + (block_i - 1) * nrows):
min(nrow(t), block_i * nrows),
c(1, (2 + (col_i - 1) * ncols):min((1 + col_i * ncols), ncol(t)))
],
row.names = FALSE,
caption = paste0(
ifelse(block_i + col_i > 2, "continued: ", ""),
caption,
" \n \n "
)
)
)
cat(" \n \n")
}
}
}
#' Enhanced kable with latex
#'
#' \code{pdf_kable} formats tibbles/df's for markdown
#'
#' @param .input table to print
#' @param twidth Default 14
#' @param width1 Width of 1st column, default 6.
#' @param tposition Default left
#' @param innercaption subheader
#' @param caption header
#' @param foot footnote
#' @param escape see kable
#'
#'@return A character vector of the table source code.
#' @export
pdf_kable <- function(.input, width1 = 6,
twidth = 14,
tposition = "left",
innercaption = NULL,
caption = "",
foot = NULL,
escape = TRUE) {
ncols <- ncol(.input)
out <- knitr::kable(.input,
format = "latex", booktabs = TRUE,
linesep = "",
escape = escape, caption = caption,
align = c("l", rep("c", ncols - 1))
) |>
kableExtra::kable_styling(
position = tposition,
latex_options = c(
"striped",
"hold_position"
)
) |>
kableExtra::column_spec(-1, # border_left = TRUE,
width = paste0((twidth - width1) / (ncols - 1), "cm"),
) |>
kableExtra::column_spec(1, bold = TRUE, width = paste0(width1, "cm")) |>
kableExtra::row_spec(0, bold = TRUE)
if (!is.null(innercaption)) {
caption1 <- c(caption = ncols)
names(caption1) <- caption
out <- out |>
kableExtra::add_header_above(caption1, bold = TRUE)
}
if (!is.null(foot)) {
out <- out |>
kableExtra::footnote(general = foot)
}
return(out)
}
#' Shortcut for colnames()
#'
#' \code{cn} lists column names, by default for variable rawdata.
#'
#' @param data Data structure to read column names from.
#'
#' @return Character vector with column names.
#'
#' @examples
#' cn(mtcars)
#' @export
cn <- function(data = rawdata) {
colnames(data)
}
#' Add backticks to names or remove them
#'
#' \code{bt} adds leading and trailing backticks to make illegal variable names
#' usable. Optionally removes them.
#'
#' @param x Names to add backtick to.
#' @param remove Option to remove existing backticks, default=FALSE.
#'
#' @return Character vector with backticks added.
#'
#' @examples
#' bt('name 1')
#'
#' @export
bt <- function(x, remove = FALSE) {
if (remove) {
return(gsub("`", "", x))
} else {
return(paste0("`", x, "`"))
}
}
#' Search within data.frame or tibble
#'
#' \code{tab.search} searches for pattern within a data-frame or tibble,
#' returning column(s) and row(s)
#'
#' @param searchdata table to search in, predefined as rawdata
#' @param pattern regex, for exact matches add ^findme$
#' @param find.all return all row indices or only 1st per column,default=TRUE
#' @param names.only return only vector of colnames rather than list with names
#' and rows, default=FALSE
#'
#' @return A list with numeric vectors for each column giving row numbers
#' of matched elements
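#' @examples
#' # a minimal sketch with hypothetical data: row indices of matches per column
#' tab.search(searchdata = data.frame(a = letters[1:5],
#'                                    b = c("c", "c", "x", "y", "z")),
#'            pattern = "^c$")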
#' @export
tab.search <- function(searchdata = rawdata, pattern,
                       find.all = TRUE, names.only = FALSE) {
if (!is.character(pattern)) {
pattern <- as.character(pattern)
}
positions <- purrr::map(searchdata, str_which, pattern = pattern) |> purrr::compact()
if (!find.all) {
positions <- purrr::map(positions, nth, n = 1)
}
if (names.only) {
positions <- names(positions)
}
return(positions)
}
#' Compute surprisal aka Shannon information from p-values
#'
#' \code{surprisal} takes p-values and returns s, a value representing the
#' number of consecutive heads on a fair coin, that would be as surprising
#' as the p-value
#'
#' @param p a vector of p-values
#' @param precision rounding level with default 1
#'
#' @return a character vector of s-values
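#' @examples
#' # p = 0.05 is about as surprising as 4.3 consecutive heads: -log2(0.05)
#' surprisal(0.05)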
#' @export
surprisal <- function(p, precision = 1){
round(-log2(as.numeric(p)),precision) |> as.character()
}
#' Transform flextable to rmd if non-interactive
#'
#' \code{flex2rmd} takes a flextable and returns a markdown table if not in an interactive session
#'
#' @param ft a flextable
#'
#' @return either a markdown table or the flextable
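#' @examples
#' # a minimal sketch (needs package flextable):
#' \dontrun{
#' flex2rmd(flextable::flextable(head(mtcars)))
#' }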
#' @export
flex2rmd <- function(ft){
if(interactive()){
return(ft)
} else {
return(flextable_to_rmd(ft))
}
}
|
/scratch/gouwar.j/cran-all/cranData/wrappedtools/R/basefunctions.R
|
#' Michaelis-Menten enzyme kinetics model and plot
#'
#' \code{plot_MM} creates a Michaelis-Menten type Enzyme kinetics plot and returns model as well
#'
#' @param data data structure with columns for model data
#' @param substrate colname for substrate concentration
#' @param velocity colname for reaction velocity
#' @param group colname for optional grouping factor
#' @param xlab label for x-axis
#' @param ylab label for y-axis
#' @param title title of the plot
#'
#' @examples
#' MMdata <- data.frame(subst = c(2.00, 1.00, 0.50, 0.25),
#' velo = c(0.2253, 0.1795, 0.1380, 0.1000))
#'
#' plot_MM(data=MMdata,
#' substrate = 'subst',velocity = 'velo')
#'
#' MMdata <- data.frame(subst = rep(c(2.00, 1.00, 0.50, 0.25),2),
#' velo = c(0.2253, 0.1795, 0.1380, 0.1000,
#' 0.4731333, 0.4089333, 0.3473000, 0.2546667),
#' condition = rep(c('C1','C2'),each=4))
#'
#' plot_MM(data=MMdata,substrate = 'subst',
#' velocity = 'velo',group='condition')
#'
#' @return
#' a list with elements "MMfit" and "MMplot"
#'
#' @export
#'
plot_MM <- function(
data,
substrate, velocity, group = NULL,
title = "Michaelis-Menten",
xlab = "substrate", ylab = "velocity"){
if(is.null(group)) {
group <- 'Group'
data[[group]] <- 'all data'
}
fit_list <- list()
plot_list <- list()
groups <- data[[group]] |> as.factor() |> levels()
for(group_i in seq_along(groups)){
    # fit the SSmicmen (Michaelis-Menten) model
groupdata <-
data |>
filter(!!rlang::sym(group)==groups[group_i])
vdata <- groupdata[[velocity]]
sdata <- groupdata[[substrate]]
fit <- stats::nls(vdata ~ SSmicmen(sdata, Vm, K))
linedata <- tibble(
x=seq(0,#min(groupdata[[substrate]]),
max(groupdata[[substrate]])*1.5,
length.out=100))
linedata$y <-
predict(fit,
newdata = list(sdata=linedata$x))
refdata <- broom::tidy(fit) |>
select('term','estimate') |>
pivot_wider(names_from = 'term',values_from = 'estimate') |>
mutate(xv=max(sdata, na.rm = T),
yk= min(vdata, na.rm = T))
    # plot the fit
MMplot <- ggplot(data=groupdata,
mapping = aes(x = .data[[substrate]],#sdata,
y = .data[[velocity]]))+
geom_point()+
geom_line(data=linedata,aes(x=.data[['x']], y=.data[['y']]),
color='blue')+
geom_hline(data = refdata,
               aes(yintercept = .data[['Vm']]), linetype=3)+ # Vmax from coefficients
geom_text(data = refdata,
aes(.data[['xv']],.data[['Vm']],
label = paste(
'Vmax =',
roundR(.data[['Vm']]))),
              vjust = 1.4, hjust = 1)+ # Vmax from coefficients
geom_hline(data = refdata,
               aes(yintercept =.data[['Vm']]/2),linetype=3)+ # Vmax/2 from coefficients
geom_text(data = refdata,
aes(.data[['xv']],.data[['Vm']]/2),
label = "Vmax/2", vjust = -0.8, hjust = 1)+
geom_vline(data = refdata,
               aes(xintercept=.data[['K']]), linetype=2)+ # Km from coefficients
geom_text(data = refdata,
aes(.data[['K']],.data[['yk']],
label = paste(
'K =',
roundR(.data[['K']]))),
vjust = .5, hjust = -0.1)+
scale_x_continuous(n.breaks = 10)+
xlab(xlab)+
ylab(ylab)+
ggtitle(title)
if(length(groups)>1){
MMplot <-
MMplot+
labs(subtitle = groups[group_i])
}
fit_list <- rlist::list.append(fit_list,fit)
rm(fit)
names(fit_list)[length(fit_list)] <- groups[group_i]
plot_list <- rlist::list.append(plot_list,MMplot)
rm(MMplot)
names(plot_list)[length(plot_list)] <- groups[group_i]
}
return(list(MMfit=fit_list, MMplot=plot_list))
}
#' Lineweaver-Burk diagram
#'
#' \code{plot_LB} plots a Lineweaver-Burk diagram and computes the linear model
#' @param data data structure with columns for model data
#' @param substrate colname for substrate concentration
#' @param velocity colname for reaction velocity
#' @param group colname for optional grouping factor
#' @param title title of the plot
#' @param xlab label of the abscissa
#' @param ylab label of the ordinate
#'
#' @examples
#' MMdata <- data.frame(subst = c(2.00, 1.00, 0.50, 0.25),
#' velo = c(0.2253, 0.1795, 0.1380, 0.1000))
#'
#' plot_LB(data=MMdata,
#' substrate = 'subst',velocity = 'velo')
#'
#' MMdata <- data.frame(subst = rep(c(2.00, 1.00, 0.50, 0.25),2),
#' velo = c(0.2253, 0.1795, 0.1380, 0.1000,
#' 0.4731333, 0.4089333, 0.3473000, 0.2546667),
#' condition = rep(c('C1','C2'),each=4))
#'
#' plot_LB(data=MMdata,substrate = 'subst',
#' velocity = 'velo',group='condition')
#'
#' @export
plot_LB <- function(data,
substrate, velocity, group = NULL,
title = "Lineweaver-Burk-Plot", xlab = "1/substrate",
ylab = "1/velocity"){
if(is.null(group)) {
group <- 'Group'
data[[group]] <- 'all data'
}
fit_list <- list()
plot_list <- list()
groups <- data[[group]] |> as.factor() |> levels()
for(group_i in seq_along(groups)){
groupdata <-
data |>
filter(!!sym(group)==groups[group_i])
# vdata <- groupdata[[velocity]]
# sdata <- groupdata[[substrate]]
fitformula=paste0('I(1/',bt(velocity),') ~ I(1/',bt(substrate),')') |>
as.formula()
fit <- stats::lm(fitformula, data=groupdata)
LBplot <- ggplot(data = groupdata,
mapping = aes(x = 1/.data[[substrate]],
y = 1/.data[[velocity]]))+
geom_point()+
geom_smooth(
method = "lm",
fullrange = TRUE
)+
scale_x_continuous(n.breaks = 10)+
ggtitle(title)+
xlab(xlab)+
ylab(ylab)
if(length(groups)>1){
LBplot <-
LBplot+
labs(subtitle = groups[group_i])
}
fit_list <- rlist::list.append(fit_list,fit)
rm(fit)
names(fit_list)[length(fit_list)] <- groups[group_i]
plot_list <- rlist::list.append(plot_list,LBplot)
rm(LBplot)
names(plot_list)[length(plot_list)] <- groups[group_i]
}
return(list(LBfit=fit_list,LBplot=plot_list))
# Velo <-1/vel
# Subs <- 1/sub
# stats::coefficients(
# stats::lm(Velo~Subs)
# )
}
|
/scratch/gouwar.j/cran-all/cranData/wrappedtools/R/biotech.R
|
#' Comparison for groups in clinical trials based on all possible combinations of subjects
#'
#' @description
#' `r lifecycle::badge('experimental')`
#'
#' \code{WINratio} computes the ratio of wins and losses for any number
#' of comparison rules.
#'
#' @param data name of data set (tibble/data.frame) to analyze.
#' @param groupvar name of grouping variable, has to translate to 2 groups.
#' @param testvars names of variables for sequential rules.
#' @param rules list of rules (minimal cut-offs) for sequential comparison, negative if reduction is success, positive if increase is beneficial, must not be 0.
#' @param idvar name of identifier variable. If NULL, rownumber is used.
#' @param p_digits level for rounding p-value.
#'
#' @return
#' A list with elements:
#'
#' WINratio=vector with WINratio and CIs,
#'
#' WINodds=odds ratio of wins and losses, taking ties into account,
#'
#' p.value=p.value from prop.test,
#'
#' WINratioCI=character with merged WINratio, CI, and p
#'
#' testdata= tibble with testdata from cross-join.
#'
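#' @examples
#' # a minimal sketch with hypothetical data: a drop in deltaBP of at least
#' # 5 units counts as a win, hence the negative rule (-5)
#' trial <- data.frame(arm = rep(c("ctrl", "verum"), each = 4),
#'                     deltaBP = c(-2, 1, -8, 0, -9, -3, -12, 2))
#' WINratio(trial, groupvar = "arm", testvars = "deltaBP", rules = -5)
#'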
#' @export
#'
WINratio <- function(data,groupvar,testvars,rules, idvar=NULL,
p_digits=3){
if(any(rules==0)){
stop("rules must not be 0 but give direction and magnitude of minimal difference")
}
data <- select(data,any_of(c(groupvar,testvars,idvar)))
if((!is.factor(data[[groupvar]]))){
data[[groupvar]] <- factor(data[[groupvar]])
}
if(nlevels(data[[groupvar]])!=2){
stop("groupvar must have exactly two levels")
}
groupvar_level <- levels(data[[groupvar]])
grp1 <- data |>filter(!!sym(groupvar)==groupvar_level[1])
colnames(grp1) <- c("GRP",paste0("X",seq_along(testvars)))
grp2 <- data |>filter(!!sym(groupvar)==groupvar_level[2])
colnames(grp2) <- c("GRP",paste0("Y",seq_along(testvars)))
testdata <- cross_join(grp1,grp2) |>
mutate(WIN=0)
for(rule_i in seq_along(rules)){
testdata <-
testdata |>
rowwise() |>
mutate(
!!sym(paste0("rule",rule_i,"out")) :=
case_when(
#already decided
WIN!=0 ~ NA_integer_,
#sign of delta=sign of rule and win
abs(!!sym(paste0("X",rule_i))-
!!sym(paste0("Y",rule_i)))>=abs(rules[rule_i]) &
(sign(!!sym(paste0("X",rule_i))-
!!sym(paste0("Y",rule_i)))==sign(rules[rule_i])) ~1,
            #sign of delta!=sign of rule and lose
abs(!!sym(paste0("X",rule_i))-
!!sym(paste0("Y",rule_i)))>=abs(rules[rule_i]) &
(sign(!!sym(paste0("X",rule_i))-
!!sym(paste0("Y",rule_i)))!=sign(rules[rule_i])) ~-1,
.default=0),
WIN=sum(c_across(starts_with("rule")),na.rm=TRUE)
) |>
ungroup()
}
WINners <-
testdata |>
# group_by() |>
summarize(
across(starts_with("rule"),
list(Wins=~sum(.x==1,na.rm=TRUE),
Losses=~sum(.x==-1,na.rm=TRUE),
Ties=~sum(.x==0,na.rm=TRUE),
NC=~sum(is.na(.x))))) |>
pivot_longer(everything(),
names_to=c("rule","outcome"),
names_sep = "_") |>
pivot_wider(names_from = outcome) |>
add_row(rule="all") |>
mutate(across(-rule,
~case_when(rule=="all" ~sum(.x, na.rm=TRUE),
.default=.x))) |>
mutate(rule=c(paste(testvars,rules, sep=": "),"all"))
p_w <- WINners |>
filter(rule=="all") |>
pull("Wins")# / nrow(testdata)
p_l <- WINners |>
filter(rule=="all") |>
pull("Losses")# / nrow(testdata)
p_t <- WINners |>
filter(rule=="all") |>
pull("Ties")# / nrow(testdata)
# WINratio=pT/pC
p.value <- prop.test(p_w,p_w+p_l)$p.value
WINratio <- DescTools::BinomRatioCI(p_w,nrow(testdata),
p_l,nrow(testdata),
method = 'katz') |>
roundR(3)
WINodds <- (p_w+p_t*.5) / (p_l+p_t*.5)
WINratioCI <- paste0(roundR(WINratio[1],3)," (",
roundR(WINratio[2],3),"/",
roundR(WINratio[3],3),")",
" p ",formatP(p.value, ndigits=p_digits, pretext=TRUE))
return(list(WIN=WINners,
WINratio=WINratio,
WINodds=WINodds,
p.value=p.value,
WINratioCI=WINratioCI,
testdata=testdata))
}
utils::globalVariables(c('outcome',"rule"))
#' Estimation of glomerular filtration rate (eGFR) based on sex, age, and either serum creatinine and/or cystatin C
#'
#' @description
#' `r lifecycle::badge('experimental')`
#'
#' \code{eGFR} computes eGFR according to different rules (see references).
#'
#' @param data name of data set (tibble/data.frame) to analyze.
#' @param age_var name of column with patient age in years, default=age.
#' @param sex_var name of column with sex, assumed as female and male.
#' @param crea_var name of column with creatinine in mg/dl. If not available, leave as NULL.
#' @param cys_var name of column with cystatin C in mg/l. If not available, leave as NULL.
#'
#' @return
#' A list with 3 elements:
#'
#' eGFR_crea
#'
#' eGFR_cystatin
#'
#' eGFR_creatinine_cystatin
#'
#' @references https://www.kidney.org/content/ckd-epi-creatinine-cystatin-equation-2021
#'
#' https://www.kidney.org/content/ckd-epi-creatinine-equation-2021
#'
#' https://www.kidney.org/content/ckd-epi-cystatin-c-equation-2012
#'
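#' @examples
#' # a minimal sketch with hypothetical patients; sex must be 'female' or 'male'
#' pats <- data.frame(age = c(50, 70),
#'                    sex = c("female", "male"),
#'                    crea = c(0.9, 1.4))
#' eGFR(pats, crea_var = "crea")$eGFR_crea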
#' @export
#'
eGFR <- function(data,
age_var="age",
sex_var="sex",
crea_var=NULL,
cys_var=NULL){
if(is.null(crea_var) & is.null(cys_var)){
stop("At least one of crea or cys must be provided")
}
colnames(data)[which(colnames(data)==age_var)] <- "AGE"
colnames(data)[which(colnames(data)==sex_var)] <- "SEX"
data <-
mutate(data,
"sex_cys"=case_when(SEX=="female"~.932,
SEX=="male"~1),
"sex_crcys"=case_when(SEX=="female"~.963,
SEX=="male"~1),
"sex_cr"=case_when(SEX=="female"~1.012,
SEX=="male"~1),
"alpha_crcys"=case_when(SEX=="male"~-.144,
SEX=="female"~-.219),
"alpha_cr"=case_when(SEX=="male"~-.302,
SEX=="female"~-.241),
"kappa_crcys"=case_when(SEX=="male"~.9,
SEX=="female"~.7),
"kappa_cr"=case_when(SEX=="male"~.9,
SEX=="female"~.7))
if(!is.null(crea_var)){
colnames(data)[which(colnames(data)==crea_var)] <- "CREA"
data <-
data |>
rowwise() |>
mutate(
crea_min_cr=min(CREA/kappa_cr,1)^alpha_cr,
crea_max_cr=max(CREA/kappa_cr,1)^-1.2,
eGFR_cr=142*
crea_min_cr*
crea_max_cr*
.9938**AGE*
sex_cr) |>
ungroup()
eGFR_crea <- data$eGFR_cr
} else {
eGFR_crea <- NULL
}
if(!is.null(cys_var)){
colnames(data)[which(colnames(data)==cys_var)] <- "CYS"
data <-
data |>
rowwise() |>
mutate(
cys_min_cys=(min(CYS/.8, 1))^-.499,
cys_max_cys=(max(CYS/.8,1))^-1.328,
eGFR_cys=133*
cys_min_cys*
cys_max_cys*
.996**AGE*
sex_cys
) |>
ungroup()
eGFR_cystatin <- data$eGFR_cys
} else {
eGFR_cystatin <- NULL
}
if(!is.null(crea_var) & !is.null(cys_var)){
data <-
data |>
rowwise() |>
      mutate(
        cys_min_crcys=(min(CYS/.8, 1))^-.323,
        cys_max_crcys=(max(CYS/.8,1))^-.778,
        crea_min_crcys=min(CREA/kappa_crcys,1)^alpha_crcys,
        crea_max_crcys=max(CREA/kappa_crcys,1)^-.544,
        # use the combined-equation (crcys) exponents computed above, not the
        # cystatin-only ones, and name the column as expected by the return below
        eGFR_creatinine_cystatin=135*
          crea_min_crcys*
          crea_max_crcys*
          cys_min_crcys*
          cys_max_crcys*
          .9961**AGE*
          sex_crcys) |>
      ungroup()
} else {
data <-
data |>
mutate(
eGFR_creatinine_cystatin = NULL)
}
return(list(
"eGFR_crea"=eGFR_crea,
"eGFR_cystatin"=eGFR_cystatin,
"eGFR_creatinine_cystatin"=data$eGFR_creatinine_cystatin))
}
utils::globalVariables(c("AGE", "CREA", "CYS", "alpha_cr",
"alpha_crcys", "crea_max_cr",
"crea_max_crcys",
"crea_min_cr", "crea_min_crcys",
"cys_max_cys", "cys_min_cys", "kappa_cr",
"kappa_crcys", "sex_cr", "sex_crcys",
"sex_cys"))
|
/scratch/gouwar.j/cran-all/cranData/wrappedtools/R/clin_estimates.R
|
#' Predefined sets of labels for plots with log-scaled axes
#'
#' \code{logrange_1} returns a vector for log-labels at .1, 1, 100, 1000 ...
#'
#' @return numeric vector
#'
#' @examples
#' ggplot2::ggplot(mtcars) +
#' ggplot2::aes(wt, mpg) +
#' ggplot2::geom_point() +
#' ggplot2::scale_y_log10(breaks = logrange_5)
#' ggplot2::ggplot(mtcars) +
#' ggplot2::aes(wt, mpg) +
#' ggplot2::geom_point() +
#' ggplot2::scale_y_log10(breaks = logrange_123456789)
#' @export
logrange_1 <- c(1) * rep(10^(-20:20), each = 1)
#' @describeIn logrange_1 vector for log-labels at
#' 1.0, 1.5, 2.0, 2.5 ... 10, 15, 20, 25 ...
#'
#' @return numeric vector
#'
#' @export
logrange_5 <- seq(1, 9.5, .5) * rep(10^(-20:20), each = 18)
#' @describeIn logrange_1 vector for log-labels at
#' 1, 2, 3 ... 9, 10, 20, 30 ... 90, 100 ...
#'
#' @export
logrange_123456789 <- c(1:9) * rep(10^(-20:20), each = 9)
#' @describeIn logrange_1 vector for log-labels at
#' 1 ,2, 3, 5, 7, 10, 20 ,30, 50, 70 ...
#' @export
logrange_12357 <- c(1, 2, 3, 5, 7) * rep(10^(-20:20), each = 5)
#' @describeIn logrange_1 vector for log-labels at
#' 1, 5, 10, 50 ...
#' @export
logrange_15 <- c(1, 5) * rep(10^(-20:20), each = 2)
|
/scratch/gouwar.j/cran-all/cranData/wrappedtools/R/constants.R
|
#' Results from a simulated clinical trial with interaction effects.
#'
#' A dataset containing physiological data, biomarkers, and categorical data.
#'
#' @format A tibble with 300 rows and 24 variables:
#' \describe{
#' \item{Sex}{Sex of animal, factor with levels 'female', 'male'}
#' \item{Agegroup}{Factor with levels 'young','middle','old'}
#' \item{Treatment}{Factor with levels 'sham', 'OP'}
#' \item{HR}{Heart rate}
#' \item{sysRR,diaRR}{Systolic and diastolic blood pressure}
#' \item{Med xxx}{Pseudo-medications, factors with levels 'y','n'}
#' \item{Biomarker x [units]}{Biomarkers with log-normal distribution}
#' \item{Responder}{factor yes/no, systolic blood pressure >= 120?}
#' }
#'
"faketrial"
|
/scratch/gouwar.j/cran-all/cranData/wrappedtools/R/data.R
|
#' Compute mean and sd and put together with the ± symbol.
#'
#' @param x Data for computation.
#' @param roundDig Number of relevant digits for roundR.
#' @param drop0 Should trailing zeros be dropped?
#' @param groupvar Optional grouping variable for subgroups.
#' @param range Should min and max be included in output?
#' @param rangesep How should min/max be separated from mean+-sd?
#' @param add_n Should n be included in output?
#' @param .german logical, should "." and "," be used as bigmark and decimal?
#' @return character vector with mean ± SD, rounded to desired precision
#'
#' @examples
#' # basic usage of meansd
#' meansd(x = mtcars$wt)
#' # with additional options
#' meansd(x = mtcars$wt, groupvar = mtcars$am, add_n = TRUE)
#' @export
meansd <- function(x, roundDig = 2, drop0 = FALSE, groupvar = NULL,
range = FALSE, rangesep = " ", add_n = FALSE, .german = FALSE) {
out <- ""
if (length(na.omit(x)) > 0) {
if (is.null(groupvar)) {
meansd <- cbind(
matrix(c(
mean(x, na.rm = TRUE),
sd(x, na.rm = TRUE),
min(x, na.rm = TRUE),
max(x, na.rm = TRUE)
),
ncol = 4, byrow = FALSE
),
length(na.omit(x))
)
meansd[1:2] <- meansd[1:2] |>
roundR(level = roundDig, drop0 = drop0, .german = .german)
meansd[3:4] <- meansd[3:4] |>
roundR(level = roundDig, drop0 = drop0, .german = .german)
} else {
meansd <- matrix(c(
by(x, groupvar, mean, na.rm = TRUE),
by(x, groupvar, sd, na.rm = TRUE),
by(x, groupvar, min, na.rm = TRUE),
by(x, groupvar, max, na.rm = TRUE)
),
ncol = 4, byrow = FALSE
) |>
na_if(Inf) |>
na_if(-Inf)
meansd[, 1:2] <- meansd[, 1:2] |>
roundR(level = roundDig, drop0 = drop0, .german = .german)
meansd[, 3:4] <- meansd[, 3:4] |>
# as.numeric() |>
roundR(level = roundDig, drop0 = drop0, .german = .german)
meansd <- meansd |>
cbind(by(x, groupvar, function(x) {
length(na.omit(x))
}))
}
out <- paste(meansd[, 1], meansd[, 2], sep = " \u00B1 ")
if (range) {
out <- paste0(
out, rangesep, " [",
apply(matrix(meansd[, 3:4], ncol = 2), 1, paste,
collapse = " -> "
), "]"
) # \u22ef
}
if (add_n) {
out <- paste0(
out, rangesep, " [n=",
meansd[, 5], "]"
) # \u22ef
}
} # }
return(out)
}
#' Compute median and quartiles and put together.
#'
#' @param x Data for computation.
#' @param nround Number of digits for fixed round.
#' @param probs Quantiles to compute.
#' @param qtype Type of quantiles.
#' @param roundDig Number of relevant digits for roundR.
#' @param drop0 Should trailing zeros be dropped?
#' @param groupvar Optional grouping variable for subgroups.
#' @param range Should min and max be included in output?
#' @param rangesep How should min/max be separated from mean+-sd?
#' @param rangearrow What is put between min -> max?
#' @param prettynum logical, apply prettyNum to results?
#' @param .german logical, should "." and "," be used as bigmark and decimal?
#' @param add_n Should n be included in output?
#' @return character vector with median \code{(1stQuartile/3rdQuartile)}, rounded to desired precision
#' @examples
#' # basic usage of median_quart
#' median_quart(x = mtcars$wt)
#' # with additional options
#' median_quart(x = mtcars$wt, groupvar = mtcars$am, add_n = TRUE)
#' data(faketrial)
#' median_quart(x=faketrial$`Biomarker 1 [units]`,groupvar = faketrial$Treatment)
#' @export
median_quart <- function(x, nround = NULL, probs = c(.25, .5, .75),
qtype = 8, roundDig = 2, drop0 = FALSE,
groupvar = NULL, range = FALSE, rangesep = " ",
rangearrow = " -> ",
prettynum = FALSE, .german = FALSE, add_n = FALSE) {
out <- " "
bigmark <- ifelse(.german, ".", ",")
decimal <- ifelse(.german, ",", ".")
if (length(na.omit(x)) >= 1) {
if (is.null(groupvar)) {
quart <- matrix(
c(
stats::quantile(x, probs = c(probs, 0, 1), na.rm = TRUE, type = qtype),
length(na.omit(x))
),
ncol = length(probs) + 3
)
} else {
quart <- matrix(
unlist(
by(x, groupvar, quantile,
probs = c(probs, 0, 1), na.rm = TRUE,
type = qtype
)
),
ncol = length(probs) + 2, byrow = TRUE
)
quart <- cbind(
quart,
unlist(by(
x, groupvar, function(x) {
length(na.omit(x))
}
))
)
}
if (is.null(nround)) {
colcount <- ncol(quart)
quart[, 1:(colcount - 3)] <- roundR(quart[, 1:(colcount - 3)],
level = roundDig, drop0 = drop0, .german = .german
)
quart[, (colcount - 2):(colcount - 1)] <-
roundR(as.numeric(quart[, (colcount - 2):(colcount - 1)]),
level = roundDig, drop0 = drop0, .german = .german
)
if (prettynum) {
# quart <- apply(quart,1:2,function(x){
# formatC(as.numeric(x),
# digits = roundDig-1,
# format = 'f',
# big.mark = bigmark,
# decimal.mark = decimal,
# preserve.width = 'common',drop0trailing = FALSE)})
}
} else {
quart[, -ncol(quart)] <- round(quart[, -ncol(quart)], nround)
if (prettynum) {
quart <- apply(quart, 1:2, function(x) {
formatC(as.numeric(x),
digits = nround,
format = "f",
big.mark = bigmark,
decimal.mark = decimal,
preserve.width = "common", drop0trailing = FALSE
)
})
}
}
out <- str_glue("{quart[,2]} ({quart[,1]}/{quart[,3]})")
if (range) {
out <- str_glue("{out}{rangesep} [\\
{apply(matrix(quart[,(length(probs)+1):(length(probs)+2)],ncol=2),1,glue::glue_collapse,
sep=rangearrow)}]")
}
if (add_n) {
out <- str_glue("{out}{rangesep} [n={quart[,length(probs)+3]}]")
}
}
out <- as.character(out)
return(out)
}
#' Compute mean and standard error of mean and put together with the ± symbol.
#'
#' \code{meanse} computes SEM based on Standard Deviation/square root(n)
#' @param x Data for computation.
#' @param roundDig Number of relevant digits for roundR.
#' @param drop0 Should trailing zeros be dropped?
#' @param mult multiplier for SEM, default 1, can be set to
#' e.g. 2 or 1.96 to create confidence intervals
#'
#' @return character vector with mean ± SEM, rounded to desired precision
#'
#' @examples
#' # basic usage of meanse
#' meanse(x = mtcars$wt)
#' @export
meanse <- function(x, mult = 1, roundDig = 2, drop0 = FALSE) {
m <- mean(x, na.rm = TRUE)
s <- sd(x, na.rm = TRUE) / sqrt(length(na.omit(x)))
ms <- roundR(c(m, s * mult),
level = roundDig, drop0 = drop0
)
out <- paste(ms[1], ms[2], sep = " \u00B1 ")
return(out)
}
#' Compute standard error of median.
#'
#' \code{medianse} is based on \code{\link{mad}}/square root(n)
#'
#' @param x Data for computation.
#'
#' @return numeric vector with SE Median.
#'
#' @examples
#' # basic usage of medianse
#' medianse(x = mtcars$wt)
#' @export
medianse <- function(x) {
mad(x, na.rm = TRUE) / sqrt(length(na.omit(x)))
}
#' Compute standard error of median
#'
#' \code{se_median} is based on \code{\link{mad}}/square root(n)
#' (Deprecated, please see \link{medianse}, which is the same but named more consistently)
#'
#' @param x Data for computation.
#'
#' @return numeric vector with SE Median.
#'
#' @examples
#' # basic usage of se_median
#' \dontrun{
#' se_median(x = mtcars$wt)
#' }
#' @export
se_median <- function(x) {
.Deprecated('medianse')
mad(x, na.rm = TRUE) / sqrt(length(na.omit(x)))
}
#' Compute confidence interval of median by bootstrapping.
#'
#' \code{median_cl_boot} computes lower and upper confidence limits for the
#' estimated median, based on bootstrapping.
#'
#' @param x Data for computation.
#' @param conf confidence level, defaults to 0.95 (i.e. 95 percent).
#' @param type interval type, passed to \code{boot::boot.ci}.
#' @param nrepl number of bootstrap replications, defaults to 1000.
#'
#' @return A tibble with one row and three columns: Median, CIlow, CIhigh.
#'
#' @examples
#' # basic usage of median_cl_boot
#' median_cl_boot(x = mtcars$wt)
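#' # other interval types and confidence levels, as supported by boot::boot.ci
#' median_cl_boot(x = mtcars$wt, conf = 0.9, type = "perc")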
#' @export
median_cl_boot <- function(x, conf = 0.95, type = "basic", nrepl = 10^3) {
  x <- na.omit(x)
  bmedian <- function(x, ind) median(x[ind], na.rm = TRUE)
  bt <- boot::boot(x, bmedian, R = nrepl)
  bb <- boot::boot.ci(bt, conf = conf, type = type)
  # the interval matrix is the last element of the boot.ci result;
  # its final two entries are the lower and upper confidence limits
  ci <- utils::tail(as.vector(bb[[length(bb)]]), 2)
  tibble(
    Median = median(x, na.rm = TRUE),
    CIlow = ci[1],
    CIhigh = ci[2]
  )
}
#' Rename output from \link{median_cl_boot} for use in ggplot.
#'
#' \code{median_cl_boot_gg} computes lower and upper confidence limits for the
#' estimated median, based on bootstrapping, using default settings.
#'
#' @param x Data for computation.
# #' @param conf confidence interval with default 95%.
# #' @param type type for function boot.ci.
# #' @param nrepl number of bootstrap replications, defaults to 1000.
#'
#' @return A tibble with one row and three columns: y, ymin, ymax.
#'
#' @examples
#' # basic usage of median_cl_boot_gg
#' median_cl_boot_gg(x = mtcars$wt)
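#' # as a summary function in ggplot2:
#' ggplot2::ggplot(mtcars, ggplot2::aes(x = factor(cyl), y = wt)) +
#'   ggplot2::stat_summary(fun.data = median_cl_boot_gg)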
#' @export
median_cl_boot_gg <- function(x) {
  out <- median_cl_boot(x = x) |>
    rename(y = "Median", ymin = "CIlow", ymax = "CIhigh")
  return(out)
}
#' Compute absolute and relative frequencies.
#'
#' \code{cat_desc_stats} computes absolute and relative frequencies for
#' categorical data with a number of formatting options.
#'
#' @param source Data for computation. Previously "quelle".
#' @param separator delimiter between results per level, defaults to ' '.
#' @param return_level Should levels be reported?
#' @param ndigit Digits for rounding of relative frequencies.
#' @param groupvar Optional grouping factor.
#' @param singleline Put all group levels in a single line?
#' @param percent Logical, add percent-symbol after relative frequencies?
#' @param prettynum logical, apply prettyNum to results?
#' @param .german logical, should "." and "," be used as bigmark and decimal?
#' Sets prettynum to TRUE.
#' @param quelle deprecated, retained for compatibility, use 'source' instead.
#'
#' @return
#' Structure depends on parameter return_level:
#' if FALSE, a tibble with descriptives; otherwise a list with two tibbles,
#' holding the levels of the factor and the descriptives.
#' If parameter singleline is FALSE (default), results for each factor level
#' are reported in a separate line, otherwise they are pasted together.
#' The number of columns in the result tibbles is one, or the number of
#' levels of the additional grouping variable.
#'
#' @examples
#' cat_desc_stats(mtcars$gear)
#' cat_desc_stats(mtcars$gear, return_level = FALSE)
#' cat_desc_stats(mtcars$gear, groupvar = mtcars$am)
#' cat_desc_stats(mtcars$gear, groupvar = mtcars$am, singleline = TRUE)
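#' # formatting options: one decimal for percentages, percent sign suppressed
#' cat_desc_stats(mtcars$gear, ndigit = 1, percent = FALSE)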
#' @export
cat_desc_stats <- function(source=NULL, separator = " ",
return_level = TRUE,
ndigit = 0,
groupvar = NULL,
singleline = FALSE,
percent = TRUE,
prettynum = FALSE,
.german = FALSE,
quelle=NULL) {
if(!is.null(quelle)) {
source <- quelle
}
percent <- ifelse(percent, "%", "")
bigmark <- ifelse(.german, ".", ",")
  decimal <- ifelse(.german, ",", ".")
  # .german implies pretty printing; applies to grouped and ungrouped output
  if (.german) {
    prettynum <- TRUE
  }
  if (!is.factor(source)) {
    source <- factor(source)
  }
level <- levels(source) |> enframe(name = NULL)
if (singleline) {
level <- paste(levels(source), sep = "", collapse = separator)
}
if (is.null(groupvar)) {
tableout <- matrix(table(source),
nrow = length(levels(source)),
byrow = FALSE
)
colnames(tableout) <- "abs"
pt_temp <- round(
100 * prop.table(tableout),
ndigit
)
if (prettynum) {
pt_temp <- formatC(pt_temp,
digits = ndigit,
format = "f",
big.mark = bigmark,
decimal.mark = decimal,
preserve.width = "common", drop0trailing = FALSE
)
tableout <- formatC(tableout,
digits = 0,
format = "f",
big.mark = bigmark,
decimal.mark = decimal,
preserve.width = "common"
)
}
ptableout <- matrix(paste0(
" (",
pt_temp,
percent, ")"
),
nrow = length(levels(source)),
byrow = FALSE
)
colnames(ptableout) <- "rel"
} else {
tableout <- matrix(unlist(by(source, groupvar, table)),
nrow = length(levels(source)),
byrow = FALSE
)
colnames(tableout) <- glue::glue("abs{levels(factor(groupvar))}")
pt_temp <- round(100 * prop.table(tableout, margin = 2), ndigit)
if (prettynum) {
pt_temp <- formatC(pt_temp,
digits = ndigit,
format = "f",
big.mark = bigmark,
decimal.mark = decimal,
preserve.width = "common", drop0trailing = FALSE
)
tableout <- formatC(tableout,
digits = 0,
format = "f",
big.mark = bigmark,
decimal.mark = decimal,
preserve.width = "common"
)
}
ptableout <- matrix(
paste0(
" (", pt_temp,
percent, ")"
),
nrow = length(levels(source)),
byrow = FALSE
)
colnames(ptableout) <- glue::glue("rel{levels(factor(groupvar))}")
}
zvalue <- purrr::map2(tableout, ptableout, glue::glue) |>
as.character() |>
matrix(
nrow = length(levels(source)),
byrow = FALSE
) |>
as_tibble(.name_repair = "minimal")
if (is.null(groupvar)) {
colnames(zvalue) <- "desc"
} else {
colnames(zvalue) <- glue::glue("desc{levels(factor(groupvar))}")
}
if (singleline) {
zvalue <- purrr::map(zvalue,
.f = function(x) {
glue::glue_collapse(x, sep = separator)
}
) |>
as_tibble()
}
levdesstats <- list(level = level, freq = zvalue)
  if (return_level) {
return(levdesstats)
} else {
return(zvalue)
}
}
#' Compute absolute and relative frequencies for a table.
#'
#' \code{cat_desc_table} computes absolute and relative frequencies for
#' categorical data with a number of formatting options.
#'
#' @param data data set (tibble/data.frame) to analyze.
#' @param desc_vars vector of column names for dependent variables.
#' @param round_desc number of significant digits for rounding of descriptive stats.
#' @param singleline Put all group levels in a single line?
#' @param spacer Text element to indent levels and fill empty cells,
#' defaults to " ".
#' @param indentor Optional text to indent factor levels
#'
#' @return
#' A tibble with variable names and descriptive statistics.
#' @examples
#' cat_desc_table(
#' data = mtcars, desc_vars = c("gear", "cyl", "carb"))
#'
#' cat_desc_table(
#' data = mtcars, desc_vars = c("gear", "cyl", "carb"), singleline = TRUE)
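#'
#' # indentor marks factor levels in the Variable column
#' cat_desc_table(
#'   data = mtcars, desc_vars = c("gear", "cyl"), indentor = "- ")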
#'
#' @export
#'
cat_desc_table <- function(data, desc_vars,
round_desc = 2,
singleline = FALSE,
spacer = " ", indentor='') {
freq <-
purrr::map(data[desc_vars],
.f = function(x) {
cat_desc_stats(
x,
return_level = FALSE, singleline = singleline,
ndigit = round_desc
)
}
) |>
purrr::map(as_tibble)
levels <-
purrr::map(data[desc_vars],
.f = function(x) {
cat_desc_stats(x,
singleline = singleline
)$level
}
) |>
purrr::map(as_tibble)
out <- tibble(
Variable = character(), desc_all = character())
for (var_i in seq_along(desc_vars)) {
if (!singleline) {
out_tmp <- add_row(out[0,],
Variable = c(
desc_vars[var_i],
glue::glue(
"{indentor}{levels[[var_i]][[1]]}"
)
),
desc_all = c(spacer, freq[[var_i]][[1]])
)
out <- rbind(out,out_tmp)
} else {
out_tmp <- add_row(out[0,],
Variable = paste(
desc_vars[var_i],
levels[[var_i]][[1]]
),
desc_all = freq[[var_i]][[1]]
)
out <- rbind(out,out_tmp)
}
}
return(out)
}
#' Compute coefficient of variance.
#'
#' \code{var_coeff} computes relative variability as standard deviation/mean * 100.
#'
#' @param x Data for computation.
#'
#' @return numeric vector with coefficient of variance.
#'
#' @examples
#' var_coeff(x = mtcars$wt)
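#' # CV is unitless, so differently scaled measures can be compared
#' var_coeff(x = mtcars$qsec)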
#' @export
var_coeff <- function(x) {
return(sd(x, na.rm = TRUE) / mean(x, na.rm = TRUE) * 100)
}
#' Standard Error of Mean.
#'
#' \code{SEM} computes standard error of mean.
#'
#' @param x Data for computation.
#'
#' @return numeric vector with SEM.
#'
#' @examples
#' SEM(x = mtcars$wt)
#' @export
SEM <- function(x) {
return(sd(x, na.rm = TRUE) / sqrt(length(na.omit(x))))
}
.onAttach <- function(libname, pkgname) {
packageStartupMessage(paste(
"Package wrappedtools is still experimental,",
"be warned that there might be dragons"
))
}
.onLoad <- function(libname, pkgname) {
# options(stringsAsFactors=F)
}
#' @import tibble
NULL
#' @import dplyr
NULL
#' @import stringr
NULL
# #'@importFrom testthat test_that expect_equal
# NULL
#' @importFrom stats anova as.formula confint cor.test fisher.test ks.test mad median na.omit p.adjust pairwise.t.test pnorm power.prop.test power.t.test qnorm quantile rpois sd wilcox.test t.test var.test predict SSmicmen coef
NULL
#' @import ggplot2
NULL
#' @import rlang
NULL
#' @importFrom tidyr separate nest pivot_longer pivot_wider
NULL
#' @importFrom utils data
NULL
#' @importFrom broom tidy
NULL
#' @importFrom rlist list.append
NULL
#' @importFrom forcats fct_lump_n fct_drop
NULL
#' @importFrom grDevices boxplot.stats
NULL
#' @importFrom stats prop.test
NULL
#' @importFrom DescTools BinomRatioCI
NULL
#' @importFrom flextable flextable_to_rmd
NULL