smooth.over.scale <- function(x, det1, det2, lre, lreA, scale.range = NULL, Arange = NULL, Jstar = 20, splines = FALSE, positive = FALSE, dfS = 10, interpolate = FALSE){

  # this function does smoothing (over scale) of general spectral quantities,
  # for outputs of cnlt.univ and cnlt.biv (same grid).

  spec <- matrix(ncol = length(x), nrow = Jstar)

  scale.range1 <- range(logb(unlist(lre), 2))

  min.s <- max(scale.range1[1], scale.range[1])   # should work with NULL arg
  max.s <- min(scale.range1[2], scale.range[2])

  scale <- seq(from = min.s, to = (max.s + 0.01), length = (Jstar + 1))   # add a little on to the max

  s1 <- scale[1:Jstar]
  s2 <- scale[2:(Jstar + 1)]
  mscale <- (s1 + s2) / 2

  # convenience matching function
  find.values <- function(a, b, c){
    findV <- sapply(1:length(a), function(i){ length(which((c >= a[i]) & (c < b[i]))) })
    findV <- which(findV > 0)
    return(findV)
  }

  if(is.null(Arange)){
    Arange <- c(-Inf, Inf)   # set to silly values to include everything
  }

  for(k in 1:length(x)){

    mat <- matrix(ncol = 3, nrow = length(lre[[k]]))
    mat[, 1] <- logb(lre[[k]], 2)
    mat[, 2] <- det1[[k]] * det2[[k]]
    mat[, 3] <- lreA[[k]]

    # only use values within a certain alpha range
    mat <- mat[(Arange[1] <= mat[, 3]) & (mat[, 3] < Arange[2]), ]

    # needs to have more than one value in mat
    if(length(mat) > 3){
      if(splines){
        xobs <- find.values(s1, s2, mat[, 1])
        if(positive){
          # ensure spectral values are positive
          spline1 <- smooth.spline(mat[, 1], sqrt(mat[, 2]), df = dfS)
          if(interpolate){
            spec[, k] <- (approx(spline1$x, spline1$y, xout = mscale, rule = 2)$y)^2
          } else{
            spec[xobs, k] <- (approx(spline1$x, spline1$y, xout = mscale[xobs], rule = 2)$y)^2
          }
        } else{
          # standard spline smoothing (doesn't need to be positive)
          spline1 <- smooth.spline(mat[, 1], mat[, 2], df = dfS)
          if(interpolate){
            spec[, k] <- approx(spline1$x, spline1$y, xout = mscale, rule = 2)$y
          } else{
            spec[xobs, k] <- approx(spline1$x, spline1$y, xout = mscale[xobs], rule = 2)$y
          }
        }
      } else{
        for(i in 1:Jstar){
          spec[i, k] <- mean(mat[(scale[i] <= mat[, 1]) & (mat[, 1] < scale[i + 1]), 2])
        }
      }
    }
  }
  return(list(spec = spec, mscale = mscale))
}
/scratch/gouwar.j/cran-all/cranData/CNLTtsa/R/smooth.over.scale.R
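A minimal usage sketch with synthetic inputs. The list arguments below are hypothetical stand-ins, not real cnlt.univ/cnlt.biv output; they only mimic the shape the function expects (one vector of scales, detail coefficients, and alpha values per time point in x):

set.seed(1)
x    <- 1:50
lre  <- lapply(x, function(i) 2^runif(30, 1, 6))   # artificial scales
det1 <- lapply(x, function(i) rnorm(30))           # artificial detail coefficients
det2 <- lapply(x, function(i) rnorm(30))
lreA <- lapply(x, function(i) runif(30))           # artificial alpha values
out <- smooth.over.scale(x, det1, det2, lre, lreA, Jstar = 10)
dim(out$spec)   # 10 x 50 smoothed spectral matrix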
smooth.over.time <- function(x, spec, M, fact = 1){

  nscale <- nrow(spec)
  smooth.spec <- matrix(data = NA, nrow = nscale, ncol = ncol(spec))

  if(length(M) %in% c(1, nscale)){   # error checking
    if(length(M) == 1){
      # smoothing window increases with scale according to fact
      v <- (1:nscale) - 1
      Mvec <- M * fact^v
    } else{
      Mvec <- M
    }
  } else{
    stop("Please supply valid smoothing bandwidth(s)!")
  }

  for(i in 1:nscale){
    a <- cbind(order(x), x, spec[i, ])
    a <- a[is.na(a[, 3]) == FALSE, ]

    # must have MORE THAN 1 obs at each scale
    if(length(a) <= 3){
      smooth.spec[i, ] <- rep(NA, dim(spec)[2])
    } else{
      sm <- ksmooth(a[, 2], a[, 3], kernel = "box", bandwidth = Mvec[i], x.points = x[a[, 1]])
      smooth.spec[i, a[, 1]] <- sm$y
    }
  }
  return(smooth.spec)
}
/scratch/gouwar.j/cran-all/cranData/CNLTtsa/R/smooth.over.time.R
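A minimal sketch continuing the synthetic example above: smooth the spectrum over time, with a base bandwidth that doubles at each successive scale level (fact = 2):

sm <- smooth.over.time(x, out$spec, M = 5, fact = 2)
dim(sm)   # same dimensions as out$spec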
## this is the equivalent of .First.lib (for packages with a namespace)
## note that it is general, so the name of the package etc doesn't need
## to be specified (just say whatever you want in the prints though).

.onAttach <- function(lib, pkg){

  ver <- read.dcf(file.path(lib, pkg, "DESCRIPTION"), "Version")
  ver <- as.character(ver)

  curdate <- read.dcf(file.path(lib, pkg, "DESCRIPTION"), "Date")
  curdate <- as.character(curdate)

  # Welcome message (MAN):
  packageStartupMessage(paste(
    "\n",
    "******************************************************************************\n",
    " CNLTtsa: Complex-Valued Wavelet Lifting for Univariate and Bivariate Signal Analysis \n\n",
    " --- Written by Jean Sanderson and Matt Nunes ---\n",
    " --- Contributions from Marina Knight and Piotr Fryzlewicz ---\n",
    " Current package version: ", ver, " (", curdate, ") \n\n",
    "\n",
    "******************************************************************************\n", "\n")
  )
}
/scratch/gouwar.j/cran-all/cranData/CNLTtsa/R/zzz.R
MultiDimen_test = function(data, stat = "HT", pair = FALSE, method_p = "sampling", rank = FALSE, diff = FALSE, samplenum = 1000){

  ################ warning message #########################
  if(!is.matrix(data) & !is.data.frame(data)) stop("The input data must be a matrix or dataframe")
  if(!is.numeric(samplenum)) stop("samplenum input is not valid")

  if(pair == TRUE){
    if(stat %in% c("wsum")) stop("The stat input is not suitable for paired calculation")
    if(length(unique(data[, ncol(data) - 1])) != 2) stop("The second-to-last column of the data must contain exactly two unique values if pair = 'TRUE'")
    if(method_p == "asymptotic" & stat != "HT") stop("Asymptotic method can only be used when statistic is HT when pair = 'TRUE'")
  }
  if(pair == FALSE){
    if(stat %in% c("zmax", "zmaxabs")) stop("The stat input is not suitable for unpaired calculation")
    if(sum(unique(data[, ncol(data)]) == c(0, 1)) != 2) stop("The last column of the data must only contain 0 and 1 if pair = 'FALSE'")
    if(method_p == "asymptotic" & stat %in% c("tmax", "tmaxabs")) stop("Asymptotic method can not be used when statistic is tmax or tmaxabs and pair = 'FALSE'")
    if(method_p == "asymptotic" & rank == TRUE & stat == "HT") stop("Asymptotic method can not be used when statistic is ranked HT and pair = 'FALSE'")
  }

  parameter_value = c(stat, method_p, rank, diff, pair)
  parameter_name = c("stat", "method_p", "rank", "diff", "pair")
  parameter_content = c("HT tmax tmaxabs wsum zmax zmaxabs", "sampling exact asymptotic", "TRUE FALSE 1 0", "TRUE FALSE 1 0", "TRUE FALSE 1 0")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  if(stat == "wsum" & diff == TRUE & pair == FALSE) warning("diff is not calculated when stat = 'wsum'")
  if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
  if(stat == "wsum" & pair == FALSE){
    if(rank == FALSE) warning("Changing the rank to TRUE, because stat = 'wsum' is chosen")
    rank = TRUE
  }
  ##############################################

  if(pair == FALSE){

    # Hotelling's T^2, rescaled to an F statistic
    HT <- function(dat, index, rank){
      x = dat[index == 0, ]
      y = dat[index == 1, ]
      m = dim(x)[1]
      k = dim(x)[2]
      n = dim(y)[1]
      mx = apply(x, 2, mean)
      vx = var(x)
      my = apply(y, 2, mean)
      vy = var(y)
      v = (m - 1) * vx + (n - 1) * vy
      v = v / (m + n - 2)
      diff = mx - my
      t2 = t(diff) %*% solve(v) %*% diff
      t2 = t2 * m * n / (m + n)
      fs = (m + n - k - 1) * t2 / ((m + n - 2) * k)
      as.numeric(fs)
    }

    # standardised Wilcoxon rank sum with a tie correction
    wilcox.sd <- function(x, index){
      rd = rank(x)
      m = sum(index == 0)
      nt = length(index)
      n = nt - m
      mw = m * (nt + 1) / 2
      vw1 = mw * n / 6
      ux = unique(x)
      freq = ux
      for(i in 1:length(ux)){
        freq[i] = sum(x == ux[i])
      }
      af = sum(freq^3 - freq) * m * n
      tmp = 12 * nt * (nt - 1)
      af = af / tmp
      vw = vw1 - af
      w = sum(rd[index == 0])
      (w - mw) / sqrt(vw)
    }

    tall <- function(dat, index = NULL, rank = NULL){
      if(rank == FALSE){
        x = dat[index == 0, ]
        y = dat[index == 1, ]
        k = dim(x)[2]
        stata = rep(0, k)
        for(i in 1:k){
          stata[i] = t.test(x[, i], y[, i], var.equal = TRUE)$statistic
        }
        stata
      } else if(rank == TRUE){
        k = dim(dat)[2]
        stat = rep(0, k)
        for(i in 1:k){
          stat[i] = wilcox.sd(dat[, i], index)
        }
        stat
      }
    }

    tmax <- function(dat, index = NULL, rank = NULL){ max(tall(dat, index, rank)) }
    tmaxabs <- function(dat, index = NULL, rank = NULL){ max(abs(tall(dat, index, rank))) }
    wsum <- function(rd, index, rank){ sum(rd[index == 0, ]) }

    nc = ncol(data)
    index = data[, nc]
    data = data[, -nc]

    STAT = stat
    if(stat == "HT") stat = HT
    else if(stat == "tmax") stat = tmax
    else if(stat == "tmaxabs") stat = tmaxabs
    else if(stat == "wsum") stat = wsum

    if(rank == TRUE){
      for(i in 1:ncol(data)){
        data[, i] = rank(data[, i])
      }
    }

    x = data[index == 0, ]
    y = data[index == 1, ]
    m = dim(x)[1]
    k = dim(x)[2]
    n = dim(y)[1]
    nt = n + m
    data = as.matrix(data)
    Fobs = stat(data, index, rank)

    if(method_p == "exact"){
      all.comb = combn(nt, m)
      N = dim(all.comb)[2]
      replace = rep(1, N)
      for(i in 1:N){
        ind = all.comb[, i]
        index = rep(0, nt)
        index[ind] = 1
        replace[i] = stat(data, index, rank)
      }
      p.value <- length(replace[replace >= Fobs]) / N
    } else if(method_p == "sampling"){
      replace <- replicate(samplenum, stat(data, sample(index), rank))
      p.value <- length(replace[replace >= Fobs]) / samplenum
    } else if(method_p == "asymptotic"){
      if(STAT == "HT" & rank == FALSE){
        p.value = 1 - pf(Fobs, k, nt - k - 1)
      } else if(STAT == "wsum"){
        S = apply(data, 1, sum)
        sigmaz = var(S) * (nt - 1) / nt
        varw = m * n * sigmaz / (nt - 1)
        mw = k * m * (nt + 1) / 2
        z = (Fobs - mw) / sqrt(varw)
        p.value = 1 - pnorm(z)
      }
    }

    if(diff == TRUE & STAT != "wsum"){
      replace <- replicate(samplenum, tmaxabs(data, sample(index), rank))
      quan = quantile(replace, 0.95)
      large = which(abs(tall(data, index, rank)) >= quan)
      larger = NULL
      for(i in 1:length(large)){
        if(i < length(large)){
          larger <- c(larger, paste(large[i], ", "))
        } else{
          larger <- c(larger, paste(large[i]))
        }
      }
      larger <- paste(larger, collapse = "")
    }

  } else{

    # Hotelling's T^2 for paired differences
    HT1 <- function(x){
      mx = apply(x, 2, mean)
      vx = var(x)
      n = dim(x)[1]
      k = dim(x)[2]
      t2 = n * t(mx) %*% solve(vx) %*% mx
      t2[1, 1]
    }

    # the same statistic rescaled to an F statistic
    HT2 <- function(x){
      mx = apply(x, 2, mean)
      vx = var(x)
      n = dim(x)[1]
      k = dim(x)[2]
      t2 = n * t(mx) %*% solve(vx) %*% mx
      fs = (n - k) * t2 / ((n - 1) * k)
      fs[1, 1]
    }

    # one random sign flip of the paired differences
    statistic <- function(x, fun){
      n = dim(x)[1]
      bin = rbinom(n, 1, 0.5)
      bin = 2 * bin - 1
      y = diag(bin) %*% x
      fun(y)
    }

    # decode integer x into a +1/-1 sign vector of length k
    binary <- function(x, k){
      tmp = NULL
      y = x
      if(x < 2^k){
        for(i in k - 1:k){
          a = floor(y / 2^i)
          tmp = c(tmp, a)
          y = y - a * 2^i
        }
      }
      2 * (tmp - 0.5)
    }

    tall <- function(dat, index = NULL, rank = NULL){
      x = dat
      mx = apply(x, 2, mean)
      vx = apply(x, 2, var)
      n = dim(x)[1]
      t = mx * sqrt(n) / sqrt(vx)
      t
    }

    tmax <- function(dat, index = NULL, rank = NULL){ max(tall(dat)) }
    tmaxabs <- function(dat, index = NULL, rank = NULL){ max(abs(tall(dat))) }

    zall <- function(x){
      mx = apply(abs(x), 2, sum) / 2
      vx = apply(x^2, 2, sum) / 4
      sr = apply(x * (x > 0), 2, sum)
      z = (sr - mx) / sqrt(vx)
      z
    }
    zmax <- function(x){ max(zall(x)) }
    zmaxabs <- function(x){ max(abs(zall(x))) }

    STAT = stat
    if(stat == "HT") stat = HT1
    else if(stat == "tmax") stat = tmax
    else if(stat == "tmaxabs") stat = tmaxabs
    else if(stat == "zmax") stat = zmax
    else if(stat == "zmaxabs") stat = zmaxabs

    m = nrow(data) / 2
    n = nrow(data) / 2
    k = ncol(data) - 2
    nc = ncol(data)
    uni_index = unique(data[, nc - 1])
    uni_pair = unique(data[, nc])
    x = data[data[, nc - 1] == uni_index[1], ]
    y = data[data[, nc - 1] == uni_index[2], ]
    x = x[x[, nc] == uni_pair, ]
    y = y[y[, nc] == uni_pair, ]
    x = x[, 1:k]
    y = y[, 1:k]
    D = x - y

    rank = FALSE
    if(STAT == "zmax" | STAT == "zmaxabs"){
      rank = TRUE
      sd = sign(D)
      SRD = abs(D)
      for(i in 1:k){
        SRD[, i] = rank(SRD[, i])
      }
      D = SRD * sd
    }

    Fobs = stat(D)

    if(method_p == "exact"){
      l = dim(D)[1]
      ppm = NULL
      N = 2^l - 1
      for(i in 0:N){
        condition = diag(binary(i, l)) %*% D
        ppm = c(ppm, stat(condition))
      }
      p.value = length(ppm[ppm >= Fobs]) / N
    } else if(method_p == "sampling"){
      results <- replicate(samplenum, statistic(D, stat))
      p.value = length(results[results >= Fobs]) / samplenum
    }
    if(method_p == "asymptotic" & STAT == "HT"){
      f = HT2(D)
      p.value = 1 - pf(f, k, n - k)
    }
  }

  ############# output ################################
  names(p.value) = method_p
  names(Fobs) = STAT
  if(method_p == "asymptotic"){
    attr(p.value, "type") = switch(STAT, "HT" = "F distribution", "wsum" = "normal")
  }
  alternative = if(STAT == "wsum"){
    "Each component in sample0 is greater than in sample1."
  } else{
    "The means are different."
  }
  output <- list(method = if(pair){ "Multiple Dimensional paired test" } else{ "Multiple Dimensional test" },
                 score = if(rank){ "Wilcoxon" } else{ "original" },
                 stat = Fobs, pval = p.value, alternative = alternative)
  if(diff == TRUE & STAT != "wsum"){
    output <- c(output, addition = paste(larger, "dimensions are considered to have significant differences\n"))
  }
  class(output) = "nonp"
  output
  ##################################
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/MultiDimen_test.R
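A minimal sketch, assuming the CNPS functions in this dump are loaded: an unpaired two-group multivariate test on simulated data, with the 0/1 group indicator in the last column as the function expects:

set.seed(42)
grp0 <- matrix(rnorm(30), ncol = 3)             # group 0: 10 obs x 3 dimensions
grp1 <- matrix(rnorm(30, mean = 1), ncol = 3)   # group 1, shifted mean
dat  <- rbind(cbind(grp0, 0), cbind(grp1, 1))
MultiDimen_test(dat, stat = "HT", method_p = "sampling", samplenum = 200)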
RMD_test = function(x, y, alternative = "greater", mu1 = median(x), mu2 = median(y), method_p = "exact", samplenum = 2000, samplemethod = "R", conf.level.sample = 0.95){

  ################## warning message #############################
  if(!is.numeric(c(x, y))) stop("The input data is not numeric")

  parameter_value = c(alternative, method_p, samplemethod)
  parameter_name = c("alternative", "method_p", "samplemethod")
  parameter_content = c("greater less two.sided", "sampling exact", "S R W")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  cts_value = c(mu1, mu2, samplenum, conf.level.sample)
  cts_name = c("mu1", "mu2", "samplenum", "conf.level.sample")
  cts = data.frame(cts_name, cts_value)
  for(i in 1:nrow(cts)){
    if(!is.numeric(cts[i, 2])) stop(paste(cts[i, 1], "input is not valid"))
    if(i %in% c(4)){
      if(cts[i, 2] < 0 | cts[i, 2] > 1) stop(paste(cts[i, 1], "input is out of range"))
    }
  }

  if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
  if(method_p != "sampling" & !missing(samplemethod)) warning("\n samplemethod is not working")
  if(method_p != "sampling" & !missing(conf.level.sample)) warning("\n conf.level.sample is not working")
  ################################################

  # one-sided ratio of mean deviances
  mean1.diff = function(x, nx){
    mean(x[1:nx]) / mean(x[-(1:nx)])
  }
  # two-sided version: larger mean deviance over smaller
  mean2.diff = function(x, nx){
    max(mean(x[1:nx]), mean(x[-(1:nx)])) / min(mean(x[1:nx]), mean(x[-(1:nx)]))
  }

  nx = length(x)
  ny = length(y)
  n = nx + ny
  dev_x = abs(x - mu1)
  dev_y = abs(y - mu2)
  data <- c(dev_x, dev_y)
  index <- c(rep(1, nx), rep(2, ny))

  if(alternative != "two.sided"){
    RMD_obs = mean(dev_x) / mean(dev_y)
    if(method_p == "exact"){
      all.comb = combn(n, nx)
      N = dim(all.comb)[2]
      replace = rep(1, N)
      for(i in 1:N){
        ind = all.comb[, i]
        replace[i] = mean(data[ind]) / mean(data[-ind])
      }
      if(alternative == "greater"){
        p.value = length(replace[replace >= RMD_obs]) / N
      } else if(alternative == "less"){
        p.value = length(replace[replace <= RMD_obs]) / N
      }
    } else if(method_p == "sampling"){
      replace = apply(TwosampleSRS(data, index, samplenum, method = samplemethod), 2, function(x) mean1.diff(x, nx))
      if(alternative == "greater"){
        p.value = length(replace[replace >= RMD_obs]) / samplenum
      } else if(alternative == "less"){
        p.value = length(replace[replace <= RMD_obs]) / samplenum
      }
      p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
      p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    }
  } else if(alternative == "two.sided"){
    RMD_obs = max(mean(dev_x), mean(dev_y)) / min(mean(dev_x), mean(dev_y))
    if(method_p == "exact"){
      all.comb = combn(n, nx)
      N = dim(all.comb)[2]
      replace = rep(1, N)
      for(i in 1:N){
        ind = all.comb[, i]
        replace[i] = max(mean(data[ind]), mean(data[-ind])) / min(mean(data[ind]), mean(data[-ind]))
      }
      p.value = length(replace[replace >= RMD_obs]) / N
    } else if(method_p == "sampling"){
      replace = apply(TwosampleSRS(data, index, samplenum, method = samplemethod), 2, function(x) mean2.diff(x, nx))
      p.value = length(replace[replace >= RMD_obs]) / samplenum
      p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
      p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    }
  }

  names(p.value) = method_p
  names(RMD_obs) = "RMDobs"
  conf = NULL
  if(method_p == "sampling"){
    attr(p.value, "type") = samplemethod
    conf = c(p.down, p.up)
    attr(conf, "conf.level") = conf.level.sample
  }
  null.value = c("The variance of the first", "the variance of the second")
  attr(null.value, "direction") = alternative
  output = list(method = "RMD test", stat = RMD_obs, conf.int = conf, pval = p.value, null.value = null.value)
  class(output) = "nonp"
  output
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/RMD_test.R
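A minimal sketch, assuming the functions above are loaded: test whether the first sample is more dispersed than the second using the exact permutation distribution of the RMD statistic:

set.seed(7)
a <- rnorm(8, sd = 2)
b <- rnorm(8, sd = 1)
RMD_test(a, b, alternative = "greater", method_p = "exact")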
# FindK returns the largest K such that choose(K, M) <= x; used to decode an
# integer into a combination in the combinatorial number system.
FindK <- function(x, M){
  K = M
  tmp = 1
  while(tmp <= x){
    tmp = tmp * (K + 1) / (K - M + 1)
    K = K + 1
  }
  return(K - 1)
}

# GetBinary decodes the x-th combination of M items out of N into a logical
# inclusion mask of length N.
GetBinary <- function(x, M, N){
  B <- rep(FALSE, N)
  while(x >= 0 & M > 0){
    K <- FindK(x, M)
    x = x - choose(K, M)
    M = M - 1
    B[N - K] = TRUE
  }
  return(B)
}

# ReservoirSample draws samplenum indices from 1:N by reservoir sampling.
ReservoirSample <- function(N, samplenum){
  Reservoir <- seq(from = 1, to = samplenum)
  i = samplenum + 1
  while(i <= N){
    rand <- as.integer(runif(1) * i) + 1
    if(rand <= samplenum){
      Reservoir[rand] = i
    }
    i = i + 1
  }
  return(Reservoir)
}

# TwosampleSRS returns a matrix whose columns are rearrangements of data:
# "R" samples permutations with replacement, "S" samples distinct
# combinations uniformly, "W" uses reservoir sampling over combinations.
TwosampleSRS <- function(data, index, samplenum, method = "R"){
  if(method == "R"){
    return(replicate(samplenum, sample(data)))
  }
  N <- length(data)
  M <- length(index[index == 1])
  if(samplenum > choose(N, M)) stop("sample number is greater than the number of all possible combinations")
  if(method == "S"){
    SampleIndex <- sample(0:(choose(N, M) - 1), samplenum)
  } else if(method == "W"){
    SampleIndex <- ReservoirSample((choose(N, M) - 1), samplenum)
  }
  Index <- matrix(rep(FALSE, samplenum * N), N, samplenum)
  replace <- matrix(rep(-1, samplenum * N), N, samplenum)
  for(i in 1:samplenum){
    Index[, i] <- GetBinary(SampleIndex[i], M, N)
    replace[, i] <- c(data[Index[, i]], data[!Index[, i]])
  }
  return(replace)
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/TwoSampleSRS.R
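A minimal sketch of the sampling helper on its own: draw 5 distinct two-group rearrangements of a small vector without replacement ("S"); each column is one arrangement with the group-1 positions listed first:

set.seed(3)
dat <- 1:6
idx <- c(1, 1, 1, 2, 2, 2)
TwosampleSRS(dat, idx, samplenum = 5, method = "S")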
cip <- function(x, conf.level = 0.95, p = 0.5){

  ################### warning ##################
  if(!is.vector(x)) stop("The input must be a vector")
  if(!is.numeric(x)) stop("The input must be numerical")

  cts_value = c(conf.level, p)
  cts_name = c("conf.level", "p")
  cts = data.frame(cts_name, cts_value)
  for(i in 1:nrow(cts)){
    if(!is.numeric(cts[i, 2])) stop(paste(cts[i, 1], "input is not valid"))
    if(cts[i, 2] < 0 | cts[i, 2] > 1) stop(paste(cts[i, 1], "input is out of range"))
  }
  ###########################################

  n = length(x)
  y = sort(x)
  beta = 1 - (1 - conf.level) / 2
  d = qnorm(beta) * sqrt(n * p * (1 - p))
  a = round(p * n - d)
  b = round(p * n + 1 + d)

  if(a >= 1 & b <= n){
    cat("The ", conf.level * 100, "% confidence interval for the ", (p * 100), "th percentile is ", "(", y[floor(a)], ",", y[ceiling(b)], ").\n", sep = "")
    interval <- list(lower.rank = floor(a), upper.rank = ceiling(b), lower = y[floor(a)], upper = y[ceiling(b)])
  } else if(a < 1){
    a <- 1
    b <- 1 + n * p + sqrt(n * p * (1 - p)) * qnorm(conf.level + pnorm((a - n * p) / sqrt(n * p * (1 - p))))
    warning("Can't find the symmetric shortest interval because the resulting a (the rank of the lower bound) is less than 1")
    cat("The ", conf.level * 100, "% confidence interval for the ", (p * 100), "th percentile is ", "(", y[floor(a)], ",", y[ceiling(b)], ").\n", sep = "")
    interval <- list(lower.rank = floor(a), upper.rank = ceiling(b), lower = y[floor(a)], upper = y[ceiling(b)])
  } else if(b > length(x)){
    b <- length(x)
    a <- n * p + sqrt(n * p * (1 - p)) * qnorm(pnorm((b - 1 - n * p) / sqrt(n * p * (1 - p))) - conf.level)
    warning("Can't find the symmetric shortest interval because the resulting b (the rank of the upper bound) is greater than the length of the given data")
    cat("The ", conf.level * 100, "% confidence interval for the ", (p * 100), "th percentile is ", "(", y[floor(a)], ",", y[ceiling(b)], ").\n", sep = "")
    interval <- list(lower.rank = floor(a), upper.rank = ceiling(b), lower = y[floor(a)], upper = y[ceiling(b)])
  }
  invisible(interval)
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/cip.r
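A minimal sketch: a distribution-free 95% confidence interval for the median (p = 0.5) of 30 exponential observations; the endpoints are order statistics of the sample:

set.seed(11)
ci <- cip(rexp(30), conf.level = 0.95, p = 0.5)
ci$lower.rank; ci$upper.rank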
corr_test = function(x, y, alternative = "greater", measure = "pearson", method_p = "sampling", samplenum = 1000, conf.level.sample = 0.95){

  ################ warning message #########################
  if(!is.numeric(c(x, y))) stop("The input data is not numeric")
  if(length(x) != length(y)) stop("The two input vectors have different lengths")

  parameter_value = c(alternative, measure, method_p)
  parameter_name = c("alternative", "measure", "method_p")
  parameter_content = c("greater less two.sided", "pearson spearman kendall", "sampling asymptotic exact")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  cts_value = c(samplenum, conf.level.sample)
  cts_name = c("samplenum", "conf.level.sample")
  cts = data.frame(cts_name, cts_value)
  for(i in 1:nrow(cts)){
    if(!is.numeric(cts[i, 2])) stop(paste(cts[i, 1], "input is not valid"))
    if(i %in% c(2)){
      if(cts[i, 2] < 0 | cts[i, 2] > 1) stop(paste(cts[i, 1], "input is out of range"))
    }
  }

  if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
  if(method_p != "sampling" & !missing(conf.level.sample)) warning("\n conf.level.sample is not working")
  ##############################################

  # Kendall's tau via pairwise concordance counts
  kendtau <- function(x, y){
    count = 0
    n = length(x)
    for(i in 1:(n - 1)){
      for(j in (i + 1):n){
        tmp = ((x[i] - x[j]) * (y[i] - y[j]) > 0)
        count = count + tmp
      }
    }
    tau = count / choose(n, 2)
    2 * tau - 1
  }

  nx = length(x)
  ny = length(y)
  n = nx

  if(measure == "pearson"){
    Dobs = cor(x, y)
  } else if(measure == "spearman"){
    x = rank(x); y = rank(y)
    Dobs = cor(x, y)
  } else if(measure == "kendall"){
    Dobs = kendtau(x, y)
  }

  if(method_p == "sampling"){
    if(measure != "kendall") replace = replicate(samplenum, cor(x, sample(y)))
    else replace = replicate(samplenum, kendtau(x, sample(y)))
    if(alternative == "greater"){
      p.value = length(replace[replace >= Dobs]) / samplenum
    } else if(alternative == "less"){
      p.value = length(replace[replace <= Dobs]) / samplenum
    } else if(alternative == "two.sided"){
      replace = abs(replace)
      p.value = length(replace[replace >= abs(Dobs)]) / samplenum
    }
    p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
  } else if(method_p == "asymptotic"){
    if(measure != "kendall") z = Dobs * sqrt(n - 1)
    else{
      V = (4 * n + 10) / (9 * (n^2 - n))
      z = Dobs / sqrt(V)
    }
    if(alternative == "greater"){
      p.value = 1 - pnorm(z)
    } else if(alternative == "less"){
      p.value = pnorm(z)
    } else if(alternative == "two.sided"){
      p.value = min(1, 2 - 2 * pnorm(abs(z)))
    }
  } else if(method_p == "exact"){
    if(!requireNamespace("e1071")){ stop("Need R-Package e1071 to finish test.") }
    z <- e1071::permutations(ny)
    if(measure != "kendall"){
      replace <- apply(z, MARGIN = 1, function(z){ cor(x, y[z]) })
    } else{
      replace <- apply(z, MARGIN = 1, function(z){ kendtau(x, y[z]) })
    }
    if(alternative == "greater"){
      p.value = length(replace[replace >= Dobs]) / length(replace)
    } else if(alternative == "less"){
      p.value = length(replace[replace <= Dobs]) / length(replace)
    } else if(alternative == "two.sided"){
      p.greater = length(replace[replace >= Dobs]) / length(replace)
      p.less = length(replace[replace <= Dobs]) / length(replace)
      p.value = min(2 * p.greater, 2 * p.less, 1)
    }
  }

  method = "Correlation test"
  names(p.value) = method_p
  names(Dobs) = "Dobs"
  if(method_p == "asymptotic"){ attr(p.value, "type") <- "normal" }
  conf = NULL
  if(method_p == "sampling"){
    conf = c(p.down, p.up)
    attr(conf, "conf.level") = conf.level.sample
  }
  null.value = c(paste(measure, "correlation"), "0")
  attr(null.value, "direction") = alternative
  output = list(method = method, score = measure, stat = Dobs, conf.int = conf, pval = p.value, null.value = null.value)
  class(output) = "nonp"
  output
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/corr_test.R
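A minimal sketch: a sampled permutation p-value for a positive Kendall association between two short vectors:

set.seed(5)
u <- rnorm(12)
v <- u + rnorm(12)
corr_test(u, v, alternative = "greater", measure = "kendall", method_p = "sampling", samplenum = 500)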
emcdf <- function(x, conf.level = 0.05){

  #################### warning #######################
  if(!is.vector(x)) stop("The input must be a vector")
  if(!is.numeric(x)) stop("The input must be numerical")
  if(!is.numeric(conf.level)) stop("The conf.level must be numerical")
  if(conf.level < 0 | conf.level > 1) stop("The conf.level is out of range")
  ###########################################

  u <- sort(x)
  n = length(x)
  for(i in 1:length(x)){
    u[i] = sum(x <= u[i]) / n
  }

  # note: conf.level is used as the significance level alpha, so the default
  # of 0.05 yields pointwise 95% Wilson score bounds for the empirical CDF
  deta <- (2 * u * n + qnorm(1 - conf.level / 2)^2)^2 - 4 * (n + qnorm(1 - conf.level / 2)^2) * (u^2) * n
  lower <- ((2 * u * n + qnorm(1 - conf.level / 2)^2) - sqrt(deta)) / (2 * (n + qnorm(1 - conf.level / 2)^2))
  upper <- ((2 * u * n + qnorm(1 - conf.level / 2)^2) + sqrt(deta)) / (2 * (n + qnorm(1 - conf.level / 2)^2))

  y <- data.frame(sample = sort(x), empirical.cdf = u, lower = lower, upper = upper)
  class(y) <- c("emcdf", "data.frame")
  return(y)
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/emcdf.r
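A minimal sketch: the empirical CDF of 40 normal draws with pointwise bounds, displayed with the plot method defined in nonp.R below:

set.seed(2)
e <- emcdf(rnorm(40), conf.level = 0.05)
head(e)
plot(e)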
ksample_test <- function(x, group, score = "kruskal", method_p = "sampling", type = "normal", samplenum = 1000, conf.level.sample = 0.95){

  ################ warning message #########################
  if(!is.numeric(x)) stop("The input data is not numeric")
  if(length(x) != length(group)) stop("The length of the data is not equal to the length of the group")

  parameter_value = c(score, method_p, type)
  parameter_name = c("score", "method_p", "type")
  parameter_content = c("original kruskal van exp", "sampling asymptotic exact", "normal JT")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  cts_value = c(samplenum, conf.level.sample)
  cts_name = c("samplenum", "conf.level.sample")
  cts = data.frame(cts_name, cts_value)
  for(i in 1:nrow(cts)){
    if(!is.numeric(cts[i, 2])) stop(paste(cts[i, 1], "input is not valid"))
    if(i %in% c(2)){
      if(cts[i, 2] < 0 | cts[i, 2] > 1) stop(paste(cts[i, 1], "input is out of range"))
    }
  }

  if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
  if(method_p != "sampling" & !missing(conf.level.sample)) warning("\n conf.level.sample is not working")
  if(type == "JT" & !missing(score)) warning("\n score is not working")
  ##############################################

  # Savage (exponential) scores
  expsc <- function(x){
    y = rank(x)
    n = length(x)
    sc = rep(0, n)
    for(i in 1:n){
      for(j in 1:y[i]){
        sc[i] = sc[i] + 1 / (n + 1 - j)
      }
    }
    sc
  }

  # Kruskal-Wallis-type statistic on (possibly transformed) scores
  kw = function(x, group){
    group = factor(group)
    m = mean(x)
    sum(tapply(x, group, function(x) length(x) * (mean(x) - m)^2)) / var(x)
  }

  # Mann-Whitney count of pairs with x1 > x2
  mw <- function(x1, x2){
    u = 0
    for(i in 1:length(x2)){
      u = u + sum(x1 > x2[i])
    }
    u
  }

  # Jonckheere-Terpstra statistic
  jt <- function(x, group){
    ug = unique(group)
    k = length(ug)
    jt = 0
    for(j in 1:(k - 1)){
      for(i in (j + 1):k){
        xa = x[group == ug[i]]
        xb = x[group == ug[j]]
        jt = jt + mw(xa, xb)
      }
    }
    jt
  }

  n = length(x)
  k = length(unique(group))
  group = factor(group)

  if(type == "normal"){
    if(score == "original"){
      method_name = "F-test"
    } else if(score == "kruskal"){
      x = rank(x)
      method_name = "Kruskal-Wallis rank sum test"
    } else if(score == "van"){
      x = qnorm(rank(x) / (n + 1))
      method_name = "Van der Waerden test"
    } else if(score == "exp"){
      x = expsc(x)
      method_name = "Savage exponential test"
    }
    Dobs = kw(x, group)
    if(method_p == "asymptotic"){
      p.value = 1 - pchisq(Dobs, k - 1)
    } else if(method_p == "sampling"){
      replace = replicate(samplenum, kw(sample(x), group))
      p.value = length(replace[replace >= Dobs]) / samplenum
      p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
      p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    } else if(method_p == "exact"){
      print("The exact method is under preparation...")
    }
  } else if(type == "JT"){
    Dobs = jt(x, group)
    if(method_p == "asymptotic"){
      size = table(group)
      mu = (n^2 - sum(size^2)) / 4
      sigma = (n^2 * (2 * n + 3) - sum(size^2 * (2 * size + 3))) / 72
      p.value = 1 - pnorm((Dobs - mu) / sqrt(sigma))
    } else if(method_p == "sampling"){
      replace = replicate(samplenum, jt(sample(x), group))
      p.value = length(replace[replace >= Dobs]) / samplenum
      p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
      p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    } else if(method_p == "exact"){
      print("The exact method is under preparation...")
    }
  }

  ############# output ################################
  method = paste(switch(type, "normal" = method_name, "JT" = "Jonckheere-Terpstra test"), "with", k, "groups")
  names(p.value) = method_p
  names(Dobs) = "Dobs"
  if(method_p == "asymptotic"){
    attr(p.value, "type") = switch(type, "normal" = "chisq", "JT" = "normal")
    if(type == "normal") attr(p.value, "df") = k - 1
  }
  conf = NULL
  if(method_p == "sampling"){
    conf = c(p.down, p.up)
    attr(conf, "conf.level") = conf.level.sample
  }
  alternative = switch(type,
                       "normal" = "There exists at least one pair of groups such that the means differ.",
                       "JT" = "The grouped means are ordered.")
  output = list(method = method, stat = Dobs, conf.int = conf, pval = p.value, alternative = alternative)
  class(output) = "nonp"
  output
  ##################################
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/ksample_test.R
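A minimal sketch: a Kruskal-Wallis-type k-sample comparison of three simulated groups with a sampled permutation null:

set.seed(9)
vals <- c(rnorm(8), rnorm(8, 1), rnorm(8, 2))
grp  <- rep(1:3, each = 8)
ksample_test(vals, grp, score = "kruskal", method_p = "sampling", samplenum = 500)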
print.nonp <- function(x, digits = getOption("digits"), prefix = "\t\t", ...){

  cat("\n")
  theme <- strwrap(x$method, prefix = prefix)
  if(!is.null(x$score)){
    if(x$score %in% c("pearson", "spearman", "kendall")) theme <- c(theme, paste("with", x$score, "measurement"))
    else theme <- c(theme, paste("using", x$score, "scoring"))
  }
  cat(theme, "\n")

  out <- character()
  if(!is.null(x$stat)) out <- c(out, paste(names(x$stat), "=", format(x$stat, digits = max(1L, digits - 2L))))
  if(!is.null(x$pval)){
    fp <- paste(format.pval(x$pval, digits = max(1L, digits - 3L)))
    if(!is.null(attr(x$pval, "type"))){
      if(attr(x$pval, "type") %in% c("W", "R", "S"))
        out <- c(out, paste(names(x$pval),
                            switch(attr(x$pval, "type"), "W" = "without replacement", "S" = "without replacement", "R" = "with replacement"),
                            paste("method to calculate :\n\t",
                                  if(!is.null(attr(x$pval, "df"))){ paste("df = ", attr(x$pval, "df"), ",") },
                                  "p-value = ", sep = ""),
                            if(startsWith(fp, "<")) fp else paste(fp)))
      else
        out <- c(out, paste(attr(x$pval, "type"), names(x$pval),
                            paste("method to calculate :\n\t",
                                  if(!is.null(attr(x$pval, "df"))){ paste("df = ", attr(x$pval, "df"), ",") },
                                  "p-value = ", sep = ""),
                            if(startsWith(fp, "<")) fp else paste(fp)))
    }
    else
      out <- c(out, paste(names(x$pval),
                          paste("method to calculate :\n\t",
                                if(!is.null(attr(x$pval, "df"))){ paste("df = ", attr(x$pval, "df"), ",") },
                                "p-value = ", sep = ""),
                          if(startsWith(fp, "<")) fp else paste(fp)))
  }
  cat(paste(out, collapse = "\n"), sep = "\n")

  if(is.null(x$alternative)){
    cat("alternative hypothesis: \n\t")
    alt.char <- switch(attr(x$null.value, "direction"), two.sided = "not equal to", less = "less than", greater = "greater than")
    cat(x$null.value[1], " is ", alt.char, " ", x$null.value[2], "\n", sep = "")
  }
  else if(is.null(x$null.value)) cat("alternative hypothesis: \n\t", x$alternative, "\n", sep = "")

  if(!is.null(x$conf.int)){
    cat(format(100 * attr(x$conf.int, "conf.level")), "% confidence interval of p-value :\n\t",
        paste("[", format(x$conf.int[1], digits = digits - 3L), ",", format(x$conf.int[2], digits = digits - 3L), "]", collapse = " "),
        "\n", sep = "")
  }
  if(!is.null(x$addition)) cat("\n", x$addition)
  if(!is.null(x$alternative) & !is.null(x$null.value)){
    warning("Alternative and null.value both exist, so output from null.value disabled.")
  }
  cat("\n")
  invisible(x)
}

plot.emcdf <- function(x, ...){
  data <- x
  x <- data$sample
  n = length(x)
  max_x <- max(x)
  min_x <- min(x)
  rangex <- max_x - min_x
  start <- min_x - rangex / n
  final <- max_x + rangex / n
  x1 = c(start, sort(x), final)
  y1 = c(0, data$empirical.cdf, 1)
  lower_plot <- c(0, data$lower, 1)
  upper_plot <- c(0, data$upper, 1)
  plot(x1, y1, type = 'S', xlab = 'Cycles', ylab = 'Probability', main = 'Empirical Distribution', ...)
  lines(x1, upper_plot, lty = 2, type = 'S', col = 'red')
  lines(x1, lower_plot, lty = 2, type = 'S', col = 'blue')
  legend("bottomright", lty = c(1, 2, 2), col = c('black', 'red', 'blue'), c('EM-CDF', 'Upper', 'Lower'), cex = 0.8)
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/nonp.R
pairwise_test = function(x, y, alternative = "greater", score = "wilcoxon", method_p = "asymptotic", method_asymptotic = "norm", method_wilcoxon = "type1", samplenum = 1000, conf.level.sample = 0.95, samplemethod = "R"){

  ################ warning message #########################
  if(!is.numeric(c(x, y))) stop("The input data is not numeric")
  if(length(x) != length(y)) stop("The two input vectors have different lengths")

  parameter_value = c(alternative, score, method_p, method_asymptotic, method_wilcoxon, samplemethod)
  parameter_name = c("alternative", "score", "method_p", "method_asymptotic", "method_wilcoxon", "samplemethod")
  parameter_content = c("greater less two.sided", "original wilcoxon sign", "sampling exact asymptotic", "norm binomial", "type1 type2", "R S")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  cts_value = c(samplenum, conf.level.sample)
  cts_name = c("samplenum", "conf.level.sample")
  cts = data.frame(cts_name, cts_value)
  for(i in 1:nrow(cts)){
    if(!is.numeric(cts[i, 2])) stop(paste(cts[i, 1], "input is not valid"))
    if(i %in% c(2)){
      if(cts[i, 2] < 0 | cts[i, 2] > 1) stop(paste(cts[i, 1], "input is out of range"))
    }
  }

  if(score == "sign" & method_p != "asymptotic") stop("'score = sign' only uses the asymptotic method to calculate the p-value")
  if(score != "sign" & method_asymptotic == "binomial") stop("The binomial asymptotic method can only be used when 'score = sign'")
  if(score != "wilcoxon" & !missing(method_wilcoxon)) warning("\n method_wilcoxon is not working")
  if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
  if(method_p != "sampling" & !missing(conf.level.sample)) warning("\n conf.level.sample is not working")
  ##############################################

  # decode integer x into a +1/-1 sign vector of length k
  binary <- function(x, k){
    tmp = NULL
    y = x
    if(x < 2^k){
      for(i in k - 1:k){
        a = floor(y / 2^i)
        tmp = c(tmp, a)
        y = y - a * 2^i
      }
    }
    2 * (tmp - 0.5)
  }

  # enumerate all sign flips and return the positive-part sums
  ppm <- function(x){
    k = length(x)
    ppm = NULL
    N = 2^k - 1
    for(i in 0:N){
      condition = x * binary(i, k)
      ppm = c(ppm, sum(condition[condition > 0]))
    }
    ppm
  }

  # one random sign flip (sampling with replacement)
  ppmrr <- function(x){
    ind = rbinom(length(x), 1, 0.5)
    ind = (ind - 0.5) * 2
    condition = x * ind
    sum(condition[condition > 0])
  }

  # sample sign flips without replacement
  ppmr <- function(x, samplenum){
    N <- length(x)
    s_index <- sample(1:2^N, samplenum) - 1
    ppmr <- NULL
    for(i in 1:samplenum){
      tmp = x * binary(s_index[i], N)
      ppmr = c(ppmr, sum(tmp[tmp > 0]))
    }
    ppmr
  }

  sign_binomial_greater = function(k, n){
    total = 0
    for(i in k:n){
      total = total + 0.5^n * choose(n, i)
    }
    total
  }
  sign_binomial_less = function(k, n){
    total = 0
    for(i in 0:k){
      total = total + 0.5^n * choose(n, i)
    }
    total
  }

  data = x - y

  if(score == "original"){
    E = sum(abs(data)) / 2
    V = sum(abs(data)^2) / 4
  } else if(score == "wilcoxon"){
    if(method_wilcoxon == "type1"){
      transit = data
      data = rank(abs(data)) * sign(data)
      data[transit == 0] = 0
    } else if(method_wilcoxon == "type2"){
      data = data[data != 0]
      data = rank(abs(data)) * sign(data)
    }
    n = length(data)
    E = n * (n + 1) / 4
    V = E * (2 * n + 1) / 6
  } else if(score == "sign"){
    data[data > 0] = 1
    data[data < 0] = -1
    n = length(data)
    E = n / 2; V = n / 4
  }

  Dobs = sum(data[data > 0])

  if(method_p == "exact"){
    replace = ppm(data)
    if(alternative == "greater"){
      p.value = length(replace[replace >= Dobs]) / length(replace)
    } else if(alternative == "less"){
      p.value = length(replace[replace <= Dobs]) / length(replace)
    } else if(alternative == "two.sided"){
      p.greater = length(replace[replace >= Dobs]) / length(replace)
      p.less = length(replace[replace <= Dobs]) / length(replace)
      p.value = min(2 * p.greater, 2 * p.less, 1)
    }
  } else if(method_p == "sampling"){
    if(samplemethod == "R") replace = replicate(samplenum, ppmrr(data))
    else if(samplemethod == "S"){
      if(samplenum > 2^length(data)){ stop(paste("Samplenum of sampling without replacement should be smaller than", 2^length(data))) }
      replace = ppmr(data, samplenum)
    }
    if(alternative == "greater"){
      p.value = length(replace[replace >= Dobs]) / samplenum
    } else if(alternative == "less"){
      p.value = length(replace[replace <= Dobs]) / samplenum
    } else if(alternative == "two.sided"){
      p.greater = length(replace[replace >= Dobs]) / samplenum
      p.less = length(replace[replace <= Dobs]) / samplenum
      p.value = min(2 * p.greater, 2 * p.less, 1)
    }
    p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
  } else if(method_p == "asymptotic"){
    if(method_asymptotic == "norm"){
      if(alternative == "greater"){
        p.value = 1 - pnorm((Dobs - E - 0.5) / sqrt(V))
      } else if(alternative == "less"){
        p.value = pnorm((Dobs - E + 0.5) / sqrt(V))
      } else if(alternative == "two.sided"){
        p.value = min(1, 2 - 2 * pnorm((abs(Dobs - E) - 0.5) / sqrt(V)))
      }
    } else if(method_asymptotic == "binomial"){
      if(alternative == "greater"){
        p.value = sign_binomial_greater(Dobs, n)
      } else if(alternative == "less"){
        p.value = sign_binomial_less(Dobs, n)
      } else if(alternative == "two.sided"){
        p.value = 2 * min(sign_binomial_greater(Dobs, n), sign_binomial_less(Dobs, n))
      }
    }
  }

  ############# output ################################
  names(p.value) = method_p
  names(Dobs) = "Dobs"
  if(method_p == "asymptotic") attr(p.value, "type") = method_asymptotic
  conf = NULL
  if(method_p == "sampling"){
    attr(p.value, "type") = samplemethod
    conf = c(p.down, p.up)
    attr(conf, "conf.level") = conf.level.sample
  }
  null.value = c("The mean of the first", "the mean of the second")
  attr(null.value, "direction") = alternative
  output = list(method = "Pairwise comparison test", score = score, stat = Dobs, conf.int = conf, pval = p.value, null.value = null.value)
  class(output) = "nonp"
  output
  ##################################
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/pairwise_test.R
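A minimal sketch: a Wilcoxon signed-rank style paired comparison with a sampled permutation null:

set.seed(4)
before <- rnorm(12)
after  <- before + rnorm(12, mean = 0.5)
pairwise_test(after, before, alternative = "greater", score = "wilcoxon", method_p = "sampling", samplenum = 500)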
permu_table = function(data, permu = "row", row = NULL, col = NULL, fix = "row", samplenum = 1000){

  ########### warning #####################
  if(!is.matrix(data) & !is.data.frame(data)) stop("The input data must be a matrix or dataframe")
  if(!is.numeric(samplenum)) stop("The samplenum must be a numeric")

  parameter_value = c(permu, fix)
  parameter_name = c("permu", "fix")
  parameter_content = c("row col", "row col")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  if(!missing(row)){
    if(!is.numeric(row)) stop("The row parameter must be numeric")
    if(!is.vector(row)) stop("The row parameter must be a vector")
    if(length(row) != nrow(data)) stop("The length of 'row' must equal the number of rows of the data")
  }
  if(!missing(col)){
    if(!is.numeric(col)) stop("The col parameter must be numeric")
    if(!is.vector(col)) stop("The col parameter must be a vector")
    if(length(col) != ncol(data)) stop("The length of 'col' must equal the number of columns of the data")
  }
  if((missing(row) | missing(col)) & !missing(fix)) warning("\n fix is not working")
  ###################################

  # generate a random table holding the row margins fixed
  perm0 <- function(row, col){
    r = length(row)
    c = length(col)
    x = NULL
    for(i in 1:c){
      x = c(x, rep(i, col[i]))
    }
    y = sample(x)
    freq = matrix(rep(0, r * c), r, c)
    count = 0
    for(j in 1:r){
      tmpy = y[(count + 1):(count + row[j])]
      for(i in 1:c){
        freq[j, i] = sum(tmpy == i)
      }
      count = count + row[j]
    }
    freq
  }

  # generate a random table holding the column margins fixed
  perm1 <- function(row, col){
    r = length(row)
    c = length(col)
    x = NULL
    for(i in 1:r){
      x = c(x, rep(i, row[i]))
    }
    y = sample(x)
    freq = matrix(rep(0, r * c), r, c)
    count = 0
    for(j in 1:c){
      tmpy = y[(count + 1):(count + col[j])]
      for(i in 1:r){
        freq[i, j] = sum(tmpy == i)
      }
      count = count + col[j]
    }
    freq
  }

  # Pearson chi-squared statistic, written as sum(O^2/E) - sum(O)
  chi2 <- function(freq){
    row = apply(freq, 1, sum)
    col = apply(freq, 2, sum)
    row = matrix(rep(row, ncol(freq)), ncol = ncol(freq))
    col = matrix(rep(col, nrow(freq)), ncol = ncol(freq), byrow = TRUE)
    denom = row * col / sum(freq)
    sum(freq^2 / denom) - sum(freq)
  }

  if(is.null(row) & is.null(col)){
    row = apply(data, 1, sum)
    col = apply(data, 2, sum)
    obs = chi2(data)
    if(permu == "row"){
      results <- replicate(samplenum, chi2(perm0(row, col)))
    } else if(permu == "col"){
      results <- replicate(samplenum, chi2(perm1(row, col)))
    }
    p.value = sum(results >= obs) / samplenum
    method = "Pearson's Chi-squared test"
    names(p.value) = "simulated"
    names(obs) = "chi-squared_obs"
    alternative = "There exists at least one pair of groups that are not independent."
    output = list(method = method, stat = obs, pval = p.value, alternative = alternative)
    class(output) = "nonp"
  } else{
    r = nrow(data)
    c = ncol(data)
    if(is.vector(row) & is.null(col)){
      scoring = NULL
      group = NULL
      for(i in 1:r){
        for(j in 1:c){
          scoring = c(scoring, rep(row[i], data[i, j]))
          group = c(group, rep(j, data[i, j]))
        }
      }
      if(c > 2){
        output <- ksample_test(scoring, group, score = "kruskal", samplenum = samplenum, type = "normal")
        output$method <- "Kruskal test on table"
        output$alternative <- "The rows are ordered."
      } else if(c == 2){
        output <- twosample_test(scoring[group == 1], scoring[group == 2], score = "wilcoxon", alternative = "two.sided", method_p = "sampling", samplenum = samplenum)
        output$method <- "Wilcoxon test on table"
        output$alternative <- "The rows are ordered."
      }
    } else if(is.vector(col) & is.null(row)){
      scoring = NULL
      group = NULL
      for(j in 1:c){
        for(i in 1:r){
          scoring = c(scoring, rep(col[j], data[i, j]))
          group = c(group, rep(i, data[i, j]))
        }
      }
      if(r > 2){
        output <- ksample_test(scoring, group, score = "kruskal", samplenum = samplenum, type = "normal")
        output$method <- "Kruskal test on table"
        output$alternative <- "The columns are ordered."
      } else if(r == 2){
        output <- twosample_test(scoring[group == 1], scoring[group == 2], score = "wilcoxon", alternative = "two.sided", method_p = "sampling", samplenum = samplenum)
        output$method <- "Wilcoxon test on table"
        output$alternative <- "The columns are ordered."
      }
    } else if(is.vector(col) & is.vector(row)){
      if(fix == "row"){
        scoring = NULL
        group = NULL
        for(j in 1:c){
          for(i in 1:r){
            scoring = c(scoring, rep(col[j], data[i, j]))
            group = c(group, rep(i, data[i, j]))
          }
        }
      } else if(fix == "col"){
        scoring = NULL
        group = NULL
        for(i in 1:r){
          for(j in 1:c){
            scoring = c(scoring, rep(row[i], data[i, j]))
            group = c(group, rep(j, data[i, j]))
          }
        }
      }
      output <- ksample_test(scoring, group, type = "JT", samplenum = samplenum)
      output$method <- "Jonckheere-Terpstra test on table"
      output$alternative <- "Both rows and columns are ordered."
    }
  }
  return(output)
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/permu_table.R
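A minimal sketch: a chi-squared permutation test of independence on a 2 x 3 contingency table (no margin scores are supplied, so the chi-squared branch is used):

set.seed(8)
tab <- matrix(c(12, 5, 9, 4, 11, 10), nrow = 2)
permu_table(tab, permu = "row", samplenum = 1000)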
siegel_tukey = function(x, y, adjust.median = FALSE, ...){

  if(!is.numeric(c(x, y))) stop("The input data is not numeric")

  data = data.frame(c(x, y), rep(c(0, 1), c(length(x), length(y))))
  names(data) = c("x", "y")
  data = data[order(data$x), ]

  if(adjust.median == TRUE){
    data$x[data$y == 0] = data$x[data$y == 0] - (median(data$x[data$y == 0]))
    data$x[data$y == 1] = data$x[data$y == 1] - (median(data$x[data$y == 1]))
  }

  x <- c(x, y)
  sort.x <- sort(data$x)
  sort.id <- data$y[order(data$x)]
  data.matrix <- data.frame(sort.x, sort.id)

  # build the alternating Siegel-Tukey ranks from the two ends of the sorted data
  base1 <- c(1, 4)
  iterator1 <- matrix(seq(from = 1, to = length(x), by = 4)) - 1
  rank1 <- apply(iterator1, 1, function(x) x + base1)

  iterator2 <- matrix(seq(from = 2, to = length(x), by = 4))
  base2 <- c(0, 1)
  rank2 <- apply(iterator2, 1, function(x) x + base2)

  if(length(rank1) == length(rank2)){
    rank <- c(rank1[1:floor(length(x) / 2)], rev(rank2[1:ceiling(length(x) / 2)]))
  } else{
    rank <- c(rank1[1:ceiling(length(x) / 2)], rev(rank2[1:floor(length(x) / 2)]))
  }

  unique.ranks <- tapply(rank, sort.x, mean)
  unique.x <- as.numeric(as.character(names(unique.ranks)))
  rank.matrix <- data.frame(unique.x, unique.ranks)
  ST.matrix <- merge(data.matrix, rank.matrix, by.x = "sort.x", by.y = "unique.x")

  ranks0 <- ST.matrix$unique.ranks[ST.matrix$sort.id == 0]
  ranks1 <- ST.matrix$unique.ranks[ST.matrix$sort.id == 1]

  output <- twosample_test(ranks0, ranks1, score = "wilcoxon", conf.diff = FALSE, ...)
  output$method <- "Siegel-Tukey test"
  output$score <- NULL
  output$null.value[1] <- "The variance of the first"
  output$null.value[2] <- "the variance of the second"
  output
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/siegel_tukey.R
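A minimal sketch: a Siegel-Tukey test that the first sample is more spread out, with the alternative passed through to the underlying twosample_test via the dots:

set.seed(6)
wide   <- rnorm(10, sd = 3)
narrow <- rnorm(10, sd = 1)
siegel_tukey(wide, narrow, alternative = "greater")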
twosample_test = function(x, y, alternative = "greater", score = "wilcoxon", method_p = "sampling", samplenum = 2000, samplemethod = "R", conf.level.sample = 0.95, conf.diff = TRUE, conf.level.diff = 0.95){

  ################ warning #########################
  if(!is.numeric(c(x, y))) stop("The input data is not numeric")

  parameter_value = c(alternative, score, method_p, samplemethod, conf.diff)
  parameter_name = c("alternative", "score", "method_p", "samplemethod", "conf.diff")
  parameter_content = c("greater less two.sided", "original wilcoxon van exp", "sampling exact asymptotic", "S R W", "TRUE FALSE 1 0")
  parameter = data.frame(parameter_value, parameter_name, parameter_content)
  for(i in 1:nrow(parameter)){
    if(!parameter[i, 1] %in% strsplit(parameter[i, 3], " ")[[1]]) stop(paste(parameter[i, 2], "input is not valid"))
  }

  cts_value = c(samplenum, conf.level.sample, conf.level.diff)
  cts_name = c("samplenum", "conf.level.sample", "conf.level.diff")
  cts = data.frame(cts_name, cts_value)
  for(i in 1:nrow(cts)){
    if(!is.numeric(cts[i, 2])) stop(paste(cts[i, 1], "input is not valid"))
    if(i %in% c(2, 3)){
      if(cts[i, 2] < 0 | cts[i, 2] > 1) stop(paste(cts[i, 1], "input is out of range"))
    }
  }

  if(method_p != "sampling" & !missing(samplenum)) warning("\n samplenum is not working")
  if(method_p != "sampling" & !missing(samplemethod)) warning("\n samplemethod is not working")
  if(method_p != "sampling" & !missing(conf.level.sample)) warning("\n conf.level.sample is not working")
  if(conf.diff == FALSE & !missing(conf.level.diff)) warning("\n conf.level.diff is not working")
  ##############################################

  # Savage (exponential) scores
  expsc <- function(x){
    y = rank(x)
    n = length(x)
    sc = rep(0, n)
    for(i in 1:n){
      for(j in 1:y[i]){
        sc[i] = sc[i] + 1 / (n + 1 - j)
      }
    }
    sc
  }

  nx = length(x)
  ny = length(y)
  n = nx + ny
  data = c(x, y)
  index = c(rep(1, nx), rep(2, ny))

  # Hodges-Lehmann estimate and distribution-free CI for the location difference
  if(conf.diff){
    diff = rep(1, nx * ny)
    for(i in 1:nx){
      for(j in 1:ny){
        k = (i - 1) * ny + j
        diff[k] = x[i] - y[j]
      }
    }
    diff = sort(diff)
    mu = nx * ny / 2
    va = mu * (n + 1) / 6
    ka = round(mu - qnorm(0.5 + 0.5 * conf.level.diff) * sqrt(va))
    kb = round(mu + qnorm(0.5 + 0.5 * conf.level.diff) * sqrt(va) + 1)
    diff.up = diff[kb]
    diff.down = diff[ka]
    Hodges = median(diff)
  }

  if(score == "original"){
    # raw data are used as scores
  } else if(score == "wilcoxon"){
    data = rank(data)
  } else if(score == "van"){
    data = qnorm(rank(data) / (n + 1))
  } else if(score == "exp"){
    data = expsc(data)
  }

  Dobs = sum(data[index == 1])

  if(method_p == "exact"){
    all.comb = combn(n, nx)
    N = choose(n, nx)
    replace = rep(1, N)
    for(i in 1:N){
      ind = all.comb[, i]
      replace[i] = sum(data[ind])
    }
    if(alternative == "greater"){
      p.value = length(replace[replace >= Dobs]) / N
    } else if(alternative == "less"){
      p.value = length(replace[replace <= Dobs]) / N
    } else if(alternative == "two.sided"){
      p.greater = length(replace[replace >= Dobs]) / N
      p.less = length(replace[replace <= Dobs]) / N
      p.value = min(2 * p.less, 2 * p.greater, 1)
    }
  } else if(method_p == "sampling"){
    replace = apply(TwosampleSRS(data, index, samplenum, method = samplemethod), 2, function(x) sum(x[1:nx]))
    if(alternative == "greater"){
      p.value = length(replace[replace >= Dobs]) / samplenum
    } else if(alternative == "less"){
      p.value = length(replace[replace <= Dobs]) / samplenum
    } else if(alternative == "two.sided"){
      p.greater = length(replace[replace >= Dobs]) / samplenum
      p.less = length(replace[replace <= Dobs]) / samplenum
      p.value = min(2 * p.less, 2 * p.greater, 1)
    }
    p.up = p.value + qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
    p.down = p.value - qnorm(0.5 + 0.5 * conf.level.sample) * (p.value * (1 - p.value) / samplenum)^0.5
  } else if(method_p == "asymptotic"){
    E = nx * mean(data)
    V = nx * ny * var(data) / n
    if(alternative == "greater"){
      p.value = 1 - pnorm((Dobs - E - 0.5) / sqrt(V))
    } else if(alternative == "less"){
      p.value = pnorm((Dobs - E + 0.5) / sqrt(V))
    } else if(alternative == "two.sided"){
      p.value = min(2 - 2 * pnorm((abs(Dobs - E) - 0.5) / sqrt(V)), 1)
    }
  }

  ############# output ################################
  names(p.value) = method_p
  names(Dobs) = "Dobs"
  if(method_p == "asymptotic") attr(p.value, "type") = "normal"
  conf = NULL
  if(method_p == "sampling"){
    attr(p.value, "type") = samplemethod
    conf = c(p.down, p.up)
    attr(conf, "conf.level") = conf.level.sample
  }
  null.value = c("The mean of the first", "the mean of the second")
  attr(null.value, "direction") = alternative
  output = list(method = "Two sample test", score = score, stat = Dobs, conf.int = conf, pval = p.value, null.value = null.value)
  if(conf.diff){
    output <- c(output, addition = paste("The Hodges-Lehmann statistic =", Hodges, "\nThe", conf.level.diff * 100, "% CI for mean difference is [", diff.down, ",", diff.up, "]"))
  }
  class(output) = "nonp"
  output
  ##################################
}
/scratch/gouwar.j/cran-all/cranData/CNPS/R/twosample_test.R
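A minimal sketch: a two-sample location test using van der Waerden (normal) scores with an asymptotic p-value; the Hodges-Lehmann estimate is reported because conf.diff defaults to TRUE:

set.seed(10)
g1 <- rnorm(15, mean = 1)
g2 <- rnorm(15)
twosample_test(g1, g2, alternative = "greater", score = "van", method_p = "asymptotic")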
#' The 'CNVRG' package.
#'
#' @description This package implements Dirichlet multinomial modeling of relative abundance data using functionality provided by the 'Stan' software. The purpose of this package is to provide a user-friendly way to interface with 'Stan' that is suitable for those new to modeling.
#'
#' @docType package
#' @name CNVRG-package
#' @aliases CNVRG
#' @useDynLib CNVRG, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @import tibble
#' @importFrom vegan diversity
#' @importFrom rstan sampling
#'
#' @references
#' Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.18.2.
#'
NULL
/scratch/gouwar.j/cran-all/cranData/CNVRG/R/CNVRG-package.R
#' Perform Hamiltonian Monte Carlo sampling
#'
#' This function uses a compiled Dirichlet multinomial model and performs Hamiltonian Monte Carlo sampling of posteriors using 'Stan'.
#' After sampling it is important to check convergence. Use the summary function and shinystan to do this.
#' If you use this function then credit 'Stan' and 'RStan' along with this package.
#'
#' It can be helpful to use the indexer function to automatically identify the indices needed for the 'starts' and 'ends' parameters. See the vignette for an example.
#'
#' Warning: data must be input in the correct organized format or this function will not provide accurate results. See the vignette if you are unsure how to organize data.
#' Warning: depending upon the size of the data to be analyzed this function can take a very long time to run.
#' @param countData A matrix or data frame of counts. The first field should be sample names and the subsequent fields should be integer data. Data should be arranged so that the first n rows correspond to one treatment group and the next n rows correspond with the next treatment group, and so on. The row indices for the first and last sample in these groups are fed into this function via 'starts' and 'ends'.
#' @param starts A vector defining the indices that correspond to the first sample in each treatment group. The indexer function can help with this.
#' @param ends A vector defining the indices that correspond to the last sample in each treatment group. The indexer function can help with this.
#' @param algorithm The algorithm to use when sampling. Either 'NUTS' or 'HMC' or 'Fixed_param'. If unsure, then be like a squirrel and use 'NUTS', i.e. No-U-Turn Sampling (the abbreviation is from 'Stan').
#' @param chains The number of chains to run.
#' @param burn The warm up or 'burn in' time.
#' @param samples How many samples from the posterior to save.
#' @param thinning_rate Thinning rate to use during sampling.
#' @param cores The number of cores to use.
#' @param params_to_save The parameters from which to save samples. Can be 'p', 'pi', 'theta'.
#' @return A fitted 'Stan' object that includes the samples from the parameters designated.
#' @examples
#' #simulate an OTU table
#' com_demo <- matrix(0, nrow = 10, ncol = 10)
#' com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7
#' com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation
#' fornames <- NA
#' for(i in 1:length(com_demo[1,])){
#'   fornames[i] <- paste("otu_", i, sep = "")
#' }
#' sample_vec <- NA
#' for(i in 1:length(com_demo[,1])){
#'   sample_vec[i] <- paste("sample", i, sep = "_")
#' }
#' com_demo <- data.frame(sample_vec, com_demo)
#' names(com_demo) <- c("sample", fornames)
#'
#' #These are toy data, many more samples, multiple chains, and a longer burn
#' #are likely advisable for real data.
#' fitstan_HMC <- cnvrg_HMC(com_demo,starts = c(1,6),
#'                          ends = c(5,10),
#'                          chains = 1,
#'                          burn = 100,
#'                          samples = 150,
#'                          thinning_rate = 2)
#' @export
cnvrg_HMC <- function(countData, starts, ends, algorithm = "NUTS", chains = 2, burn = 500, samples = 1000, thinning_rate = 2, cores = 1, params_to_save = c("pi", "p")){

  # Statements, warnings, and errors
  if(dim(countData)[2] > 5000 & dim(countData)[1] > 100){
    print("You have a lot of data. Yay! Beware that modeling could be slow and you may want to run this on a remote computer.")
  }
  if(any(c("NUTS", "HMC", "Fixed_param") %in% algorithm) == F){
    stop("Algorithm must be one of 'NUTS', 'HMC', 'Fixed_param'. Be like a squirrel.")
  }
  if(chains > 8){
    print("Why so many chains? You do you though.")
  }
  if(any(c("pi", "p", "theta") %in% params_to_save) == F){
    print("Parameters that can be saved are one of 'pi' or 'p' or 'theta'. If you want to save more than one, then pass in a vector of those you want (e.g., c('pi', 'p')).")
  }
  if(length(starts) != length(ends)){
    stop("You didn't pass in the same number of start and end points. These two vectors must be of the same length.")
  }
  if(any(apply(countData[, 2:dim(countData)[2]], 2, is.numeric)) == F){
    stop("You have a non-numeric value in your input data. Input data have to be numeric (use str() to learn about the offending input object).")
  }
  if(any(countData == 0)){
    stop("Zeros exist in the data. A pseudocount (e.g., 1) should be added to all of the data to avoid taking the log of zero.")
  }
  if(burn == samples){
    print("Burn-in is the same length as sampling. This means that you won't get any samples, because the integer for burn in is subtracted from the integer for samples. This is just how Stan/RStan does it. So if you want 500 burn in and 1000 samples, then choose burn in = 500 and samples = 1500.")
  }

  treatments <- length(starts)

  fitstan_HMC <- rstan::sampling(stanmodels$dm,
                                 data = list("datamatrix" = countData[, 2:dim(countData)[2]],
                                             "nreps" = nrow(countData),
                                             "notus" = ncol(countData[, 2:dim(countData)[2]]),
                                             "N" = treatments,
                                             "start" = starts,
                                             "end" = ends),
                                 algorithm = algorithm,
                                 chains = chains,
                                 warmup = burn,
                                 iter = samples,
                                 thin = thinning_rate,
                                 cores = cores,
                                 seed = 123,
                                 pars = params_to_save,
                                 verbose = T)
  return(fitstan_HMC)
}

#' Perform variational inference sampling
#'
#' This function uses a compiled Dirichlet multinomial model and performs variational inference estimation of posteriors using 'Stan'.
#' Evaluating the performance of variational inference is currently under development per our understanding. Please roll over to the 'Stan' website and see if new diagnostics are available.
#' If you use this function then credit 'Stan' and 'RStan' along with this package.
#'
#' It can be helpful to use the indexer function to automatically identify the indices needed for the 'starts' and 'ends' parameters. See the vignette for an example.
#'
#' Warning: data must be input in the correct organized format or this function will not provide accurate results. See the vignette if you are unsure how to organize data.
#' Warning: depending upon the size of the data to be analyzed this function can take a very long time to run.
#' @param countData A matrix or data frame of counts. The first field should be sample names and the subsequent fields should be integer data. Data should be arranged so that the first n rows correspond to one treatment group and the next n rows correspond with the next treatment group, and so on. The row indices for the first and last sample in these groups are fed into this function via 'starts' and 'ends'.
#' @param starts A vector defining the indices that correspond to the first sample in each treatment group. The indexer function can help with this.
#' @param ends A vector defining the indices that correspond to the last sample in each treatment group. The indexer function can help with this.
#' @param algorithm The algorithm to use when performing variational inference. Either 'meanfield' or 'fullrank'. The former is the default.
#' @param output_samples The number of samples from the approximated posterior to save.
#' @param params_to_save The parameters from which to save samples. Can be 'p', 'pi', 'theta'.
#' @return A fitted 'Stan' object that includes the samples from the parameters designated.
#' @examples
#' #simulate an OTU table
# com_demo <- matrix(0, nrow = 10, ncol = 10)
# com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7
# com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation
# fornames <- NA
# for(i in 1:length(com_demo[1,])){
#   fornames[i] <- paste("otu_", i, sep = "")
# }
# sample_vec <- NA
# for(i in 1:length(com_demo[,1])){
#   sample_vec[i] <- paste("sample", i, sep = "_")
# }
# com_demo <- data.frame(sample_vec, com_demo)
# names(com_demo) <- c("sample", fornames)
#
# cnvrg_VI(com_demo, starts = c(1,6), ends = c(5,10))
#' @export
cnvrg_VI <- function(countData, starts, ends, algorithm = "meanfield", output_samples = 500, params_to_save = c("pi", "p")){

  # Statements, warnings, and errors
  if(dim(countData)[2] > 5000 & dim(countData)[1] > 100){
    print("You have a lot of data. Yay! Beware that modeling could be slow and you may want to run this on a remote computer.")
  }
  if(algorithm %in% c("meanfield", "fullrank") == F){
    stop("Algorithm must be one of 'meanfield' or 'fullrank'.")
  }
  if(output_samples > 800){
    print("Why save so many samples? Try saving fewer to avoid filling up your disk.")
  }
  if(any(c("pi", "p", "theta") %in% params_to_save) == F){
    print("Parameters that can be saved are one of 'pi' or 'p' or 'theta'. If you want to save more than one, then pass in a vector of those you want (e.g., c('pi', 'p')).")
  }
  if(length(starts) != length(ends)){
    stop("You didn't pass in the same number of start and end points. These two vectors must be of the same length.")
  }
  if(any(apply(countData[, 2:dim(countData)[2]], 2, is.numeric)) == F){
    stop("You have a non-numeric value in your input data. Input data have to be numeric (use str() to learn about the offending input object).")
  }
  if(any(countData == 0)){
    stop("Zeros exist in the data. A pseudocount (e.g., 1) should be added to all of the data to avoid taking the log of zero.")
  }

  treatments <- length(starts)

  fitstan_VI <- rstan::vb(stanmodels$dm,
                          data = list("datamatrix" = countData[, 2:dim(countData)[2]],
                                      "nreps" = nrow(countData),
                                      "notus" = ncol(countData[, 2:dim(countData)[2]]),
                                      "N" = treatments,
                                      "start" = starts,
                                      "end" = ends),
                          algorithm = algorithm,
                          output_samples = output_samples,
                          check_data = T,
                          seed = 123,
                          pars = params_to_save)
  return(fitstan_VI)
}

#' Calculate features with different abundances between treatment groups
#'
#' This function determines which features within the matrix that was modeled differ in relative abundance among treatment groups.
#' Pass in a model object with samples for pi parameters.
#' This function only works for pi parameters.
#'
#' The output of this function gives the proportion of samples that were greater than zero after subtracting the two relevant posterior distributions. Therefore, values that are very large or very small denote a high certainty that the distributions subtracted differ.
#' If this concept is not clear, then read Harrison et al. 2020 'Dirichlet multinomial modeling outperforms alternatives for analysis of microbiome and other ecological count data' in Molecular Ecology Resources.
#' For a simple explanation, see this video: https://use.vg/OSVhFJ
#'
#' The posterior probability distribution of differences is also output. These samples can be useful for plotting or other downstream analyses.
#' Finally, a list of data frames describing the features that differed among treatment comparisons is output, with the probability of differences and the magnitude of those differences (the effect size) included.
#' @param model_out Output of CNVRG modeling functions, including cnvrg_HMC and cnvrg_VI
#' @param countData Dataframe of count data that was modeled. Should be exactly the same as those data modeled! The first field should be sample name and integer count data should be in all other fields. This is passed in so that the names of fields can be used to make the output of differential relative abundance testing more readable.
#' @param prob_threshold Probability threshold, below which it is considered that features had a high probability of differing between groups. Default is 0.05.
#' @return A dataframe with the first field denoting the treatment comparison (e.g., treatment 1 vs. 2) and subsequent fields stating the proportion of samples from the posterior that were greater than zero (called "certainty of diffs"). Note that each treatment group is compared to all other groups, which leads to some redundancy in output. A list, called ppd_diffs, holding samples from the posterior probability distribution of the differences is also output. Finally, a list of dataframes describing results for only those features with a high probability of differing is output (this list is named: features_that_differed).
#' @examples
#' #simulate an OTU table
#' com_demo <-matrix(0, nrow = 10, ncol = 10)
#' com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7
#' com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation
#' fornames <- NA
#' for(i in 1:length(com_demo[1,])){
#' fornames[i] <- paste("otu_", i, sep = "")
#' }
#' sample_vec <- NA
#' for(i in 1:length(com_demo[,1])){
#' sample_vec[i] <- paste("sample", i, sep = "_")
#' }
#' com_demo <- data.frame(sample_vec, com_demo)
#' names(com_demo) <- c("sample", fornames)
#'
#' out <- cnvrg_VI(com_demo,starts = c(1,6), ends=c(5,10))
#' diff_abund_test <- diff_abund(model_out = out, countData = com_demo)
#' @export
diff_abund <- function(model_out, countData, prob_threshold = 0.05){
  if(inherits(model_out, "stanfit")){
    pis <- rstan::extract(model_out, "pi")
    #This pulls out the second element in the dimensions of pi, which is the number of treatments
    treatments <- rapply(pis, dim, how = "list")$pi[2]
    #This gives us the number of features
    features <- rapply(pis, dim, how = "list")$pi[3]
  }else if(inherits(model_out, "list")){
    pis <- model_out$pi
    treatments <- dim(pis)[2]
    features <- dim(pis)[3]
  }
  if (treatments == 1) {
    stop(
      "There is only one treatment group.\n Can't compare relative abundances across groups with only one group!\n Is a list of pi parameter samples being passed in? See the vignette."
    )
  }
  diffs <- vector("list", length = treatments)
  for (i in 1:treatments) {
    for (j in 1:treatments) {
      if(i != j){
        if(inherits(model_out, "stanfit")){
          diffs[[i]][[j]] <- pis[[1]][, i, ] - pis[[1]][, j, ]
        }else if(inherits(model_out, "list")){
          diffs[[i]][[j]] <- pis[, i, ] - pis[, j, ]
        }
      }
      #Sanity check
      # pis[[1]][1:5,1,1:5] -
      #   pis[[1]][1:5,2,1:5]
      #
      # pis[1:5,1,1:5]; pis[1:5,2,1:5]
      # pis[1:5,1,1:5] -
      #   pis[1:5,2,1:5]
    }
  }
  #Next determine the percentage of samples that are greater than zero for each comparison.
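  #Illustrative sketch (comments only, not executed): the "certainty of diffs" value
  #computed below is, for each feature, the share of posterior draws in which
  #treatment j exceeded treatment k. If `delta` held the posterior draws of
  #pi_j - pi_k for a single feature (a hypothetical vector, not an object created
  #in this function), the calculation would reduce to:
  #  certainty <- mean(delta > 0)  #values near 1 or near 0 suggest the groups differ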
  output <- data.frame(matrix(nrow = treatments ^ 2, ncol = features + 1))
  names(output) <- c("comparison", names(countData)[2:length(countData)])
  #Make a dataframe for effect sizes:
  effects <- data.frame(matrix(nrow = treatments ^ 2, ncol = features + 1))
  names(effects) <- c("comparison", names(countData)[2:length(countData)])
  m <- 1
  for (j in 1:treatments) {
    for (k in 1:treatments) {
      if(j != k){
        output[m, 1] <- paste("treatment_", j, "_vs_treatment_", k, sep = "")
        effects[m, 1] <- paste("treatment_", j, "_vs_treatment_", k, sep = "")
        #Calculate the number of samples for the denominator of the percentage calculation
        denom <- dim(diffs[[j]][[k]])[1]
        #Calculate the number of samples > 0
        gtzero <- apply(
          diffs[[j]][[k]],
          2,
          FUN = function(x) {
            length(which(x > 0))
          }
        )
        #effect size
        effects[m, 2:length(effects)] <- apply(diffs[[j]][[k]], 2, FUN = mean)
        #Save percentage to output
        output[m, 2:length(output)] <- gtzero / denom
        m <- m + 1
      }
    }
  }
  print("Names being added to the output file correspond to count data as entered, minus the initial sample column.")
  print("DOUBLECHECK that these names match your expectations, or you will be led astray.")
  print("Look at the source code for this function if you are not certain what is happening.")
  #Remove empty rows in output. Doing this instead of constraining table size
  #since it seems easier than calculating exactly how many rows are needed
  #depending on treatment. This could be changed in the future.
  output <- output[!is.na(output$comparison),]
  #Determine those features that differed between treatment comparisons.
  certain_diff_features_list <- list()
  for(m in 1:nrow(output)){
    selected_comparison_output <- output[m,]
    probs <- NA
    for(i in 2:length(selected_comparison_output)){
      if(selected_comparison_output[1,i] > .5){
        probs[i] <- 1 - selected_comparison_output[1,i]
      }else{
        probs[i] <- selected_comparison_output[1,i]
      }
    }
    certain_diff_features <- names(output)[probs <= prob_threshold]
    effectSizes_cert_differences <- effects[m, names(effects) %in% certain_diff_features]
    certain_diff_features <- data.frame(cbind(certain_diff_features, probs[probs <= prob_threshold]))
    #Drop the NA row that comes from the comparison field (probs[1] is NA by construction).
    certain_diff_features <- certain_diff_features[!is.na(certain_diff_features[,1]),]
    names(certain_diff_features) <- c("feature_that_differed", "probability_of_difference")
    certain_diff_features <- data.frame(certain_diff_features,
                                        enframe(
                                          unlist(
                                            effectSizes_cert_differences[names(effectSizes_cert_differences) %in% certain_diff_features$feature_that_differed])))
    if(any((certain_diff_features$name == certain_diff_features$feature_that_differed) == F)){
      print("ERROR: the order of OTUs that effect sizes were calculated for does not match expectations. Something deep is wrong and you will need to dig in to the source code to fix it.")
    }
    certain_diff_features <- certain_diff_features[names(certain_diff_features) != "name"]
    names(certain_diff_features)[length(certain_diff_features)] <- "effect size"
    certain_diff_features_list[[m]] <- certain_diff_features
  }
  names(certain_diff_features_list) <- output[,1]
  return(list(certainty_of_diffs = output,
              ppd_diffs = diffs,
              features_that_differed = certain_diff_features_list))
}

#' Calculate diversity entropies for each replicate
#'
#' Calculate Shannon's or Simpson's indices for each replicate while propagating uncertainty in relative abundance estimates through calculations.
#'
#' Takes as input either a fitted Stan object from the cnvrg_HMC or cnvrg_VI functions, or the output of isd_transform.
#' As always, doublecheck the results to ensure the function has output reasonable values. Note that because all proportion estimates
#' are non-zero (there are no zero values in the modeled data), there is a lot of information within the modeled data. Because diversity entropies
#' are measures of information content, this means there will be a much higher entropy estimate for modeled data than for the raw
#' count data. However, patterns of variation in diversity should be similar among treatment groups for modeled and raw data.
#'
#' @param model_out Output of CNVRG modeling functions, including cnvrg_HMC and cnvrg_VI or isd_transform
#' @param countData Dataframe of count data that was modeled. Should be exactly the same as those data modeled! The first field should be sample name and integer count data should be in all other fields. This is passed in so that the names of fields can be used to make the output of differential relative abundance testing more readable.
#' @param params Parameter for which to calculate diversity, can be 'p' or 'pi' or both (e.g., c("pi","p"))
#' @param entropy_measure Diversity entropy to use, can be one of 'shannon' or 'simpson'
#' @param equivalents Convert entropies into number equivalents. Defaults to TRUE. See Jost (2006), "Entropy and diversity"
#' @return A list that has samples from posterior distributions of entropy metrics
#' @examples
#' #simulate an OTU table
#' com_demo <-matrix(0, nrow = 10, ncol = 10)
#' com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7
#' com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation
#' fornames <- NA
#' for(i in 1:length(com_demo[1,])){
#' fornames[i] <- paste("otu_", i, sep = "")
#' }
#' sample_vec <- NA
#' for(i in 1:length(com_demo[,1])){
#' sample_vec[i] <- paste("sample", i, sep = "_")
#' }
#' com_demo <- data.frame(sample_vec, com_demo)
#' names(com_demo) <- c("sample", fornames)
#'
#' out <- cnvrg_VI(com_demo,starts = c(1,6), ends=c(5,10))
#' diversity_calc(model_out = out,params = c("pi","p"),
#' countData = com_demo, entropy_measure = 'shannon')
#' @export
diversity_calc <- function(model_out, countData, params = "pi", entropy_measure = "shannon", equivalents = T){
  if(!all(params %in% c("pi","p"))){
    stop("'params' must be specified and each element must be either 'p' or 'pi'.")
  }
  if("pi" %in% params){
    if(inherits(model_out, "stanfit")){
      pis <- rstan::extract(model_out, "pi")
      #This pulls out the second element in the dimensions of pi, which is the number of treatments
      treatments <- rapply(pis, dim, how="list")$pi[2]
      entropy_pi <- list()
      if(equivalents == T){
        if(entropy_measure == "shannon"){
          for(i in 1:treatments){
            #Note, on June 2 I checked that this method of indexing worked. It does.
            entropy_pi[[i]] <- exp(vegan::diversity(pis$pi[,i,], index = entropy_measure))
          }
        }else if (entropy_measure == "simpson"){
          for(i in 1:treatments){
            #The number equivalent of the Gini-Simpson index is the inverse Simpson
            #concentration (Jost 2006), so use vegan's 'invsimpson' index here.
            entropy_pi[[i]] <- vegan::diversity(pis$pi[,i,], index = "invsimpson")
          }
        }
      }else{
        for(i in 1:treatments){
          entropy_pi[[i]] <- vegan::diversity(pis$pi[,i,], index = entropy_measure)
        }
      }
      #FOR ALTERNATE LIST INPUT as output by isd_transform()
    }else if(inherits(model_out, "list")){
      pis <- model_out
      print("Model input appears to be a list, not an Rstan fitted object. This may be ok, but check the output of this function.")
      #This pulls out the second element in the dimensions of pi, which is the number of treatments
      treatments <- length(pis)
      entropy_pi <- vector("list", treatments)
      if(equivalents == T){
        if(entropy_measure == "shannon"){
          for(i in 1:treatments){
            for(j in 1:length(pis[[1]][[1]])){
              entropy_pi[[i]][[j]] <- exp(vegan::diversity(sapply(pis[[i]], "[[", j), index = entropy_measure))
            }
          }
        }else if (entropy_measure == "simpson"){
          for(i in 1:treatments){
            for(j in 1:length(pis[[1]][[1]])){
              #Number equivalent of the Gini-Simpson index (Jost 2006).
              entropy_pi[[i]][j] <- vegan::diversity(sapply(pis[[i]], "[[", j), index = "invsimpson")
            }
          }
        }
      }else{
        #Raw entropies for list input; iterate over replicates as in the branches above.
        for(i in 1:treatments){
          for(j in 1:length(pis[[1]][[1]])){
            entropy_pi[[i]][j] <- vegan::diversity(sapply(pis[[i]], "[[", j), index = entropy_measure)
          }
        }
      }
    }else if(inherits(model_out, "array")){
      print("Model input appears to be an array, not an Rstan fitted object. This may be ok, but check the output of this function.")
      entropy_pi <- list()
      treatments <- dim(model_out)[2]
      if(equivalents == T){
        if(entropy_measure == "shannon"){
          for(i in 1:treatments){
            entropy_pi[[i]] <- exp(vegan::diversity(model_out[,i,], index = entropy_measure))
          }
        }else if (entropy_measure == "simpson"){
          for(i in 1:treatments){
            #Number equivalent of the Gini-Simpson index (Jost 2006).
            entropy_pi[[i]] <- vegan::diversity(model_out[,i,], index = "invsimpson")
          }
        }
      }else{
        #No conversion to number equivalents here, so return the raw entropy.
        for(i in 1:treatments){
          entropy_pi[[i]] <- vegan::diversity(model_out[,i,], index = entropy_measure)
        }
      }
    }
  }
  if("p" %in% params){
    if(!inherits(model_out, "stanfit")){
      stop("ERROR: p parameters can not be processed for objects that are not of class 'stanfit'. If you want to calculate diversity for p parameters that are not in a stanfit object, such as those that have been estimated via extract_point_estimate(), it is better to use the standard vegan 'diversity' function.")
    }
    ps <- rstan::extract(model_out, "p")
    reps <- rapply(ps, dim, how="list")$p[2]
    entropy_p <- list()
    if(equivalents == T){
      if(entropy_measure == "shannon"){
        for(i in 1:reps){
          entropy_p[[i]] <- exp(vegan::diversity(ps$p[,i,], index = entropy_measure))
        }
      }else if (entropy_measure == "simpson"){
        for(i in 1:reps){
          #Number equivalent of the Gini-Simpson index (Jost 2006).
          entropy_p[[i]] <- vegan::diversity(ps$p[,i,], index = "invsimpson")
        }
      }else{
        stop("It appears that you didn't choose either 'simpson' or 'shannon' for your entropy index.")
      }
    }else{
      for(i in 1:reps){
        entropy_p[[i]] <- vegan::diversity(ps$p[,i,], index = entropy_measure)
      }
    }
  }
  if(("p" %in% params) & ("pi" %in% params)){
    return(list(entropy_pi = entropy_pi, entropy_p = entropy_p))
  }else if(!("pi" %in% params) & ("p" %in% params)){
    return(list(entropy_p = entropy_p))
  }else if(!("p" %in% params) & ("pi" %in% params)){
    return(list(entropy_pi = entropy_pi))
  }
}

#' Extract point estimates of multinomial and Dirichlet parameters
#'
#' Provides the mean value of posterior probability distributions for parameters.
#' @param model_out Output of CNVRG modeling functions, including cnvrg_HMC and cnvrg_VI
#' @param countData The count data modeled.
#' @param params Parameters to be extracted, either pi (Dirichlet) or p (multinomial).
#' @return A list of point estimates for model parameters. If both multinomial and Dirichlet parameters are requested then they will be named elements of a list.
#' @examples #' #simulate an OTU table #' com_demo <-matrix(0, nrow = 10, ncol = 10) #' com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7 #' com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation #' fornames <- NA #' for(i in 1:length(com_demo[1,])){ #' fornames[i] <- paste("otu_", i, sep = "") #' } #' sample_vec <- NA #' for(i in 1:length(com_demo[,1])){ #' sample_vec[i] <- paste("sample", i, sep = "_") #' } #' com_demo <- data.frame(sample_vec, com_demo) #' names(com_demo) <- c("sample", fornames) #' #' out <- cnvrg_VI(com_demo,starts = c(1,6), ends=c(5,10)) #' extract_point_estimate(model_out = out, countData = com_demo) #' @export extract_point_estimate <- function(model_out, countData, params = c("pi", "p")){ pis <- rstan::extract(model_out, "pi") treatments <- rapply(pis, dim, how="list")$pi[2] #Make names for treatment groups for convenience treats <- vector() for (i in 1:treatments) { treats[i] <- paste("treatment", i, sep = "_") } #make export object of the pis out_pis <- data.frame(treats, apply(pis$pi[, , ], MARGIN = c(2, 3), FUN = mean)) if(length(names(countData)[2:dim(countData)[2]]) + 1 != length(out_pis)){ print("ERROR: the length of the name vector from the count data does not match the length of modeled pi parameters. Check that the count data provided to this function are exactly those that were modeled.") } colnames(out_pis) <- c("treatments", names(countData)[2:dim(countData)[2]]) #Catch ps only if they exist if( any(params == "p") ){ ps <- rstan::extract(model_out, "p") out_ps <- data.frame(countData[,1], apply(ps$p[, , ], MARGIN = c(2, 3), FUN = mean)) colnames(out_ps) <- c("sample", names(countData)[2:dim(countData)[2]]) } if(is.null(names(countData)[2:dim(countData)[2]]) | any(is.na(names(countData)[2:dim(countData)[2]]))){ print("The names of the count data provided include NA or are NULL. If more informative names are desired for the output then the count matrix should have column names.") } if( exists("out_pis") & exists("out_ps")){ return(list( pointEstimates_p = out_ps, pointEstimates_pi = out_pis) )}else if(exists("out_pis")){ return(list( pointEstimates_pi = out_pis) ) }else if(exists("out_ps")){ return(list( pointEstimates_p = out_ps) ) } } #' Extract quantiles of pi parameters #' #' Provides quantiles of pi parameters for each feature and treatment group. #' @param model_out Output of CNVRG modeling functions, including cnvrg_HMC and cnvrg_VI #' @param probs A vector of quantiles #' @return A list specifying quantiles for each feature in each treatment group. #' @examples #' #simulate an OTU table #' com_demo <-matrix(0, nrow = 10, ncol = 10) #' com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7 #' com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation #' fornames <- NA #' for(i in 1:length(com_demo[1,])){ #' fornames[i] <- paste("otu_", i, sep = "") #' } #' sample_vec <- NA #' for(i in 1:length(com_demo[,1])){ #' sample_vec[i] <- paste("sample", i, sep = "_") #' } #' com_demo <- data.frame(sample_vec, com_demo) #' names(com_demo) <- c("sample", fornames) #' #' out <- cnvrg_VI(com_demo,starts = c(1,6), ends=c(5,10)) #' extract_pi_quantiles(model_out = out, probs = c(0.05,0.5,0.95)) #' @export extract_pi_quantiles <- function(model_out, probs = c(0.05,0.5,0.95)){ if(any(probs > 1)){ print("You are asking for a quantile greater than one. 
Quantiles must be between zero and one.")
  }
  if(model_out@stan_args[[1]]$method == "variational"){
    pisamples <- model_out@sim$samples[[1]][grep("pi", names(model_out@sim$samples[[1]]))]
    pisamples <- lapply(pisamples, FUN = quantile, probs = probs)
  }
  if(model_out@stan_args[[1]]$method == "sampling"){
    pis <- rstan::extract(model_out, "pi")
    pisamples <- apply(pis$pi[, , ], MARGIN = c(2, 3), FUN = quantile, probs = probs)
  }
  return(list(pi_quantils = pisamples))
}

#' Determine indices for treatment groups
#'
#' This function determines the indices for the first and last replicates within a vector describing treatment group.
#'
#' @param x Vector input.
#' @return A list with two named elements that contain start and end indices.
#' @examples
#' indexer(c(rep("treatment1",5), rep("treatment2",5)))
#' @export
indexer <- function(x){
  starts <- vector()
  ends <- vector()
  k <- 1
  for(i in unique(x)){
    if(is.na(i)){
      print("One of the treatment groups is coded as NA. It should be recoded as something else.")
      break
    }
    #Extract the indices for the treatment
    indices <- which(x == i)
    #Make a sequence from the min to the max of the indices.
    #We will use this to test that the indices are in order and that the data are formatted properly.
    test_indices <- seq(min(indices), max(indices), by = 1)
    #Compare lengths first so the element-wise check can't recycle.
    if(length(indices) != length(test_indices) || any(indices != test_indices)){
      print("ERROR: it does not appear that all the replicates for a treatment group are adjacent.")
    }else{
      starts[k] <- min(indices)
      ends[k] <- max(indices)
      k <- k + 1
    }
  }
  if(length(starts) != length(unique(x))){
    print("ERROR: there was a problem trying to calculate starting and ending indices for each treatment group. Check data formatting.")
    return("FAILED")
  }
  if(length(ends) != length(unique(x))){
    print("ERROR: there was a problem trying to calculate starting and ending indices for each treatment group. Check data formatting.")
    return("FAILED")
  }
  return(list(starts = starts, ends = ends))
}

#' Transform data into estimates of absolute abundances using an ISD
#'
#' If an internal standard (ISD) has been added to samples such that the counts for that standard are representative of the same absolute abundance, then the ISD can be used to transform relative abundance data such that they are proportional to absolute abundances (Harrison et al. 2020).
#' This function performs this division while preserving uncertainty in relative abundance estimates of both the ISD and the other features present.
#'
#' An index for the ISD must be provided. This should be the field index that corresponds with the ISD. Remember that the index should mirror what has been modeled. Also, note that this function subtracts one from this index because the modeled data have a non-integer sample field.
#' If the wrong index is passed in, the output of this function will be incorrect, but there will not be a fatal error or warning.
#'
#' A simple check that the correct index has been passed to the function is to examine the output and make sure that the field that should correspond with the ISD is one (signifying that the ISD was divided by itself).
#'
#' Output format can be either the means of the samples for each pi parameter or the transformed samples from the posterior distribution for that parameter.
#' Harrison et al. 2020. 'The quest for absolute abundance: the use of internal standards for DNA based community ecology' Molecular Ecology Resources.
#' @param model_out Output of CNVRG modeling functions, including cnvrg_HMC and cnvrg_VI
#' @param countData The count data modeled.
#' @param isd_index The index for the field with information for the internal standard.
#' @param format The output format. Can be either 'samples' or 'ml'. "samples" outputs samples from the posterior probability distribution; "ml" outputs the mean of the posterior samples for each parameter.
#' @return A dataframe, or list, specifying either point estimates for each feature in each treatment group (if output format is 'ml') or samples from the posterior (if output format is 'samples').
#' @examples
#' #simulate an OTU table
#' com_demo <-matrix(0, nrow = 10, ncol = 10)
#' com_demo[1:5,] <- c(rep(3,5), rep(7,5)) #Alternates 3 and 7
#' com_demo[6:10,] <- c(rep(7,5), rep(3,5)) #Reverses alternation
#' fornames <- NA
#' for(i in 1:length(com_demo[1,])){
#' fornames[i] <- paste("otu_", i, sep = "")
#' }
#' sample_vec <- NA
#' for(i in 1:length(com_demo[,1])){
#' sample_vec[i] <- paste("sample", i, sep = "_")
#' }
#' com_demo <- data.frame(sample_vec, com_demo)
#' names(com_demo) <- c("sample", fornames)
#'
#' #Model the data
#' out <- cnvrg_VI(com_demo,starts = c(1,6), ends=c(5,10))
#' #Transform the data
#' transformed_data <- isd_transform(model_out = out, countData = com_demo,
#' isd_index = 3, format = "ml")
#' @export
isd_transform <- function(model_out, isd_index, countData, format = "samples"){
  if(missing(isd_index)){
    stop("An index for the ISD has not been provided.")
  }
  isd_index <- isd_index - 1
  #This is because the modeled pi values have one fewer field than the count data,
  #because the count data had a sample name column.
  #if(model_out@stan_args[[1]]$method == "sampling"){
  pis <- rstan::extract(model_out, "pi")
  treatments <- rapply(pis, dim, how="list")$pi[2]
  out <- pis
  #Do division
  for(i in 1:treatments){
    #Recall the array goes samples, groups, features
    out$pi[,i,] <- out$pi[,i,] / out$pi[,i,isd_index]
  }
  #Convert to posterior means (the 'ml' output format)
  if(format == "ml"){
    groupnms <- NA
    for(i in 1:treatments){
      groupnms[i] <- paste("treatment_group_", i, sep = "")
    }
    out <- data.frame(groupnms, apply(out$pi[, , ], MARGIN = c(2, 3), FUN = mean))
    names(out) <- c("treatment_group", names(countData)[2:length(names(countData))])
  }
  return(out)
}
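#A quick sanity-check sketch for isd_transform(), per the docs above (not run):
#after the transform, the ISD's own field should be exactly 1 in every treatment
#group. This assumes the objects from the roxygen example (transformed_data with
#isd_index = 3, so the ISD is the count field named "otu_2"); names are illustrative.
# all(transformed_data[, "otu_2"] == 1)  #TRUE when the right index was supplied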
/scratch/gouwar.j/cran-all/cranData/CNVRG/R/convrg_func.R
#' Fungal endophytes of Astragalus lentiginosus grown near Reno, NV #' #' @source Joshua G. Harrison, \url{https://www.biorxiv.org/content/10.1101/608729v1.full} #' @format A data frame with columns: #' \describe{ #' \item{treatment}{A categorical variable describing if a plant was treated with a slurry of endophyte inoculum and whether it was positive or negative for Alternaria fulva.} #' \item{Otu10}{Contains count data.} #' \item{Otu100}{Contains count data.} #' \item{Otu11}{Contains count data.} #' \item{Otu12}{Contains count data.} #' \item{Otu4}{Contains count data.} #' \item{Otu40}{Contains count data.} #' \item{Otu42}{Contains count data.} #' \item{Otu54}{Contains count data.} #' \item{Otu58}{Contains count data.} #' \item{Otu6}{Contains count data.} #' \item{Otu62}{Contains count data.} #' \item{Otu7}{Contains count data.} #' \item{Otu70}{Contains count data.} #' \item{Otu71}{Contains count data.} #' \item{Otu72}{Contains count data.} #' \item{Otu74}{Contains count data.} #' \item{Otu76}{Contains count data.} #' \item{Otu77}{Contains count data.} #' \item{Otu79}{Contains count data.} #' \item{Otu86}{Contains count data.} #' \item{Otu9}{Contains count data.} #' \item{Otu92}{Contains count data.} #' \item{Otu94}{Contains count data.} #' \item{Otu96}{Contains count data.} #' \item{Otu97}{Contains count data.} #' \item{Otu99}{Contains count data.} #' } #' @examples #' \dontrun{ #' fungi #' } "fungi"
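#Usage sketch (not run; assumes the CNVRG package is attached): the first field of
#`fungi` is the treatment vector, so indexer() can derive the 'starts'/'ends'
#arguments that the modeling functions expect. A pseudocount is added first because
#cnvrg_HMC() and cnvrg_VI() stop when zeros are present.
# fungi[, -1] <- fungi[, -1] + 1
# idx <- indexer(fungi$treatment)
# fit <- cnvrg_VI(fungi, starts = idx$starts, ends = idx$ends)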
/scratch/gouwar.j/cran-all/cranData/CNVRG/R/data.R
# Generated by rstantools. Do not edit by hand. # names of stan models stanmodels <- c("dm") # load each stan module Rcpp::loadModule("stan_fit4dm_mod", what = TRUE) # instantiate each stanmodel object stanmodels <- sapply(stanmodels, function(model_name) { # create C++ code for stan model stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") stan_file <- file.path(stan_file, paste0(model_name, ".stan")) stanfit <- rstan::stanc_builder(stan_file, allow_undefined = TRUE, obfuscate_model_name = FALSE) stanfit$model_cpp <- list(model_cppname = stanfit$model_name, model_cppcode = stanfit$cppcode) # create stanmodel object methods::new(Class = "stanmodel", model_name = stanfit$model_name, model_code = stanfit$model_code, model_cpp = stanfit$model_cpp, mk_cppmodule = function(x) get(paste0("model_", model_name))) })
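#Illustrative sketch (not run): the `stanmodels` list built above is what the
#modeling functions hand to rstan. For example, cnvrg_HMC() ultimately calls
#rstan::sampling() on the compiled 'dm' model with a data list of this shape
#(field names taken from the sampling call in convrg_func.R; `counts` is a toy
#integer matrix with no zeros and is not defined here):
# rstan::sampling(stanmodels$dm,
#                 data = list("datamatrix" = counts,
#                             "nreps" = nrow(counts),
#                             "notus" = ncol(counts),
#                             "N" = 2,            #number of treatment groups
#                             "start" = c(1, 6),
#                             "end" = c(5, 10)))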
/scratch/gouwar.j/cran-all/cranData/CNVRG/R/stanmodels.R
#' Server component of the CNVScope plotly shiny application. #' #' Server function of the CNVScope shiny application. run with runCNVScopeShiny #' @name CNVScopeserver #' @keywords CNV heatmap shiny plotly #' @import ggplot2 magrittr #' @rawNamespace import(shiny, except = c(runExample,renderDataTable)) #' @rawNamespace import(RCurl, except = reset) #' @rawNamespace import(data.table, except = c(melt, dcast)) #' @param session The shiny session object for the application. #' @param input shiny server input #' @param output shiny server output #' @param debug enable debugging mode #' @return None #' @examples #' \dontrun{ #' runCNVScopeShiny() #' } #' @export #globalVariables(c("ensembl_gene_tx_data_gr","baseurl","chromosomes","downsample_factor","basefn", # "subset_name", # "expression_data_gr_nbl",'start2','start1','value','Var1','Var2','value1', # 'tcga_type','census_data_gr','common_coords','myReactives', # 'genev','delete.isolates','freq_data'),add = F) #rawNamespace import(GenomicFeatures ,except = show) if(getRversion() >= "2.15.1") utils::globalVariables(c("."), add=F) CNVScopeserver<-function(session,input, output, debug=F) { # if(requireNamespace("plotly",quietly = T)){ # #importFrom tidyr unite #importFrom jointseg jointSeg #importFrom logging addHandler #importFrom DT renderDataTable #rawNamespace import(shinyjs, except = runExample) #import reshape2 htmltools htmlwidgets ensembl_gene_tx_data_gr <- if(exists("ensembl_gene_tx_data_gr")){get("ensembl_gene_tx_data_gr")} else {NULL} baseurl <- if(exists("baseurl")){get("baseurl")} else {NULL} adjpvaluechr <- if(exists("adjpvaluechr")){get("adjpvaluechr")} else {NULL} basefn <- if(exists("basefn")){get("basefn")} else {NULL} osteofn <- if(exists("osteofn")){get("osteofn")} else {NULL} start1 <- if(exists("start1")){get("start1")} else {NULL} start2 <- if(exists("start2")){get("start2")} else {NULL} value <- if(exists("value")){get("value")} else {NULL} value1 <- if(exists("value1")){get("value1")} else {NULL} Var1 <- if(exists("Var1")){get("Var1")} else {NULL} Var2 <- if(exists("Var2")){get("Var2")} else {NULL} bins.seqnames <- if(exists("bins.seqnames")){get("bins.seqnames")} else {NULL} bins.start <- if(exists("bins.start")){get("bins.start")} else {NULL} bins.end <- if(exists("bins.end")){get("bins.end")} else {NULL} expression_data_gr <- if(exists("expression_data_gr")){get("expression_data_gr")} else {NULL} common_coords <- if(exists("common_coords")){get("common_coords")} else {NULL} myReactives <- if(exists("myReactives")){get("myReactives")} else {NULL} genev <- if(exists("genev")){get("genev")} else {NULL} delete.isolates <- function(graph, mode = 'all') { isolates <- which(igraph::degree(graph, mode = mode) == 0) igraph::delete.vertices(graph, isolates) } freq_data <- if(exists("freq_data")){get("freq_data")} else {NULL} #adjpvalue chr cn correlation genes_text probe visval adjpvalue <- if(exists("adjpvalue")){get("adjpvalue")} else {NULL} chr <- if(exists("chr")){get("chr")} else {NULL} cn <- if(exists("cn")){get("cn")} else {NULL} correlation <- if(exists("correlation")){get("correlation")} else {NULL} genes_text <- if(exists("genes_text")){get("genes_text")} else {NULL} probe <- if(exists("probe")){get("probe")} else {NULL} visval <- if(exists("visval")){get("visval")} else {NULL} privpolurl <- a("NCI Privacy Policy", href="https://www.cancer.gov/policies/privacy-security",target="_blank") output$privpol <- renderUI({ tagList(privpolurl)}) downsample_factor<-NULL subset_name<-NULL #expression_data_gr_nbl<-NULL 
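  #Usage sketch for the delete.isolates() helper defined above (not run; `g` is a
  #hypothetical igraph object): vertices with degree zero are dropped.
  # g <- igraph::make_ring(5) + igraph::make_empty_graph(2)  #adds two isolated vertices
  # delete.isolates(g)  #returns the 5-cycle with the two isolates removed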
tcga_type<-NULL chrom.pairs<-NULL printLogJs <- function(x, ...) { shinyjs::logjs(x) T } observe({ if (input$geneSearch == 0) {return()} x<-isolate(input$geneSearch) #browser() if(x!=0 & isolate(input$gene_input_col)!=""& isolate(input$gene_input_row)!=""){ if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)])!=0) { colgene_loc<-paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....chromosome_name,":", ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....start_position,"-", ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....end_position) } else { colgene_loc<-""} if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)])!=0) { rowgene_loc<-paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....chromosome_name,":", ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....start_position,"-", ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....end_position) } else { # ##browser() rowgene_loc<-""} updateTextInput(session,"loc_input_col",value=colgene_loc) updateTextInput(session,"loc_input_row",value=rowgene_loc) if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)])!=0){ updateSelectInput(session,"chrom2",selected = paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....chromosome_name,"_"))} if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)])!=0){ updateSelectInput(session,"chrom1",selected = paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....chromosome_name,"_"))} } #end check to see if there is input in the gene search. 
}) # observeEvent(input$goButton, { # hide("minimap") # hide("row_gene_data") # hide("col_gene_data") # }) # observeEvent(event_data("plotly_click"), { # show("minimap") # show("row_gene_data") # show("col_gene_data") # }) observeEvent(plotly::event_data("plotly_click"), { #showTab(inputId = "tabs",select = T, target = "sample info") if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { showTab(inputId="tabs",target="gain/loss frequency") } shinyjs::show("row_gene_data") shinyjs::show("col_gene_data") shiny::showTab(inputId="tabs",target="sample info") shiny::showTab(inputId="tabs",target="COSMIC cancer gene census") shiny::showTab(inputId="tabs",target="expression_data") }) observeEvent(input$goButton, { showTab(inputId = "tabs",select = T, target = "Plots") if(isolate(input$data_source)!="linreg_osteosarcoma_CNVkit") { shiny::hideTab(inputId="tabs",target="gain/loss frequency") } if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { shiny::showTab(inputId="tabs",select = F,target="gain/loss frequency") } }) observeEvent(input$data_source, { if(isolate(input$data_source)!="linreg_osteosarcoma_CNVkit") { shiny::hideTab(inputId="tabs",target="gain/loss frequency") } if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { shiny::showTab(inputId="tabs",select = F,target="gain/loss frequency") } if(is.null(plotly::event_data("plotly_click"))){ shiny::hideTab(inputId="tabs",target="gain/loss frequency") shiny::hideTab(inputId="tabs",target="sample info") shiny::hideTab(inputId="tabs",target="COSMIC cancer gene census") shiny::hideTab(inputId="tabs",target="expression_data") } }) getHeight<-function() { return(isolate(input$heatmapHeight)) } logging::addHandler(printLogJs) isolate(input$goButton) # observe({ # input$goButton # if(!is.null(isolate(input$loc_input_row))){ # updateSelectInput(session,"chrom1",chromosomes,selected=paste0(as.character(GRanges(isolate(input$loc_input_row))@seqnames),"_"))} # }) output$plotlyChromosomalHeatmap <- plotly::renderPlotly({ if (input$goButton == 0) {return()} input$goButton if(debug){browser()} #browser() #browser() # if(!file.exists( # ( # paste0(getwd(),"/matrix/linreg/", # chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], # "melted_downsampled_linreg.RData") # ) # )){ return("file does not exist!");} #if there is location data, change the chromosomes from what they were chosen. 
# #isolate(input$loc_input_row) # observe({ # updateSelectInput(session,"chrom1",chromosomes,selected=paste0(as.character(GRanges(isolate(input$loc_input_row))@seqnames),"_")) # }) if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { # load( url(paste0(paste0(baseurl,"matrix/linreg/unrescaled/", # chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], # "melted_downsampled_linreg_unrescaled.RData")))) load(paste0(paste0(osteofn,"linreg236/unrescaled/downsample/", chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], "melted_downsampled_linreg_max_cap_75.RData"))) # load( url(paste0(paste0(baseurl,"matrix/linreg/unrescaled/full/", # chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], # "melted_full_linreg_max_cap_75.RData")))) load( paste0(paste0(osteofn,"linreg236/unrescaled/full/", chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], "melted_full_linreg_max_cap_75.RData"))) downsample_factor<<-4 if(debug){browser()} if(!exists("osteofn")){ tryCatch(bin_data<<-readRDS((url(paste0(baseurl,"bin_data_lcc236.rds")))),error = function(e) NULL)} tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) colnames(ggplotmatrix_full)<-colnames(ggplotmatrix)<-c("Var1","Var2","value","Var11","Var21","value1") } # if(isolate(input$data_source)=="TCGA_SARC_SNP6") { load( url(paste0(paste0(baseurl,"matrix/TCGA_SARC/downsampled_factor_8/", chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], "melted_downsampled_TGCA_SARC_unrescaledv2.RData")))) # load( url(paste0(paste0(baseurl,"matrix/TCGA_SARC/full/", # chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))], # "melted_full_TGCA_SARC_unrescaled.RData")))) downsample_factor<<-8 } # if(isolate(input$data_source)=="TCGA_BRCA_low_pass") { # sample_name<-"BRCA_output_matrix1e6" load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/BRCA/", paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData") )))) ggplotmatrix_full<-ggplotmatrix } if(isolate(input$data_source)=="TCGA_AML_low_pass") { sample_name<-"AML_output_matrix1e6" load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/AML/", paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData") )))) ggplotmatrix_full<-ggplotmatrix } if(isolate(input$data_source)=="TCGA_PRAD_low_pass") { sample_name<-"PRAD_output_matrix1e6" load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/PRAD/", paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData") )))) ggplotmatrix_full<-ggplotmatrix } 
if(isolate(input$data_source)=="TCGA_NBL_low_pass") { #browser() sample_name<-"NBL_output_matrix1e6" load( paste0(paste0(basefn,"matrix/TCGA_low_pass/NBL/", paste0(isolate(input$chrom1),isolate(input$chrom2),"nbl_sample_matched_unrescaled.RData") ))) #browser() # ggplotmatrix ggplotmatrix_full<-ggplotmatrix tryCatch(bin_data<<-readRDS((url(paste0(baseurl,"bin_data_nbl.rds")))),error = function(e) NULL) tryCatch(bin_data<<-readRDS((paste0(basefn,"bin_data_nbl.rds"))),error = function(e) NULL) } if(isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { #browser() subset_name<<-gsub("_subset","",gsub("TCGA_NBL_","",paste0(input$data_source))) sample_name<-"NBL_output_matrix1e6" # load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/NBL/",subset_name,"/", # paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_","NBLsample_matched","_unrescaled",subset_name,".RData") # )))) load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/NBL/",subset_name,"/", paste0(isolate(input$chrom1),isolate(input$chrom2),"melted_downsampled_TGCA_","NBLsample_matched","_unrescaled",subset_name,"pos_neg.RData") )))) if(length(bin_data$probe)==0) { bin_data$probe<-rownames(bin_data) } #browser() # ggplotmatrix ggplotmatrix_full<-ggplotmatrix tryCatch(bin_data<<-readRDS((url(paste0(baseurl,"bin_data_nbl_",subset_name,".rds")))),error = function(e) NULL) tryCatch(bin_data<<-readRDS((paste0(basefn,"bin_data_nbl_",subset_name,".rds"))),error = function(e) NULL) input_mat<-bin_data %>% dplyr::select(-probe) rownames(input_mat)<-bin_data$probe # tryCatch(expression_data_gr_nbl<<-readRDS(url(paste0(baseurl,"tcga_nbl_expression_",subset_name,"subset.rds"))),error = function(e) NULL) tryCatch(expression_data_gr_nbl<<-readRDS(paste0(basefn,"tcga_nbl_expression_",subset_name,"subset.rds")),error = function(e) NULL) #browser() #server-side processing(disabled): # tryCatch(tcga_gr<<-readRDS((url(paste0(baseurl,"tcga_gr_no_stats.rds")))),error = function(e) NULL) # tryCatch(tcga_gr<<-readRDS((paste0(basefn,"tcga_gr_no_stats.rds"))),error = function(e) NULL) # tryCatch(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm<<-readRDS((url(paste0(baseurl,"tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_caseid.rds")))),error = function(e) NULL) # tryCatch(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm<<-readRDS((paste0(basefn,"tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_caseid.rds"))),error = function(e) NULL) # # tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset<-as.data.frame(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm)[,na.omit(match(colnames(bin_data),colnames(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm)))] # #dim(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset) # mcols(tcga_gr)$rowMean<-rowMeans(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset) #tcga_dfs_cbind_with_ensg[,2:ncol(tcga_dfs_cbind_with_ensg)] # mcols(tcga_gr)$rowMeanPctl<-heatmaply::percentize(rowMeans(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset)) # mcols(tcga_gr)$rowVar<-matrixStats::rowVars(as.matrix(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset)) # mcols(tcga_gr)$rowVarPctl<-heatmaply::percentize(matrixStats::rowVars(as.matrix(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset))) # mcols(tcga_gr)$SYMBOL<-mcols(tcga_gr)$....external_gene_name # mcols(tcga_gr)$gene_type<-mcols(tcga_gr)$....gene_biotype # expression_data_gr<<-tcga_gr } 
if(isolate(input$data_source)=="TCGA_OS_low_pass") { sample_name<-"OS_output_matrix1e6" load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/OS/", paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData") )))) ggplotmatrix_full<-ggplotmatrix } # colnames(ggplotmatrix)<-gsub(pattern = "(\\.)+.","",colnames(ggplotmatrix)) # colnames(ggplotmatrix_full)<-gsub(pattern = "(\\.)+.","",colnames(ggplotmatrix_full)) #browser() ggplotmatrix$value<-signedRescale(ggplotmatrix$value,max_cap=isolate(input$max_cap))[,1] ggplotmatrix<-dplyr::bind_cols(ggplotmatrix,reshape2::colsplit(ggplotmatrix$Var1,"_",c("chr1","start1","end1"))) ggplotmatrix<-dplyr::bind_cols(ggplotmatrix,reshape2::colsplit(ggplotmatrix$Var2,"_",c("chr2","start2","end2"))) ggplotmatrix<-ggplotmatrix[order(ggplotmatrix$start1,ggplotmatrix$start2),] if(!is.null(ggplotmatrix)){ggplotmatrix<<-ggplotmatrix} if(!is.null(ggplotmatrix_full)){ ggplotmatrix_full$value<-signedRescale(ggplotmatrix_full$value,max_cap=isolate(input$max_cap))[,1]} if(!is.null(ggplotmatrix_full)){ggplotmatrix_full<<-ggplotmatrix_full} recast_matrix<-reshape2::dcast(data=ggplotmatrix,formula=Var1 ~ Var2, var = ggplotmatrix$value) #this creates a matrix in wide format. if(ncol(recast_matrix)!=nrow(recast_matrix)) { rownames(recast_matrix)<-recast_matrix$Var1 recast_matrix<-recast_matrix[,2:ncol(recast_matrix)] } recast_matrix_full<-reshape2::dcast(data=ggplotmatrix_full,formula=Var1 ~ Var2, var = ggplotmatrix_full$value) #this creates a matrix with if(ncol(recast_matrix_full)!=nrow(recast_matrix_full)) { rownames(recast_matrix_full)<-recast_matrix_full$Var1 recast_matrix_full<-recast_matrix_full[,2:ncol(recast_matrix_full)] } #browser() #resorting recast_matrix if(!is.null(recast_matrix)){recast_matrix<<-recast_matrix} if(!is.null(recast_matrix_full)){recast_matrix_full<<-recast_matrix_full} rownames_gr<-underscored_pos_to_GRanges(rownames(recast_matrix),zeroToOneBasedStart = F,zeroToOneBasedEnd = F) colnames_gr<-underscored_pos_to_GRanges(colnames(recast_matrix),zeroToOneBasedStart = F,zeroToOneBasedEnd = F) rownames_gr_full<-underscored_pos_to_GRanges(rownames(recast_matrix_full),zeroToOneBasedStart = F,zeroToOneBasedEnd = F) colnames_gr_full<-underscored_pos_to_GRanges(colnames(recast_matrix_full),zeroToOneBasedStart = F,zeroToOneBasedEnd = F) if(!is.null(rownames_gr)){rownames_gr<<-rownames_gr} if(!is.null(rownames_gr_full)){rownames_gr_full<<-rownames_gr_full} if(!is.null(colnames_gr)){colnames_gr<<-colnames_gr} if(!is.null(colnames_gr_full)){colnames_gr_full<<-colnames_gr_full} ggplotmatrix$value1<-gsub("col genes:","row genes:",ggplotmatrix$value1) ggplotmatrix$value1<-gsub("row_genes:","col_genes:",ggplotmatrix$value1) rownames_ordered<-GRanges_to_underscored_pos(rownames_gr[order(rownames_gr)]) colnames_ordered<-GRanges_to_underscored_pos(colnames_gr[order(colnames_gr)]) if(debug){browser()} recast_matrix<-recast_matrix[rownames_ordered,colnames_ordered] block_indices_row<-jointseg::jointSeg(recast_matrix,K=10,method="RBS")$bestBkp block_indices_col<-jointseg::jointSeg(t(recast_matrix),K=10,method="RBS")$bestBkp block_index_labels_row<-rownames(recast_matrix)[block_indices_row] block_index_labels_col<-colnames(recast_matrix)[block_indices_col] # xfactor<-as.factor(ggplotmatrix$Var1) # levels(xfactor)<-order(colnames_gr) # yfactor<-as.factor(ggplotmatrix$Var1) # levels(yfactor)<-order(rownames_gr) # p 
<- ggplot(data = ggplotmatrix ) + #geom_tile() + theme_void() # geom_raster(aes(x = xfactor, y = yfactor,fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",value1))) + scale_x_discrete(breaks = block_index_labels_col) + # scale_y_discrete(breaks = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) + # ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) + coord_flip() #+ scale_y_reverse(breaks=block_indices) # #browser() #recreate input matrix, add rownames. #browser() options(stringsAsFactors = F) input_mat<-bin_data %>% dplyr::select(-probe) %>% as.data.frame() rownames(input_mat)<-bin_data$probe #correlate input matrix if(debug){browser()} if(isolate(input$visval)=="Correlation" & isolate(input$data_source)!="linreg_osteosarcoma_CNVkit") { if(isolate(input$cor_method)!="spearman - pearson"){ input_mat_cor<-cor(t(input_mat),method=isolate(input$cor_method)) } else { input_mat_cor<-cor(t(input_mat),method="spearman")-cor(t(input_mat),method="pearson") } #browser() #wide to long input_mat_cor_flat<-input_mat_cor %>% reshape2::melt() #grab ggplotmatrix and add correlation values. #if(!isolate(input$genes_toggle)){ggplotmatrix$value1<-NULL} #browser() #ggplotmatrix_joined<- dplyr::inner_join(x=ggplotmatrix,y=input_mat_cor_flat,by=c("Var1"="Var1","Var2"="Var2")) ggplotmatrix_joined<- data.table::merge.data.table(x=ggplotmatrix,y=input_mat_cor_flat,by.x=c("Var1","Var2"),by.y=c("Var1","Var2"),all=F) colnames(ggplotmatrix_joined) <- ggplotmatrix_joined %>% colnames() %>% gsub(pattern = "value.x",replacement = "linregval") %>% gsub(pattern = "value.y",replacement = "correlation") #convert the negative log p-values to p-values and apply two kinds of FDR correction. 
#browser() ggplotmatrix_joined$pvalue<-exp(-(abs(ggplotmatrix_joined$orig_value))) ggplotmatrix_joined$adjpvaluechr<-p.adjust(p = ggplotmatrix_joined$pvalue,method = "fdr") ggplotmatrix_joined$adjpvaluegenome<-p.adjust(p = ggplotmatrix_joined$pvalue,method = "fdr", n = dim(input_mat)[1]*dim(input_mat)[2]) ggplotmatrix_joined<<-ggplotmatrix_joined rownames_ordered<-GRanges_to_underscored_pos(rownames_gr[order(rownames_gr)]) colnames_ordered<-GRanges_to_underscored_pos(colnames_gr[order(colnames_gr)]) if(isolate(input$fdr_correction)=="chromosome_pair"){ ggplotmatrix_joined$adjpvalue<-ggplotmatrix_joined$adjpvaluechr } else { if(isolate(input$fdr_correction)=="genome"){ ggplotmatrix_joined$adjpvalue<-ggplotmatrix_joined$adjpvaluegenome } } #browser() ggplotmatrix_joined<<-ggplotmatrix_joined if(isolate(input$visval)=="Correlation") { ggplotmatrix_joined$visval<-ggplotmatrix_joined$correlation } else { if(isolate(input$visval)=="-log(Linear Regression P-value) * correlation sign") { ggplotmatrix_joined$visval<-ggplotmatrix_joined$linregval } } if(isolate(input$pval_filter_toggle)){ ggplotmatrix_joined$visval<-ifelse(ggplotmatrix_joined$adjpvalue<0.05,ggplotmatrix_joined$linregval,0.5) } else { ggplotmatrix_joined$visval<-ggplotmatrix_joined$linregval } if(!isolate(input$genes_toggle)){ ggplotmatrix_joined$genes_text<-rep("",nrow(ggplotmatrix_joined)) } else { ggplotmatrix_joined$genes_text<-ggplotmatrix_joined$value1 } #browser() #as.integer(as.character(reshape2::colsplit(ggplotmatrix$Var2,"_",c("chr2","start2","end2"))$start2)) p <- ggplot(data = ggplotmatrix_joined ) + #geom_tile() + theme_void() geom_tile(aes(x = as.numeric(start2), y = as.numeric(start1), fill=visval,text=paste0("value:",visval,"\nrow:",Var1,"\ncol:",Var2,"\n",genes_text,"\nFDR p=",adjpvalue,"\n",isolate(input$cor_method)," Correlation=",correlation)),alpha=ifelse(ggplotmatrix_joined$adjpvaluechr<0.05,1.0,0.1)) + # scale_x_continuous(breaks = reshape2::colsplit(block_index_labels_col,"_",c("chr","start","end"))$start,labels = block_index_labels_col) + scale_y_continuous(breaks = reshape2::colsplit(block_index_labels_row,"_",c("chr","start","end"))$start,labels = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) + ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) } else { if(debug){browser()} if(!isolate(input$genes_toggle)){ ggplotmatrix$genes_text<-rep("",nrow(ggplotmatrix)) } else { ggplotmatrix$genes_text<-ggplotmatrix$value1 } #browser() ggplotmatrix$pvalue<-exp(-(abs(ggplotmatrix$value))) ggplotmatrix$adjpvaluechr<-p.adjust(p = ggplotmatrix$pvalue,method = "fdr") ggplotmatrix$adjpvaluegenome<-p.adjust(p = ggplotmatrix$pvalue,method = "fdr", n = dim(input_mat)[1]*dim(input_mat)[2]) p <- ggplot(data = ggplotmatrix ) + #geom_tile() + theme_void() geom_tile(aes(x = as.numeric(start2), y = as.numeric(start1), fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",genes_text,"\nFDR p=",adjpvaluechr,"\n")),alpha=ifelse((ggplotmatrix$adjpvaluechr<0.05 | !input$pval_filter_toggle),1.0,0.1)) + # scale_x_continuous(breaks = reshape2::colsplit(block_index_labels_col,"_",c("chr","start","end"))$start,labels = block_index_labels_col) + scale_y_continuous(breaks = reshape2::colsplit(block_index_labels_row,"_",c("chr","start","end"))$start,labels = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) + ggplot2::scale_fill_gradient2(low = "blue", high = "red", 
midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) } #end instructions done IF correlation is specified. #browser() #+ geom_contour(binwidth = .395,aes(z=value)) ### browser() #+ coord_flip() #+ scale_y_reverse(breaks=block_indices) #p #lumpy_points_toggle if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { if(exists("osteofn")) { tryCatch(SVs_data_in_submatrix_coords<-readRDS(paste0(osteofn,"breakpoint_gint_lcc236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords.rds" )),error = function(e) NULL) tryCatch(lumpy_summarized_counts<-readRDS(paste0(osteofn,"lumpy_sv_236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror.rds" )),error = function(e) NULL) }else { tryCatch(SVs_data_in_submatrix_coords<-readRDS(url(paste0(baseurl,"breakpoint_gint_lcc236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords.rds" ))),error = function(e) NULL) tryCatch(lumpy_summarized_counts<-readRDS(url(paste0(baseurl,"lumpy_sv_236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror.rds" ))),error = function(e) NULL) } } if(isolate(input$data_source) %in% c("TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass","TCGA_NBL_low_pass","TCGA_PRAD_low_pass")) { if(exists("basefn")) { #browser() tryCatch(SVs_data_in_submatrix_coords<-readRDS(paste0(basefn,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" )),error = function(e) NULL) tryCatch(lumpy_summarized_counts<-readRDS(paste0(basefn,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" )),error = function(e) NULL) tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source))) tryCatch(TCGA_low_pass_sample_info<<-readRDS(paste0(basefn,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" )),error = function(e) NULL) if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos} } else { tryCatch(SVs_data_in_submatrix_coords<-readRDS(url(paste0(baseurl,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" ))),error = function(e) NULL) tryCatch(lumpy_summarized_counts<-readRDS(url(paste0(baseurl,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" ))),error = function(e) NULL) tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source))) tryCatch(TCGA_low_pass_sample_info<<-readRDS(url(paste0(baseurl,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" ))),error = function(e) NULL) if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos} } } if(isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { subset_name<<-gsub("_subset","",gsub("TCGA_NBL_","",paste0(input$data_source))) if(exists("basefn")) { tryCatch(SVs_data_in_submatrix_coords<-readRDS(paste0(basefn,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" )),error = function(e) NULL) 
tryCatch(lumpy_summarized_counts<-readRDS(paste0(basefn,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" )),error = function(e) NULL) tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source))) tryCatch(TCGA_low_pass_sample_info<<-readRDS(paste0(basefn,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" )),error = function(e) NULL) if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos} } else { tryCatch(SVs_data_in_submatrix_coords<-readRDS(url(paste0(baseurl,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" ))),error = function(e) NULL) tryCatch(lumpy_summarized_counts<-readRDS(url(paste0(baseurl,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" ))),error = function(e) NULL) tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source))) tryCatch(TCGA_low_pass_sample_info<<-readRDS(url(paste0(baseurl,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" ))),error = function(e) NULL) if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos} } } # return(lumpy_summarized_counts) #} #DISABLING CLIENT SIDE PROCESSING OF GenomicInteraction data. # submat_row_gr<-underscored_pos_to_GRanges(rownames(recast_matrix)) # submat_col_gr<-underscored_pos_to_GRanges(colnames(recast_matrix)) # breakpoint_gint_full_subset<-breakpoint_gint_full[anchorOne(breakpoint_gint_full)@seqnames %in% gsub("_","",isolate(input$chrom1)) & # anchorTwo(breakpoint_gint_full)@seqnames %in% gsub("_","",isolate(input$chrom2))] # # if( # grep(paste0("\\b",unique(as.character(submat_row_gr@seqnames)),"\\b"),gsub("_","",chromosomes))>grep(paste0("\\b",unique(as.character(submat_col_gr@seqnames)),"\\b"),gsub("_","",chromosomes)) # ){ # SVs_data_in_submatrix_coords<-rebinGenomicInteractions(gint=breakpoint_gint_full_subset, # whole_genome_matrix = NULL, # rownames_gr = submat_col_gr, # colnames_gr = submat_row_gr, # rownames_mat = colnames(recast_matrix), # colnames_mat = rownames(recast_matrix), # method="nearest") # } else {SVs_data_in_submatrix_coords<-rebinGenomicInteractions(gint=breakpoint_gint_full_subset, # whole_genome_matrix = NULL, # rownames_gr = submat_row_gr, # colnames_gr = submat_col_gr, # rownames_mat = rownames(recast_matrix), # colnames_mat = colnames(recast_matrix), # method="nearest") # } #END CLIENT SIDE GINT PROCESSING # if(input$contour){ # p <- ggplot(data = ggplotmatrix, aes(x = Var2, y = Var1,fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",value1)) ) + #geom_tile() + theme_void() # geom_tile() + scale_x_discrete(breaks = block_index_labels_col) + # scale_y_discrete(breaks = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) + # ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) + coord_flip() #+ scale_y_reverse(breaks=block_indices) # } #rep(paste0(colnames(lumpy_summarized_counts[,3:ncol(lumpy_summarized_counts)]),collapse='/n'),nrow(lumpy_summarized_counts)) #tidyr::unite(data = lumpy_summarized_counts[,3:ncol(lumpy_summarized_counts)],sep="\n")[,1] # if(exists("lumpy_summarized_counts") && 
isolate(input$lumpy_points_toggle)){ lumpy_summarized_counts$textlabel<-unlist(strsplit(x = paste0("col:",lumpy_summarized_counts$row_bin_label,"\nrow:",lumpy_summarized_counts$col_bin_label,"\ntotal SVs:",lumpy_summarized_counts$total_samples, "\nhighest freq SV type:",lumpy_summarized_counts$highest_count_sample,lumpy_summarized_counts$highest_count_sample_count/lumpy_summarized_counts$total_samples*100,"%\n types, ranked:",lumpy_summarized_counts$concatenated_sample_names,collapse="@"),"@")) # p<-p + geom_point(data=lumpy_summarized_counts,mapping=aes(x=as.integer(as.character(lumpy_summarized_counts$col_bin_index)),y=as.integer(as.character(lumpy_summarized_counts$row_bin_index)), # color=lumpy_summarized_counts$highest_count_sample,size=lumpy_summarized_counts$total_samples, # text=lumpy_summarized_counts$textlabel # # )) if(is.null(lumpy_summarized_counts$start1)) {lumpy_summarized_counts<-dplyr::bind_cols(lumpy_summarized_counts,reshape2::colsplit(lumpy_summarized_counts$row_bin_label,"_",c("chr1","start1","end1"))) lumpy_summarized_counts<-dplyr::bind_cols(lumpy_summarized_counts,reshape2::colsplit(lumpy_summarized_counts$col_bin_label,"_",c("chr2","start2","end2"))) } p<-p + geom_point(data=lumpy_summarized_counts,mapping=aes(x=as.numeric(as.character(lumpy_summarized_counts$start1)),y=as.numeric(as.character(lumpy_summarized_counts$start2)), color=as.character(lumpy_summarized_counts$highest_count_sample),size=as.numeric(as.character(lumpy_summarized_counts$total_samples)), text=lumpy_summarized_counts$textlabel)) } # if(exists("SVs_data_in_submatrix_coords") && isolate(input$plot_points_toggle)) { SVs_data_in_submatrix_coords$col_bin_index<-as.numeric(as.character(SVs_data_in_submatrix_coords$col_bin_index)) SVs_data_in_submatrix_coords$row_bin_index<-as.numeric(as.character(SVs_data_in_submatrix_coords$row_bin_index)) if(is.null(SVs_data_in_submatrix_coords$start1)) {SVs_data_in_submatrix_coords<-dplyr::bind_cols(SVs_data_in_submatrix_coords,reshape2::colsplit(SVs_data_in_submatrix_coords$row_bin_label,"_",c("chr1","start1","end1"))) SVs_data_in_submatrix_coords<-dplyr::bind_cols(SVs_data_in_submatrix_coords,reshape2::colsplit(SVs_data_in_submatrix_coords$col_bin_label,"_",c("chr2","start2","end2"))) } SVs_data_in_submatrix_coords$textlabel<-unlist(strsplit(x = paste0("col:",SVs_data_in_submatrix_coords$row_bin_label,"\nrow:",SVs_data_in_submatrix_coords$col_bin_label,"\ntotal SVs:",SVs_data_in_submatrix_coords$total_samples, "\nhighest freq SV type:",SVs_data_in_submatrix_coords$highest_count_sample,SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples*100,"%\n types, ranked:", SVs_data_in_submatrix_coords$concatenated_sample_names,collapse="@"),"@")) #print(p_with_points) #},error = function(err) { # print(paste("Caught & handled error: ",err)) tryCatch( highest_over_tot<-as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples),error = function(e) NULL) tryCatch(colorvals<-as.character(cut(highest_over_tot,breaks=unique(quantile(highest_over_tot,probs=c(0.25,0.5,0.75))))),error = function(e) NULL) if(exists("colorvals")) { p_with_points<-p + geom_point(data=SVs_data_in_submatrix_coords,mapping = aes(x=as.numeric(as.character(SVs_data_in_submatrix_coords$start1)),y=as.numeric(as.character(SVs_data_in_submatrix_coords$start2)), text=SVs_data_in_submatrix_coords$textlabel, size=as.numeric(as.character(SVs_data_in_submatrix_coords$total_samples)), 
#shape=as.character(SVs_data_in_submatrix_coords$highest_count_sample), color= colorvals) ) + labs(color="",size="") } else { p_with_points<-p + geom_point(data=SVs_data_in_submatrix_coords,mapping = aes(x=as.numeric(as.character(SVs_data_in_submatrix_coords$start1)),y=as.numeric(as.character(SVs_data_in_submatrix_coords$start2)), text=SVs_data_in_submatrix_coords$textlabel, color="CGI SV", size=as.numeric(as.character(SVs_data_in_submatrix_coords$total_samples))) ) + labs(size="")} #+ scale_color_gradient(low="green",high="darkgreen") #color=as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples) # + scale_colour_gradientn(colours = c("blue","white","red"),values=c(0,0.5,1)) # p_with_points<-p + geom_point(data=SVs_data_in_submatrix_coords,mapping = aes(x=as.integer(as.character(SVs_data_in_submatrix_coords$col_bin_index)),y=as.integer(as.character(SVs_data_in_submatrix_coords$row_bin_index)), # text=tidyr::unite(data = SVs_data_in_submatrix_coords[,3:ncol(SVs_data_in_submatrix_coords)],sep="\n")[,1], # size=as.integer(as.character(SVs_data_in_submatrix_coords$total_samples)), # #shape=as.character(SVs_data_in_submatrix_coords$highest_count_sample), # color=as.character(arules::discretize(as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples),method="interval")) # #color=as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples) # )) #+ scale_colour_gradientn(colours = c("blue","white","red"),values=c(0,0.5,1)) # scale_colour_gradient2() #set the range to be specific if there are coordinates (the cell +/- 4), else choose the max range for the particular axis. if(debug){browser()} #check for the correct format. plotly_output<-plotly::ggplotly(p_with_points,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25)) } else {if(exists("p")) { plotly_output<-plotly::ggplotly(p,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25)) } } # #plotly_output<-plotly::ggplotly(p) %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=1280,height=1024) #%>% saveWidget(title = gsub("_","",paste0(chromosomes[isolate(input$chrom1)],"-",chromosomes[isolate(input$chrom2)])),file = paste0(chromosomes[isolate(input$chrom1)],chromosomes[isolate(input$chrom2)],"transparent_tooltipv27_coord_no_flip_downsample_upward_orientation_plotly_nrsample.html"),selfcontained = T) # if( (!is.null(isolate(input$loc_input_row)) | !is.null(isolate(input$loc_input_col)) ) & (!isolate(input$loc_input_row)=="" | !isolate(input$loc_input_col)=="")) { if(debug){browser()} #acknowledgement: thanks to stackoverflow comments that made package a reality. 
#find the location of the bin in terms of map coordinates for x
#store this as the xcentercoord
#do the same for y
#store as ycentercoord
rowsplit<-reshape2::colsplit(isolate(input$loc_input_row),c("\\:|\\-"),c("chr","start","end"))
columnsplit<-reshape2::colsplit(isolate(input$loc_input_col),c("\\:|\\-"),c("chr","start","end"))
xmin<-columnsplit$start
xmin<-ggplotmatrix$start2[which.min(abs(ggplotmatrix$start2-xmin))]-1e6
xmax<-columnsplit$end
xmax<-ggplotmatrix$start2[which.min(abs(ggplotmatrix$start2-xmax))]+1e6
ymin<-rowsplit$start
#snap the y coordinates to the row axis (start1); indexing start2 here was a copy-paste error.
ymin<-ggplotmatrix$start1[which.min(abs(ggplotmatrix$start1-ymin))]-1e6
ymax<-rowsplit$end
ymax<-ggplotmatrix$start1[which.min(abs(ggplotmatrix$start1-ymax))]+1e6
xglobalmin<-min(ggplotmatrix$start2)
yglobalmin<-min(ggplotmatrix$start1)
xglobalmax<-max(ggplotmatrix$start2)
yglobalmax<-max(ggplotmatrix$start1)
#edge case-- if the requested minimum falls below the global minimum, reset it to the global minimum.
#edge case-- do the same for y
if(xmin<xglobalmin){xmin<-xglobalmin}
if(ymin<yglobalmin){ymin<-yglobalmin}
#edge case-- if the requested maximum exceeds the global maximum, reset it to the global maximum.
#edge case-- do the same for y
if(xmax>xglobalmax){xmax<-xglobalmax}
if(ymax>yglobalmax){ymax<-yglobalmax}
#ggplotly(p, dynamicTicks = T) %>% plotly::layout(xaxis=list(autorange=F, range=c(xcentercoord-4,xcentercoord+4)), yaxis=list(autorange=F, range=c(20,30)))
if(!exists("xmin")){xmin<-xglobalmin}
if(!exists("xmax")){xmax<-xglobalmax}
if(!exists("ymin")){ymin<-yglobalmin}
if(!exists("ymax")){ymax<-yglobalmax}
#need to round the max and min for all.
#xmin<-floor(xmin/1e6)*1e6
if(exists("p_with_points")){
plotly_output<-plotly::ggplotly(p_with_points,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25), xaxis=list(range=c(xmin,xmax),autorange=F), yaxis=list(range=c(ymin,ymax),autorange=F))
} else { if(exists("p")) {
plotly_output<-plotly::ggplotly(p,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25),xaxis=list(range=c(xmin,xmax),autorange=F), yaxis=list(range=c(ymin,ymax),autorange=F))
} }
return(plotly_output)
} else {}
if(debug){browser()}
print(plotly_output)
if(debug){browser()}
return(plotly_output)
})
outputOptions(output,"plotlyChromosomalHeatmap",suspendWhenHidden=F)
output$whole_genome_image<-renderImage({
#output$whole_genome_image<-renderUI({
#https://community.rstudio.com/t/shinydashboard-render-only-the-clicked-tab/36493
input$whole_genome_max_cap
input$goButton
#browser()
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") {
data_prefix<-"osteo"
pngfn <- osteofn }
if(isolate(input$data_source)=="TCGA_NBL_low_pass") {
data_prefix<-"nbl"
pngfn <- basefn }
#guard with exists(): data_prefix is never assigned for other data sources, so is.null(data_prefix) would throw an object-not-found error.
if(!exists("data_prefix")){return(NULL)}
# list(src = paste0("http://alps.nci.nih.gov/james/plotly_dashboard/whole_genome_pngs/",data_prefix,"_whole_genome_full_no_downsample_no_labels_rescaled_max_cap_",isolate(input$whole_genome_max_cap),".png"),
#      contentType = 'image/png',
#      width = isolate(input$heatmapHeight),
#      height = round(isolate(input$heatmapHeight)/1.25),
#      alt = "This is alternate text")
if(debug){browser()}
# tags$img(src = paste0("http://alps.nci.nih.gov/james/plotly_dashboard/whole_genome_pngs/",data_prefix,"_whole_genome_full_no_downsample_no_labels_rescaled_max_cap_",isolate(input$whole_genome_max_cap),".png"),
# # contentType = 'image/png',
#      width = isolate(input$heatmapHeight),
#      height = 
round(isolate(input$heatmapHeight)/1.25), # alt = "whole genome png") # #browser() #tags$image(src=paste0(pngfn,"whole_genome_pngs/",data_prefix,"_whole_genome_full_no_downsample_no_labels_rescaled_max_cap_",isolate(input$whole_genome_max_cap),".png"),width="100%") #browser() return( list(src=paste0(pngfn,"whole_genome_pngs/",data_prefix,"_whole_genome_full_no_downsample_no_labels_rescaled_max_cap_",isolate(input$whole_genome_max_cap),".png"))) #,width="25%" },deleteFile = F ) # output$freq_table<-renderDataTable({ # # return(data.table()) # }) getGGplotMatrix<-function(){if(exists("ggplotmatrix")){return(ggplotmatrix)}else{return(NULL)}} getGGplotMatrix_full<-function(){if(exists("ggplotmatrix_full")){return(ggplotmatrix_full)}else{return(NULL)}} #TCGA_low_pass_sample_info get_tcga_lp_sample_info<-function(){if(exists("TCGA_low_pass_sample_info")){return(TCGA_low_pass_sample_info)}else{return(NULL)}} get_recast_matrix<-function(){if(exists("recast_matrix")){return(recast_matrix)}else{return(NULL)}} get_downsample_factor<-function(){if(exists("downsample_factor")){return(downsample_factor)}else{return(NULL)}} get_recast_matrix_full<-function(){if(exists("recast_matrix_full")){return(recast_matrix_full)}else{return(NULL)}} get_rownames_gr<-function(){if(exists("rownames_gr")){return(rownames_gr)}else{return(NULL)}} get_colnames_gr<-function(){if(exists("colnames_gr")){return(colnames_gr)}else{return(NULL)}} get_rownames_gr_full<-function(){if(exists("rownames_gr_full")){return(rownames_gr_full)}else{return(NULL)}} get_colnames_gr_full<-function(){if(exists("colnames_gr_full")){return(colnames_gr_full)}else{return(NULL)}} # get_recast_matrix<-function(){return(recast_matrix)} output$expression_data<-DT::renderDataTable({ #browser() if(is.null(plotly::event_data("plotly_click"))){return(data.table())} if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { recast_matrix<-get_recast_matrix() row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. #row_point_gr<-underscored_pos_to_GRanges(row_label) #column_point_gr<-underscored_pos_to_GRanges(column_label) #row_index<-as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1 #col_index<-as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1 #row and col indices of the subset matrix. 
row_index_full<-grep(row_label,rownames(get_recast_matrix_full())) col_index_full<-grep(column_label,colnames(get_recast_matrix_full())) # #rowclick<-length(common_coords)-myReactives$currentClick$lat #colclick<-myReactives$currentClick$lng if(debug){browser()} if(is.null(expression_data_gr)){tryCatch(expression_data_gr<-readRDS(paste0(get("osteofn",.GlobalEnv),"expression_data_gr.rds")),error = function(e) NULL) } rowexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr,get_rownames_gr_full()[seq(from=row_index_full,to=row_index_full+3)])) colexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr,get_colnames_gr_full()[seq(from=col_index_full,to=col_index_full+3)]))} else { if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { if(debug){browser()} rownames_gr_full<-get_rownames_gr_full() colnames_gr_full<-get_colnames_gr_full() # if(!exists("expression_data_gr_nbl")){ tryCatch(expression_data_gr_nbl<-readRDS(paste0(get("basefn",.GlobalEnv),"tcga_nbl_expression.rds")),error = function(e) NULL) # } if(length(expression_data_gr_nbl)==0){ tryCatch(expression_data_gr_nbl<-readRDS(paste0(get("basefn",.GlobalEnv),"tcga_nbl_expression.rds")),error = function(e) NULL) } #mcols(expression_data_gr_nbl)$SYMBOL<-expression_data_gr_nbl$....external_gene_name if(debug){browser()} rowexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr_nbl,rownames_gr_full[rownames_gr_full@ranges@start==plotly::event_data("plotly_click")[["y"]]])) colexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr_nbl,colnames_gr_full[colnames_gr_full@ranges@start==plotly::event_data("plotly_click")[["x"]]])) } } rowexpression$rowcol<-"row" colexpression$rowcol<-"col" comb_expression_df<-rbind(rowexpression,colexpression) #comb_expression_df_t<-as.data.table(t(comb_expression_df)) #return(comb_expression_df_t) # cat(file=stderr(),paste0("expression_data")) # cat(file=stderr(),ls()) #make the rownames match for nbl outputexpression_df<-as.data.table(unique(comb_expression_df[,c("SYMBOL","seqnames","start","end","gene_type","rowMean","rowMeanPctl","rowVar","rowVarPctl")])) outputexpression_df_sorted<-outputexpression_df[order(-outputexpression_df$rowVarPctl),] return(as.data.table(outputexpression_df_sorted)) }) output$census_data<-DT::renderDataTable({ # if(is.null(plotly::event_data("plotly_click"))){return(data.table())} recast_matrix<-get_recast_matrix() if(length(intersect(ls(),"census_data_gr"))!=1) { tryCatch(census_data_gr<-readRDS(paste0(basefn,"censushg19.rds")),error = function(e) NULL)} row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. #row_point_gr<-underscored_pos_to_GRanges(row_label) #column_point_gr<-underscored_pos_to_GRanges(column_label) #row_index<-as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1 #col_index<-as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1 #row and col indices of the subset matrix. 
row_index_full<-grep(row_label,rownames(get_recast_matrix_full())) col_index_full<-grep(column_label,colnames(get_recast_matrix_full())) # #rowclick<-length(common_coords)-myReactives$currentClick$lat #colclick<-myReactives$currentClick$lng rowcensus<-as.data.table(IRanges::subsetByOverlaps(census_data_gr,get_rownames_gr_full()[seq(from=row_index_full,to=row_index_full+3)])) colcensus<-as.data.table(IRanges::subsetByOverlaps(census_data_gr,get_colnames_gr_full()[seq(from=col_index_full,to=col_index_full+3)])) rowcensus$rowcol<-"row" colcensus$rowcol<-"col" comb_census_df<-rbind(rowcensus,colcensus) comb_census_df_t<-as.data.table(t(comb_census_df)) # cat(file=stderr(),paste0("census_data")) # cat(file=stderr(),ls()) #return(comb_census_df_t) #browser() return(unique(as.data.table(comb_census_df))) #[,c("SYMBOL","seqnames","start","end","gene_type","rowMean","rowMeanPctl","rowVar","rowVarPctl")] }) # output$census_data<-renderDataTable({ # row_label<-rownames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. # column_label<-colnames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. # if(is.null(myReactives$currentClick)){return(data.frame())} # # # rowclick<-round(length(common_coords)-myReactives$currentClick$lat) # colclick<-round(myReactives$currentClick$lng) # rowcensus<-as.data.table(subsetByOverlaps(census_data_gr,rownames_gr[rowclick])) # colcensus<-as.data.table(subsetByOverlaps(census_data_gr,colnames_gr[colclick])) # rowcensus$rowcol<-"row" # colcensus$rowcol<-"col" # comb_expression_df<-rbind(rowcensus,colcensus) # comb_expression_df_t<-t(comb_expression_df) # return(comb_expression_df_t) # # }) output$gene_data <- renderPrint({ if(is.null(plotly::event_data("plotly_click"))){return(data.table())} row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. #if(myReactives) # #all_input<-isolate(input) # cat(file=stderr(),paste0("gene_data")) # cat(file=stderr(),ls()) rowclick<-length(common_coords)-myReactives$currentClick$lat colclick<-myReactives$currentClick$lng row_genes<-genev[rowclick] col_genes<-genev[colclick] # output<-paste0("row genes:",as.character(genev[rowclick]), "column genes:",as.character(genev[colclick])) return(output) }) output$row_gene_data <- DT::renderDataTable({ if(is.null(plotly::event_data("plotly_click"))){return(data.table())} #browser() #row_label<-rownames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. #column_label<-colnames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. 
#start<-proc.time() if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset","linreg_osteosarcoma_CNVkit")) { row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1) #column_label<-paste0(isolate(input$chrom1),event_data("plotly_click")[["x"]],"_",event_data("plotly_click")[["x"]]+1e6-1) } row_genes_merged<-IRanges::mergeByOverlaps(ensembl_gene_tx_data_gr,underscored_pos_to_GRanges(row_label)) row_genes<-sort(unique(row_genes_merged[row_genes_merged$....gene_biotype=="protein_coding","....external_gene_name"])) #cat(file=stderr(),paste0(names(proc.time()-start))) #cat(file=stderr(),paste0(proc.time()-start)) print(row_genes) dt<-as.data.table(row_genes) colnames(dt)<-"row genes" return(dt) #if(myReactives) # #all_input<-isolate(input) # cat(file=stderr(),paste0(event_data("plotly_click"))) #cat(file=stderr(),paste0(names(event_data("plotly_click")))) #cat(file=stderr(),paste0(event_data("plotly_click")["y"])) #cat(file=stderr(),paste0(row_label)) # cat(file=stderr(),ls()) #rowclick<-length(common_coords)-myReactives$currentClick$lat #colclick<-myReactives$currentClick$lng #row_genes<-genev[rowclick] #col_genes<-genev[colclick] # #output<-paste0("row genes:",as.character(genev[rowclick]), # "column genes:",as.character(genev[colclick])) #return(output) }) #,options = list(pageLength=5) output$col_gene_data <- DT::renderDataTable({ if(is.null(plotly::event_data("plotly_click"))){return(data.table())} #browser() #row_label<-rownames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. #column_label<-colnames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. 
if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset","linreg_osteosarcoma_CNVkit")) { #browser() #row_label<-paste0(isolate(input$chrom2),event_data("plotly_click")[["y"]],"_",event_data("plotly_click")[["y"]]+1e6-1) column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1) } #col_genes<-sort(unique(mergeByOverlaps(ensembl_gene_tx_data_gr,underscored_pos_to_GRanges(column_label))$....external_gene_name)) col_genes_merged<-IRanges::mergeByOverlaps(ensembl_gene_tx_data_gr,underscored_pos_to_GRanges(column_label)) col_genes<-sort(unique(col_genes_merged[col_genes_merged$....gene_biotype=="protein_coding","....external_gene_name"])) print(col_genes) #print(as.data.table(col_genes)) dt<-as.data.table(col_genes) colnames(dt)<-"column genes" return(dt) #if(myReactives) # #all_input<-isolate(input) # cat(file=stderr(),paste0(event_data("plotly_click"))) #cat(file=stderr(),paste0(names(event_data("plotly_click")))) #cat(file=stderr(),paste0(event_data("plotly_click")["y"])) #cat(file=stderr(),paste0(row_label)) # cat(file=stderr(),ls()) #rowclick<-length(common_coords)-myReactives$currentClick$lat #colclick<-myReactives$currentClick$lng #row_genes<-genev[rowclick] #col_genes<-genev[colclick] # #output<-paste0("row genes:",as.character(genev[rowclick]), # "column genes:",as.character(genev[colclick])) #return(output) }) #,options = list(pageLength=5) output$network <- visNetwork::renderVisNetwork({ if (input$goButton == 0) {return()} input$goButton #browser() # ggplotmatrix_filtered<-ggplotmatrix[ggplotmatrix$value > summary(heatmaply::percentize(ggplotmatrix$value))["3rd Qu."] | ggplotmatrix$value < summary(heatmaply::percentize(ggplotmatrix$value))["1st Qu."], ] # ggplotmatrix_filtered<-ggplotmatrix[heatmaply::percentize(ggplotmatrix$value) > 0.9999 | heatmaply::percentize(ggplotmatrix$value) < 0.0001, ] ggplotmatrix_filtered<-ggplotmatrix_full[order(ggplotmatrix_full$value),] ggplotmatrix_filtered<-ggplotmatrix_filtered[c(1:(isolate(input$n_nodes)/2),(nrow(ggplotmatrix_filtered)-(isolate(input$n_nodes)/2)):nrow(ggplotmatrix_filtered)),] ggplotmatrix_filtered<-ggplotmatrix_filtered[as.character(ggplotmatrix_filtered$Var1)!=as.character(ggplotmatrix_filtered$Var2),] vertex.attrs<-list(name = unique(c(as.character(ggplotmatrix_filtered$Var1), as.character(ggplotmatrix_filtered$Var2)))) edges<-rbind(as.character(ggplotmatrix_filtered$Var1),as.character(ggplotmatrix_filtered$Var2)) weights<-ggplotmatrix_filtered$value G <- igraph::graph.empty(n = 0, directed = T) G <- igraph::add.vertices(G, length(vertex.attrs$name), attr = vertex.attrs) G <- igraph::add.edges(G, edges,weight=weights) G_connected<-delete.isolates(G) # weights_discretized<-arules::discretize(E(G_connected)$weight) # G_connected_D3<-networkD3::igraph_to_networkD3(G_connected,group = as.character(arules::discretize(strength(G_connected)))) # forceNetwork(Links = G_connected_D3$links, Nodes = G_connected_D3$nodes, # Source = 'source', Target = 'target', # NodeID = 'name',Group='group',fontSize = 14,zoom=T) G_connected_vis<-visNetwork::toVisNetworkData(G_connected) G_connected_vis$edges$value<-G_connected_vis$edges$weight col_fun = circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red")) G_connected_vis$nodes$color<-sapply(col_fun(heatmaply::percentize(igraph::strength(G_connected))) 
,function(x) substr(x,start = 1,stop = 7)) visNetwork::visNetwork(nodes = G_connected_vis$nodes,edges = G_connected_vis$edges,width = isolate(input$heatmapHeight),height = round(isolate(input$heatmapHeight)/1.25)) %>% visNetwork::visInteraction(hover = TRUE) %>% visNetwork::visEvents(hoverNode = "function(nodes) { Shiny.onInputChange('current_node_id', nodes); ;}") }) output$shiny_return <- DT::renderDataTable({ input$current_node_id if(is.null(isolate(input$current_node_id))){return(data.table())} #browser() #DT::datatable(iris, options = list(lengthMenu = c(5, 30, 50), pageLength = 5) #paste0(input$current_node_id) return(as.data.table(ggplotmatrix[ggplotmatrix$Var1 %in% isolate(input$current_node_id) | ggplotmatrix$Var2 %in% isolate(input$current_node_id),]))#c("Var1,Var2","value","value1") },options = list(pageLength=5))# #pageLength = 5) output$sample_info<-plotly::renderPlotly({ input$sample_hist_alpha if(is.null(plotly::event_data("plotly_click"))){return(data.table())} if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) } #browser() #ed <- event_data("plotly_click") if (is.null(plotly::event_data("plotly_click"))) {return("Click events appear here (double-click to clear)")} if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit" | isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset") ) { recast_matrix<-get_recast_matrix() if(!is.null("recast_matrix")) { row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1) column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1) } if(length(bin_data$probe)==0) { bin_data$probe<-rownames(bin_data) } d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),]) if(nrow(d)==0){return("")} #p <- plotly::plot_ly(x = bin_data[1,], type = "histogram") # cat(file=stderr(),paste0("sample_info")) # cat(file=stderr(),ls()) sample_info_p <- plotly::plot_ly(alpha = isolate(input$sample_hist_alpha)) %>% plotly::add_histogram(x = as.numeric(d[1,]),name=d[1,"probe"]) %>% plotly::add_histogram(x = as.numeric(d[2,]),name=d[2,"probe"]) %>% plotly::layout(barmode = "overlay") print(sample_info_p) if(debug){browser()} return(sample_info_p) } } #end code for in-house data. 
if(isolate(input$data_source) %in% c("TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass","TCGA_PRAD_low_pass")) { TCGA_low_pass_sample_info<-get_tcga_lp_sample_info() } }) output$sample_info_scatter<-plotly::renderPlotly({ if(is.null(plotly::event_data("plotly_click"))){return(plotly::plotly_empty())} #browser() req(plotly::event_data("plotly_click")) #if (is.null(event_data("plotly_click"))) {return("Click events appear here (double-click to clear)")} recast_matrix<-get_recast_matrix() if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) } #if((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}) & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) } if(!is.null("recast_matrix")) { row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1) column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1) } if(length(bin_data$probe)==0) { bin_data$probe<-rownames(bin_data) } d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),]) #testing #browser() bin_data_colsplit<-reshape2::colsplit(bin_data$probe,"_",c("chr","start","end")) bin_data_colsplit[bin_data_colsplit$chr=="chr19",] #end testing if(nrow(d)==0){return("")} #p <- plotly::plot_ly(x = bin_data[1,], type = "histogram") # cat(file=stderr(),paste0("census_data")) # cat(file=stderr(),ls()) sample_info_p_scatter <- plotly::plot_ly(alpha = 0.6) %>% plotly::add_trace(x = as.numeric(d[1,]),name=d[1,"probe"],y=seq(1:ncol(d))) %>% plotly::add_trace(x = as.numeric(d[2,]),name=d[2,"probe"],y=seq(1:ncol(d)))# %>% # plotly::layout(barmode = "overlay") print(sample_info_p_scatter) if(debug){browser()} return(sample_info_p_scatter) } }) output$minimap<-plotly::renderPlotly({ #if(is.null(event_data("plotly_click"))){return(data.table())} #if(is.null(event_data("plotly_click"))){return(NULL)} req(plotly::event_data("plotly_click")) #event_data("plotly_click") #if (is.null(event_data("plotly_click"))) {return("Click events appear here (double-click to clear)")} if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) } recast_matrix<-get_recast_matrix() ggplotmatrix_full<-getGGplotMatrix_full() recast_matrix_full<-get_recast_matrix_full() if(!is.null("recast_matrix") & !is.null("recast_matrix_full")) { row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. 
column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1) column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1) } if(length(bin_data$probe)==0) { bin_data$probe<-rownames(bin_data) } d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),]) if(nrow(d)==0){return("")} row_labels_minimap<-rownames(recast_matrix_full)[grep(row_label,rownames(recast_matrix_full)):(grep(row_label,rownames(recast_matrix_full))+3)] #we subset by every fourth number along the rows and columns, hence we need n, n+1, n+2, n+3 (or n1:n2-1, the first number and all the numbers leading up to the next). col_labels_minimap<-colnames(recast_matrix_full)[grep(column_label,colnames(recast_matrix_full)):(grep(column_label,colnames(recast_matrix_full))+3)] ggplotmatrix_minimap<-ggplotmatrix_full[as.character(ggplotmatrix_full$Var1) %in% row_labels_minimap & as.character(ggplotmatrix_full$Var2) %in% col_labels_minimap, ] p <- ggplot(data = ggplotmatrix_minimap ) + #geom_tile() + theme_void() geom_raster(aes(x = Var2, y = Var1,fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",value1))) + scale_x_discrete() + scale_y_discrete() + theme(axis.text.x = element_text(angle=60, hjust=1)) + ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) #+ coord_flip() #+ scale_y_reverse(breaks=block_indices) # cat(file=stderr(),paste0("minimap")) # cat(file=stderr(),ls()) plotly_output<-plotly::ggplotly(p,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=isolate(input$heatmapHeight)/1.25) #print(plotly_output) #essentially, grab the row and column bins (above) for the sampled matrix, then grab the same coordinates for the full matrix, plus four to x, plus four to y. 
#p <- plotly::plot_ly(x = bin_data[1,], type = "histogram") # sample_info_p_scatter <- plot_ly(alpha = 0.6) %>% # add_trace(x = as.numeric(d[1,]),name=d[1,"probe"],y=seq(1:ncol(d))) %>% # add_trace(x = as.numeric(d[2,]),name=d[2,"probe"],y=seq(1:ncol(d)))# %>% # # layout(barmode = "overlay") # print(sample_info_p_scatter) if(debug){browser()} return(plotly_output) } }) output$sample_info_scatter2<-plotly::renderPlotly({ if(debug){browser()} req(plotly::event_data("plotly_click")) if (is.null(plotly::event_data("plotly_click"))) {return(NULL)} #browser() if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) } if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit" | isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { recast_matrix<-get_recast_matrix() if(!is.null("recast_matrix")) { # row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")) { row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1) column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1) } if(length(bin_data$probe)==0) { bin_data$probe<-rownames(bin_data) } d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),]) if(nrow(d)==0){return("")} d_sample_names<-names(d)[2:length(names(d))] #p <- plotly::plot_ly(x = bin_data[1,], type = "histogram") # #names(d) #sample_info_p_scatter2 <- plot_ly(alpha = 0.6,x = as.numeric(d[1,]),y=as.numeric(d[2,]),name=d_sample_names) # d_t<-as.data.frame(t(d)[2:ncol(d),]) colnames(d_t)<-d$probe d_t<-as.data.frame(sapply(as.data.frame(d_t),function(x) as.numeric(as.character(x)))) rownames(d_t)<-d_sample_names if(ncol(d_t)==1){d_t[,2]<-d_t[,1] colnames(d_t)[2]<-paste0(d$probe,"_")} #,text=paste0("x: ",paste0(colnames(d_t)[1])," ", d_t[,1],"\n y:",paste0(colnames(d_t)[2])," ",d_t[,2],"\n ",rownames(d_t)) #,color=rownames(d_t) # sample_info_p_scatter2<-ggplot(data = d_t,aes(x=d_t[,1],y=d_t[,2])) + geom_point(aes(color=rownames(d_t),text=paste0("x: ",paste0(colnames(d_t)[1])," ", d_t[,1],"\n y:",paste0(colnames(d_t)[2])," ",d_t[,2],"\n ",rownames(d_t)))) + theme(legend.position="none") + xlab(paste0(colnames(d_t)[1])) + ylab(paste0(colnames(d_t)[2])) + geom_smooth(method=lm) # %>% #name=d[1,"probe"],y=seq(1:ncol(d)) #add_trace(x = as.numeric(d[2,]),name=d[2,"probe"],y=seq(1:ncol(d)))# %>% # layout(barmode = "overlay") # cat(file=stderr(),paste0("sample_info_scatter2")) #cat(file=stderr(),ls()) # cat(file=stderr(),sapply(ls(),function(x) paste0(unlist(paste0(head(get(x))))))) # cat(file=stderr(),paste0("sample_info_p_scatter2")) # cat(file=stderr(),str(sample_info_p_scatter2)) # 
cat(file=stderr(),paste0("sample_info_p_scatter2_length")) # cat(file=stderr(),length(sample_info_p_scatter2)) # cat(file=stderr(),unlist(sapply(ls(),function(x) paste0(paste0(head(get(x))))))) # cat(file=stderr(),paste0("sample_info_p_scatter2")) # cat(file=stderr(),str(sample_info_p_scatter2)) #cat(file=stderr(),sapply(ls(),function(x) get(x))) print(plotly::ggplotly(sample_info_p_scatter2,tooltip=c("text"))) return(plotly::ggplotly(sample_info_p_scatter2,tooltip=c("text"))) } } #end in-house data processing if(isolate(input$data_source) %in% c("TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass","TCGA_PRAD_low_pass")) { TCGA_low_pass_sample_info<-get_tcga_lp_sample_info() recast_matrix <- get_recast_matrix() if (!is.null("recast_matrix")) { row_label <- rownames(recast_matrix)[order(get_rownames_gr())][as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1])) + 1] column_label <- colnames(recast_matrix)[order(get_colnames_gr())][as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2])) + 1] if(isolate(input$data_source)=="TCGA_NBL_low_pass") { row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1) column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1) } d<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label,column_label),]) if("TCGA_CNV_data_gr.....relativeCvg" %in% colnames(TCGA_low_pass_sample_info)){ d<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label,column_label),c("TCGA_CNV_data_gr.....relativeCvg","TCGA_CNV_data_gr.....sample")]) d_row<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label),c("TCGA_CNV_data_gr.....relativeCvg","TCGA_CNV_data_gr.....sample")]) d_col<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(column_label),c("TCGA_CNV_data_gr.....relativeCvg","TCGA_CNV_data_gr.....sample")]) } else { if("TCGA_CNV_data_gr.....relativeCvg" %in% colnames(TCGA_low_pass_sample_info)) d<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label,column_label),c("TCGA_CNV_data_gr.....Segment_Mean","TCGA_CNV_data_gr.....sample")]) d_row<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label),c("TCGA_CNV_data_gr.....Segment_Mean","TCGA_CNV_data_gr.....sample")]) d_col<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(column_label),c("TCGA_CNV_data_gr.....Segment_Mean","TCGA_CNV_data_gr.....sample")]) } if(nrow(d)==0){return("")} sample_info_p_scatter2<-ggplot(data = d_row,aes(x=unlist(d_row[,1]),y=unlist(d_col[,1]))) + geom_point(aes(color=unlist(d_row[,2]),shape=unlist(d_col[,2]), text=paste0("row_value: ",paste0(d_row[,1]),"/n sample: ",paste0(d_row[,2]), " col_value: ", d_col[,1],"\n sample:",paste0(d_col[,2])))) + theme(legend.position="none") + xlab("column segmentation value") + ylab("row segmentation value") + geom_smooth(method=lm) # cat(file=stderr(),paste0("sample_info_scatter2")) # cat(file=stderr(),ls()) } #d["TCGA_CNV_data_gr.....sample" } }) output$freq_table <- DT::renderDataTable({ #if(isolate(is.null(input$subset))){selected_rows<-1:nrow(mappability_df)} #textv_subset<-textv[selected_rows] #d<-as.character(names(event_data("plotly_hover"))) # # cat(file=stderr(),paste0(get_recast_matrix() # [ # 
as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1, # as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1 # ])) # cat(file=stderr(),rownames(get_recast_matrix())[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1]) #browser() #if(is.null(freq_data)){ tryCatch(freq_data<-data.table::fread(paste0(osteofn,"OS_freq_data.txt")),error = function(e) NULL)} if(is.null(freq_data)){ tryCatch( freq_data<-data.table::as.data.table(readRDS(paste0(osteofn,"OS_freq_data_lcc236.rds"))),error = function(e) NULL)} recast_matrix<-get_recast_matrix() #cat(file=stderr(),paste0(d)) if(!is.null("recast_matrix")) { row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label. column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label. d<-as.data.table(freq_data[freq_data$pos %in% c(row_label,column_label)]) # cat(file=stderr(),paste0("freq_table")) # cat(file=stderr(),ls()) if (is.null(d)) {return(data.table())} else { return(d)} } else {return(data.table())} # cat(file=stderr(),paste0(event_data("plotly_click"))) # cat(file=stderr(),paste0(names(event_data("plotly_click")))) # cat(file=stderr(),paste0(names(event_data("plotly_click")[["pointNumber"]]))) # cat(file=stderr(),paste0(event_data("plotly_click")[["pointNumber"]])) # cat(file=stderr(),paste0(event_data("plotly_click")["pointNumber"])) # cat(file=stderr(),paste0(event_data("plotly_click")["curveNumber"])) # cat(file=stderr(),paste0(event_data("plotly_click")["x"])) #cat(file=stderr(),as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1 ) #row number #cat(file=stderr(),as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1 ) #col number # # cat(file=stderr(),as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1 ) #col number # cat(file=stderr(),paste0(chromstarts_linreg)) # cat(file=stderr(),paste0(head(common_coords_linreg))) # cat(file=stderr(),paste0(head(common_coords_linreg))) # cat(file=stderr(),paste(names(input))) # cat(file=stderr(),paste(input$chrom2)) # cat(file=stderr(),paste(chromstarts_linreg[grep(input$chrom2,chromosomes)]+as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2])))) # cat(file=stderr(),paste(common_coords_linreg[chromstarts_linreg[grep(input$chrom2,chromosomes)]+as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))])) # cat(file=stderr(),paste(d)) #need to convert to global coordinates # #cat(file=stderr(),exists("ggplotmatrix")) #cat(file=stderr(),exists("event_data(\"plotly_click\")")) #cat(file=stderr(),exists("event_data")) #cat(file=stderr(),paste0(event_data)) #cat(file=stderr(),length(event_data)) #cat(file=stderr(),paste0(event_data[[1]])) #cat(file=stderr(),paste0(signedRescale)) # if(exists("ggplotmatrix") & !is.null(ggplotmatrix)){ # recast_matrix<-reshape2::dcast(data=ggplotmatrix,formula=Var1 ~ Var2, var = ggplotmatrix$value) #this creates a matrix with # if(ncol(recast_matrix)!=nrow(recast_matrix)) # { # rownames(recast_matrix)<-recast_matrix$Var1 # recast_matrix<-recast_matrix[,2:ncol(recast_matrix)] # }} # cat(file=stderr(),rownames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] ) # cat(file=stderr(),colnames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] ) # cat(file=stderr(),colnames(recast_matrix)) # 
cat(file=stderr(),rownames(recast_matrix)) # cat(file=stderr(),paste(head(ggplotmatrix))) # cat(file=stderr(),paste(input)) # cat(file=stderr(),paste(names(input))) # cat(file=stderr(),paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))])) # d<-freq_data[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1,as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #print(event_data("plotly_click")) #showLog() #class(event_data$plotly_click$pointNumber) #print(str(event_data("plotly_click"))) #d<-as.data.table(event_data("plotly_click")) #d <-freq_data[as.integer(event_data("plotly_click")[["pointNumber"]]+1),] # if (is.null(d)) {return(data.table())} else { # row_label<-rownames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] # column_label<-colnames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] # d<-as.data.table(freq_data[freq_data$pos %in% c(row_label,column_label)]) # } # cat(file=stderr(),paste0(d)) # return(d) }) } #}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/CNVScopeserver.R
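# Editor's sketch (not part of the package source): the server code above repeatedly
# maps a plotly click back to matrix bin labels. Below is a minimal, self-contained
# illustration of that indexing; `recast_matrix` and `fake_pointNumber` are toy
# stand-ins for the app's reactive state, not package objects. plotly's pointNumber is
# zero-based, which is why the server adds 1 before indexing R's 1-based dimnames.
recast_matrix <- matrix(runif(9), nrow = 3,
                        dimnames = list(paste0("chr1_", 1:3, "_", 2:4),
                                        paste0("chr2_", 1:3, "_", 2:4)))
fake_pointNumber <- c(1, 2) # what event_data("plotly_click")[["pointNumber"]][[1]] might hold
row_label <- rownames(recast_matrix)[as.integer(fake_pointNumber[1]) + 1]
col_label <- colnames(recast_matrix)[as.integer(fake_pointNumber[2]) + 1]
c(row_label, col_label)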
#' Convert a GRanges object to underscored positions.
#'
#' This function converts a GenomicRanges object into a character vector of
#' underscored positions (e.g. "chr1_1_5000"), the format used for the row and
#' column names of CNVScope matrices. It is the inverse of underscored_pos_to_GRanges().
#' @param input_gr A GenomicRanges object.
#' @param minusOneToEnd Subtract one position from the end of each genomic range
#' (so that end = start + width - 1)?
#' @keywords Genomic Ranges position
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' col_gr<-underscored_pos_to_GRanges(colnames(nbl_result_matrix_sign_small))
#' GRanges_to_underscored_pos(col_gr)
#' @export
GRanges_to_underscored_pos<-function(input_gr,minusOneToEnd=T)
{
  #importFrom GenomicRanges seqnames GRanges
  if(minusOneToEnd){adjustment<-1} else {adjustment<-0}
  output_char<-paste0(GenomicRanges::seqnames(input_gr),"_",input_gr@ranges@start,"_",
                      input_gr@ranges@start+input_gr@ranges@width-adjustment)
  return(output_char)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/GRanges_to_underscored_pos.R
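# Editor's sketch: GRanges_to_underscored_pos() round-trips with the package's
# underscored_pos_to_GRanges() helper (used throughout the server code). Assumes
# GenomicRanges is installed; the coordinates below are arbitrary.
pos <- c("chr1_1_5000", "chr2_5001_10000")
gr <- underscored_pos_to_GRanges(pos)  # character labels -> GRanges
GRanges_to_underscored_pos(gr)         # GRanges -> character labels; recovers `pos`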
#' Average edges of a matrix to facilitate downsampling.
#'
#' Averages the columns and rows of a matrix by a certain amount.
#' @keywords rescale downsample average edges matrix
#' @importFrom reshape2 colsplit
#' @importFrom Matrix colMeans rowMeans
#' @param unchangedmatrix A matrix to have edges averaged, with genomic coordinates in the form chr1_50_100 set as the column and row names.
#' @param nedges The number of edges to be averaged.
#' @param dimension Selectively averages edges in one dimension. Performs symmetric edge averaging by default.
#' @return averaged_matrix A matrix with edges averaged, which may be more amenable to downsampling.
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' dim(nbl_result_matrix_sign_small)
#' nbl_result_matrix_sign_small_avg<-averageMatrixEdges(nbl_result_matrix_sign_small,
#' nedges=1,dimension="row")
#' dim(nbl_result_matrix_sign_small_avg)
#' nbl_result_matrix_sign_small_avg<-averageMatrixEdges(nbl_result_matrix_sign_small,
#' nedges=1,dimension="column")
#' dim(nbl_result_matrix_sign_small_avg)
#' @export
averageMatrixEdges<-function(unchangedmatrix,nedges=1,dimension=c("row","column"))
{
  if(!(length(intersect(dimension,"row"))==1 | length(intersect(dimension,"column"))==1 ))
  {
    #stop with an informative message (previously printed the message and called stop() with none)
    errormsg<-paste0("Invalid dimension specification: '",paste(dimension,collapse=","),"'. Valid options are 'column' and 'row'.")
    stop(errormsg)
  }
  if("row" %in% dimension)
  {
    #average the last (nedges+1) rows into a single row and rebuild its coordinate label
    averaged_row<-(Matrix::colMeans(unchangedmatrix[(nrow(unchangedmatrix)-nedges):nrow(unchangedmatrix),]))
    averaged_rownames_df<-reshape2::colsplit(string = rownames(unchangedmatrix)[(nrow(unchangedmatrix)-nedges):nrow(unchangedmatrix)],pattern = "_",names = c("chrom","start","end"))
    averaged_matrix<-rbind(unchangedmatrix[1:(nrow(unchangedmatrix)-nedges-1),],averaged_row)
    rownames(averaged_matrix)[nrow(averaged_matrix)]<-paste(c(as.character(averaged_rownames_df[1,c("chrom","start")]),as.character(averaged_rownames_df[nrow(averaged_rownames_df),c("end")])),collapse = "_")
    unchangedmatrix<-averaged_matrix
    if(!("column" %in% dimension))
    {
      return(averaged_matrix)
    }
  }
  if("column" %in% dimension)
  {
    #same operation, applied to the trailing columns
    averaged_column<-(Matrix::rowMeans(unchangedmatrix[,(ncol(unchangedmatrix)-nedges):ncol(unchangedmatrix)]))
    averaged_colnames_df<-reshape2::colsplit(string = colnames(unchangedmatrix)[(ncol(unchangedmatrix)-nedges):ncol(unchangedmatrix)],pattern = "_",names = c("chrom","start","end"))
    averaged_matrix<-cbind(unchangedmatrix[,1:(ncol(unchangedmatrix)-nedges-1)],averaged_column)
    colnames(averaged_matrix)[ncol(averaged_matrix)]<-paste(c(as.character(averaged_colnames_df[1,c("chrom","start")]),as.character(averaged_colnames_df[nrow(averaged_colnames_df),c("end")])),collapse = "_")
    return(averaged_matrix)
  }
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/averageMatrixEdges.R
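# Editor's sketch: a typical use of averageMatrixEdges() is shaving one edge off a
# matrix so its dimensions become divisible by an intended downsampling factor.
# The 11 x 10 toy matrix and its bin coordinates here are illustrative assumptions.
m <- matrix(runif(11 * 10), nrow = 11,
            dimnames = list(paste0("chr1_", seq(1, 1001, 100), "_", seq(100, 1100, 100)),
                            paste0("chr2_", seq(1, 901, 100), "_", seq(100, 1000, 100))))
m_avg <- averageMatrixEdges(m, nedges = 1, dimension = "row")
dim(m_avg) # 10 x 10: the last two rows were averaged, so 5 now divides both dimensions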
globalVariables(c("x","y"))
#' Calculate the probability distribution of CNV concordance events with a fast kernel
#'
#' This function produces several matrices from an input CNV matrix, including a
#' windowed Z-score matrix of the same size and a percentile matrix of these Z-scores.
#' @name calcCNVKernelProbDist
#' @param submatrix A matrix of CNV data in an intrachromosomal region (e.g. chr1 vs chr1 or chr5 vs chr5).
#' @param win A window size for the matrix that calculates the windowed average using the kernel function.
#' @param debug Extra output for debugging.
#' @param parallel Use parallelization using mcmapply and doParallel?
#' @param mcmcores The number of cores used for parallelization.
#' @keywords CNV kernel probability distribution concordance fast
#' @import doParallel
#' @importFrom foreach foreach %dopar%
#' @importFrom parallel mcmapply
#' @importFrom stats sd pnorm
#' @importFrom Matrix bandSparse sparseMatrix
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' mat_prob_dist<-calcCNVKernelProbDist(nbl_result_matrix_sign_small,parallel=FALSE)
#' mat_prob_dist
#' @export
calcCNVKernelProbDist<-function(submatrix=NULL,win=5,debug=F,parallel=T,mcmcores=1)
{
  x <- if(exists("x")){get("x")} else {NULL}
  y <- if(exists("y")){get("y")} else {NULL}
  submatrix<-as.matrix(submatrix)
  if(debug) {win_start <- proc.time()}
  if(requireNamespace("smoothie",quietly = T)){
    #previously computed with spatialfil::applyFilter() and a custom win x win boxcar
    #kernel; replaced by the equivalent smoothie::kernel2dsmooth() call.
    submatrix_win_avg=smoothie::kernel2dsmooth( submatrix, kernel.type="boxcar", n=win)
  } else{
    return("Please install package smoothie to use this optional function.\n This can be done by installing CNVScope with the dependencies=T flag")
  }
  if(debug){
    print("win avg complete")
    print(proc.time() - win_start)
  }
  if(debug){diag_avg_start<-proc.time()}
  diag_avg_matrix<-matrix(0,ncol=ncol(submatrix),nrow=nrow(submatrix))
  if(debug){
    print("diag avg complete")
    print(proc.time() - diag_avg_start)
  }
  if(debug){diag_sd_start<-proc.time()}
  diag_sd_matrix<-matrix(0,ncol=ncol(submatrix),nrow=nrow(submatrix))
  if(debug){
    print("diag sd complete")
    print(proc.time() - diag_sd_start)
  }
  if(!parallel){foreach::registerDoSEQ()}
  if(parallel){doParallel::registerDoParallel()}
  #compute the mean and sd of each off-diagonal band of the matrix
  coladjustments2<-foreach(y=c(nrow(submatrix),1),.combine="rbind") %dopar% {
    coladjustments<-foreach::foreach(x=1:ncol(submatrix), .export=ls(),.combine="rbind" ,.inorder=T) %dopar% {
      if(debug){loop_start<-proc.time()}
      off_diag_for_point<-submatrix[row(submatrix)==col(submatrix)-(y-x)]
      diag_avg<-mean(off_diag_for_point)
      diag_sd<-sd(off_diag_for_point)
      diff<-(y-x)
      output<-c(diff,y,x,diag_avg,diag_sd)
      if(debug){
        print(paste0(x/ncol(submatrix)*100,"% complete"))
        print(proc.time()-loop_start)
      }
      output
    }
    colnames(coladjustments)<-c("diff","x","y","diag_avg","diag_sd")
    coladjustments
  }
  diag_sd_vec<-coladjustments2[,"diag_sd"]
  diag_sd_vec[is.na(diag_sd_vec)]<-0
  coladjustments2[,"diag_sd"]<-diag_sd_vec
  #expand the per-band averages into full bands for bandSparse()
  if(parallel==T)
  {
    bands_mcmapply<-mcmapply(FUN=function(x,y,diag_avg) {
      rep(diag_avg,length(diag_avg_matrix[row(diag_avg_matrix)==col(diag_avg_matrix)-(y-x)]))
    },x=coladjustments2[,"x"],y=coladjustments2[,"y"],diag_avg=coladjustments2[,"diag_avg"],
    mc.cores=mcmcores )
    bands<-bands_mcmapply[c(1:(length(bands_mcmapply)/2), (length(bands_mcmapply)/2+2):length(bands_mcmapply))]
  } else {
    bands_mapply<-mapply(FUN=function(x,y,diag_avg) {
      rep(diag_avg,length(diag_avg_matrix[row(diag_avg_matrix)==col(diag_avg_matrix)-(y-x)]))
    },x=coladjustments2[,"x"],y=coladjustments2[,"y"],diag_avg=coladjustments2[,"diag_avg"] )
    bands<-bands_mapply[c(1:(length(bands_mapply)/2), (length(bands_mapply)/2+2):length(bands_mapply))]
  }
  diag_avg_matrix<-as.matrix(Matrix::bandSparse(n=nrow(submatrix),m=ncol(submatrix),bands,
    k=c(-(nrow(submatrix)-1):(ncol(submatrix)-1)),
    symmetric = F,giveCsparse = T)) #this case will work for symmetric matrices, untested on asymmetric.
  #repeat for the per-band standard deviations
  if(parallel==T)
  {
    bands_mcmapply<-mcmapply(FUN=function(x,y,diag_sd) {
      rep(diag_sd,length(diag_sd_matrix[row(diag_sd_matrix)==col(diag_sd_matrix)-(y-x)]))
    },x=coladjustments2[,"x"],y=coladjustments2[,"y"],diag_sd=coladjustments2[,"diag_sd"],
    mc.cores=mcmcores )
    bands<-bands_mcmapply[c(1:(length(bands_mcmapply)/2), (length(bands_mcmapply)/2+2):length(bands_mcmapply))]
  } else {
    bands_mapply<-mapply(FUN=function(x,y,diag_sd) {
      rep(diag_sd,length(diag_sd_matrix[row(diag_sd_matrix)==col(diag_sd_matrix)-(y-x)]))
    },x=coladjustments2[,"x"],y=coladjustments2[,"y"],diag_sd=coladjustments2[,"diag_sd"] )
    bands<-bands_mapply[c(1:(length(bands_mapply)/2), (length(bands_mapply)/2+2):length(bands_mapply))]
  }
  diag_sd_matrix<-as.matrix(Matrix::bandSparse(n=nrow(submatrix),
    m=ncol(submatrix),bands,k=c(-(nrow(submatrix)-1):(ncol(submatrix)-1)),
    symmetric = F,giveCsparse = T)) #this case will work for symmetric matrices, untested on asymmetric.
  diag_avg_matrix<-t(diag_avg_matrix)
  diag_sd_matrix<-t(diag_sd_matrix)
  zscore_matrix<-(submatrix_win_avg-diag_avg_matrix)/(diag_sd_matrix)
  zscore_matrix[1,ncol(zscore_matrix)]<-0
  zscore_matrix[ncol(zscore_matrix),1]<-0
  percentile_matrix<-pnorm((submatrix_win_avg-diag_avg_matrix)/diag_sd_matrix)
  output_list<-list(zscore_matrix,percentile_matrix,submatrix,
                    coladjustments2,diag_avg_matrix,diag_sd_matrix)
  names(output_list)<-c("zscore_matrix","percentile_matrix","original_matrix",
                        "coladjustments2","diag_avg_matrix","diag_sd_matrix")
  return(output_list)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/calcCNVKernelProbDist.R
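# Editor's sketch: pulling candidate bins out of the calcCNVKernelProbDist() output,
# following the package's own example data. The 0.99 cutoff is an arbitrary
# illustration, not a package default.
load(system.file("extdata", "nbl_result_matrix_sign_small.rda", package = "CNVScope"))
pd <- calcCNVKernelProbDist(nbl_result_matrix_sign_small, parallel = FALSE)
# bins whose windowed average is extreme relative to their diagonal band:
hits <- which(pd$percentile_matrix > 0.99, arr.ind = TRUE)
head(hits)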
#' Create a linear regression matrix.
#'
#' Creates a matrix of linear regression p-values, log transformed, from every combination of columns in the parent matrix.
#' @keywords lm linear regression matrix
#' @import parallel
#' @importFrom stats lm
#' @param bin_data The parent matrix, with columns to have linear regression performed on them.
#' @param use_slurm Parallelize over a number of slurm HPC jobs? If FALSE, the program will simply run locally.
#' @param slurmjob The slurm job object produced by rslurm::slurm_apply(), after running the function initially.
#' @param job_finished Are all the slurm jobs finished and the results need retrieving?
#' @param n_nodes The number of nodes used in your slurm job.
#' @param cpus_on_each_node The number of cpus used on each node.
#' @param memory_per_node The amount of ram per node (e.g. "32g" or "2g").
#' @param walltime Time for the job to be completed for the SLURM scheduler, in hh:mm:ss format. Defaults to 4h.
#' @param partitions The partitions to which the jobs are to be scheduled, in
#' order of priority.
#' @return The output matrix, or if using slurm, the slurm job object (which should be saved as an rds file and reloaded when creating the output matrix).
#' @examples
#'
#' #small example
#' #bin_data<-matrix(runif(5*5),ncol=5)
#' foreach::registerDoSEQ()
#' #full_matrix<-suppressWarnings(calcVecLMs(bin_data))
#' #Please note that lm() will make a warning when there are two vectors that are too close
#' #numerically (this will always happen along the diagonal).
#' #This is normal behavior and is controlled & accounted for using this function as well as
#' #the postProcessLinRegMatrix function (which converts the infinite values to a maximum).
#'
#' @export
calcVecLMs<-function(bin_data,use_slurm=F,job_finished=F,slurmjob=NULL,n_nodes=NULL,cpus_on_each_node=2,memory_per_node="2g",walltime="4:00:00",partitions="ccr,quick")
{
  #if(dim(bin_data)[1]<dim(bin_data)[2]){bin_data<-t(bin_data)}
  bin_data_df<-as.data.frame(bin_data)
  #all ordered pairs of columns to regress against each other
  bin.pairs<-expand.grid(1:ncol(bin_data),1:ncol(bin_data))
  colnames(bin.pairs)<-c("x","y")
  if(!use_slurm){
    neglogpvalues<-mcmapply(x=bin.pairs[,1],y=bin.pairs[,2],function(x,y) -log(summary(lm(unlist(bin_data_df[,y])~unlist(bin_data_df[,x])))$coefficients[2,4]) )
    output_matrix<-matrix(neglogpvalues,ncol=ncol(bin_data))
    #cap the infinite -log(p) values (e.g. on the diagonal) at the finite maximum
    output_matrix[is.infinite(output_matrix)]<-max(output_matrix[!is.infinite(output_matrix)])
    return(output_matrix)
  }
  if(use_slurm){
    if(job_finished)
    {
      output_matrix<-matrix(rslurm::get_slurm_out(slurmjob),ncol=ncol(bin_data))
      output_matrix[is.infinite(unlist(output_matrix))]<-max(unlist(output_matrix[!is.infinite(unlist(output_matrix))]))
      colnames(output_matrix)<-colnames(bin_data)
      rownames(output_matrix)<-colnames(bin_data)
      return(output_matrix)
    } else {
      if(is.null(n_nodes)){n_nodes<-ncol(bin_data_df)/2}
      lm_test_sjob <- rslurm::slurm_apply(function(x,y) -log(summary(lm(unlist(bin_data_df[,y])~unlist(bin_data_df[,x])))$coefficients[2,4]),
        bin.pairs, jobname = 'CNVScope_LM_apply',
        nodes = n_nodes, cpus_per_node = cpus_on_each_node, submit = T,
        slurm_options = list(partition=partitions,mem=memory_per_node,time=walltime))
      return(lm_test_sjob)
    }
  }
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/calcVecLMs.R
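# Editor's sketch: local (non-slurm) use of calcVecLMs() on a small toy matrix,
# mirroring the commented example above. The diagonal's infinite -log(p) values are
# capped at the finite maximum inside the function; the postProcessLinRegMatrix()
# function mentioned above performs the analogous step for slurm-assembled matrices.
# Note: calcVecLMs() uses parallel::mcmapply(), so on Windows set options(mc.cores = 1) first.
foreach::registerDoSEQ()
bin_data <- matrix(runif(10 * 4), ncol = 4)
lm_mat <- suppressWarnings(calcVecLMs(bin_data))
dim(lm_mat) # 4 x 4 matrix of -log(p-values), one per ordered column pair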
#' Create chromosomal interaction matrices for the CNVScope shiny application.
#'
#' Splits a whole genome interaction matrix into chromosome-pair submatrices, melts
#' each, and writes them to disk in the RData format used by the shiny application.
#' @keywords Interaction matrix
#' @rawNamespace import(GenomicInteractions, except = c(start,end))
#' @import reshape2 magrittr foreach doParallel
#' @importFrom biomaRt useMart getBM
#' @importFrom reshape2 colsplit
#' @param whole_genome_mat The matrix containing all of the data, from which the individual matrices will be split.
#' @param output_dir The folder where the matrices, in RData format, will be written.
#' @param prefix Filename prefix for individual matrices. Default: "nbl_".
#' @examples
#' #examples for this function would be too large to
#' #include and should be run on an HPC machine node.
#' #illustration of this process is shown clearly in
#' #the vignette and can be done if a user properly
#' #follows the instructions.
#' # The function is intended to be run on a whole interactome matrix (chr1-X).
#' @return The list of files already written to disk, with full filenames and paths.
#' @export
createChromosomalMatrixSet<-function(whole_genome_mat,output_dir=NULL,prefix="nbl_")
{
  original_dir<-getwd()
  if(is.null(output_dir)){output_dir<-getwd()}
  if(!dir.exists(output_dir)){dir.create(output_dir)}
  setwd(output_dir)
  chromosomes<-paste0("chr",c(seq(1:22),"X"),"_")
  chrom.pairs<-expand.grid(1:length(chromosomes),1:length(chromosomes))
  grch37 = biomaRt::useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org",
                            path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
  ensembl_gene_tx_table <- biomaRt::getBM(attributes = c("ensembl_gene_id", "ensembl_transcript_id","chromosome_name","transcript_start","transcript_end","start_position","end_position",
                                                         "strand", "percentage_gene_gc_content","external_gene_name","gene_biotype"),
                                          mart = grch37)
  foreach(q=rev(1:nrow(chrom.pairs)),.export=ls(),.errorhandling = "pass") %dopar% {
    if(!file.exists(paste0(chromosomes[chrom.pairs[q,1]],chromosomes[chrom.pairs[q,2]],prefix,"sample_matched","_unrescaled",".RData")))
    {
      print(paste0("line of incomplete matrix:",q,chromosomes[chrom.pairs[q,1]],chromosomes[chrom.pairs[q,2]]))
      full_submatrix_dimensions<-c(length(grep(chromosomes[chrom.pairs[q,1]],rownames(whole_genome_mat))),length(grep(chromosomes[chrom.pairs[q,2]],colnames(whole_genome_mat))))
      ggplotmatrix<-writeAsymmetricMeltedChromosomalMatrixToDisk(whole_genome_matrix = whole_genome_mat,chrom1 = chrom.pairs[q,1],chrom2 = chrom.pairs[q,2],extra_data_matrix = NULL,transpose=T,sequential=T,debug=F,desired_range_start = min(full_submatrix_dimensions),desired_range_end = max(full_submatrix_dimensions),rescale = F,saveToDisk = F) #,flip_row_col_genes = T
      ggplotmatrix<-ggplotmatrix[,c("Var1","Var2","value","value1")]
      ggplotmatrix$Var1<-as.character(ggplotmatrix$Var1)
      ggplotmatrix$Var2<-as.character(ggplotmatrix$Var2)
      ggplotmatrix$value<-as.numeric(unlist(ggplotmatrix$value))
      ggplotmatrix$value1<-as.character(ggplotmatrix$value1)
      ggplotmatrix$orig_value<-reshape2::colsplit(ggplotmatrix$value1,"value:",c("junk","orig_value"))$orig_value
      ggplotmatrix$value<-as.numeric(ggplotmatrix$orig_value)
      save("ggplotmatrix",file=paste0(chromosomes[chrom.pairs[q,1]],chromosomes[chrom.pairs[q,2]],prefix,"sample_matched","_unrescaled",".RData"))
    }
  }
  outputfilelist<-list.files(pattern=utils::glob2rx("*chr*.RData"))
  setwd(original_dir)
  return(outputfilelist)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/createChromosomalMatrixSet.R
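# A hedged, non-runnable sketch of the intended call, assuming a whole-genome
# matrix object named whole_genome_mat and a writable scratch directory (both
# hypothetical); a doParallel backend is registered first because the
# chromosome pairs are split across workers with %dopar%.
# doParallel::registerDoParallel(cores = 4)
# written_files <- createChromosomalMatrixSet(whole_genome_mat = whole_genome_mat,
#   output_dir = "chrom_matrices", prefix = "nbl_")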
#' List of Divisors
#'
#' Generates a list of divisors of an integer number.
#' Identical to the same function within the numbers package.
#' The code has been modified from the numbers package,
#' following GPL 3.0 guidelines on 3/30/2022, section 5.
#' Reference for GPL v3.0 LICENSE:
#' https://www.gnu.org/licenses/gpl-3.0.en.html.
#'
#'
#' @name divisors
#' @keywords divisors numbers
#' @param n an integer whose divisors will be generated.
#' @return Returns a vector of integers.
#' @examples
#' divisors(1) # 1
#' divisors(2) # 1 2
#' divisors(3) # 1 3
#' divisors(2^5) # 1 2 4 8 16 32
#' divisors(1000) # 1 2 4 5 8 10 ... 100 125 200 250 500 1000
#' divisors(1001) # 1 7 11 13 77 91 143 1001
#' @seealso [numbers::divisors()]
#' @export
divisors <- function(n) {
  if (n != floor(n) || n <= 0)
    stop("Argument 'n' must be a positive integer.")
  if (n == 1) {
    return(1)
  } else if (n <= 1000) {
    return( (1:n)[(n %% 1:n) == 0] )
  } else {
    pfs <- rle(primeFactors(n))
    pfs_len <- pfs$length
    pfs_val <- pfs$values
    m <- length(pfs_len)
    D <- pfs_val[1]^c(0:pfs_len[1])
    if (m == 1) return(D)
    for (k in 2:m) {
      D <- c( outer(D, pfs_val[k]^c(0:pfs_len[k])) )
    }
    return( sort(D) )
  }
}
primeSieve <- function(n) {
  if (!is.numeric(n) || length(n) != 1 || floor(n) != ceiling(n) || n < 1)
    stop("Argument 'n' must be an integer number greater than or equal to 1.")
  if (n > 2^53 - 1)
    stop("Argument 'n' must be smaller than 2^53 - 1.")
  if (n < 2) return(c())
  p <- seq(1, n, by=2)
  q <- length(p)
  p[1] <- 2
  if (n >= 9) {
    for (k in seq(3, sqrt(n), by=2)) {
      if (p[(k+1)/2] != 0)
        p[seq((k*k+1)/2, q, by=k)] <- 0
    }
  }
  p[p > 0]
}
Primes <- function(n1 = 1, n2 = NULL) {
  if (is.null(n2))
    return(primeSieve(n1))
  if (!is.numeric(n1) || length(n1) != 1 || floor(n1) != ceiling(n1) || n1 <= 0 ||
      !is.numeric(n2) || length(n2) != 1 || floor(n2) != ceiling(n2) || n2 <= 0 )
    stop("Arguments 'n1' and 'n2' must be integers.")
  if (n2 > 2^53 - 1)  stop("Upper bound 'n2' must be smaller than 2^53-1.")
  if (n1 > n2)        stop("Upper bound must be greater than lower bound.")
  if (n2 <= 1000) {
    P <- primeSieve(n2)
    return(P[P >= n1])
  }
  myPrimes <- primeSieve(floor(sqrt(n2)))
  N <- seq.int(n1, n2)
  n <- length(N)
  A <- numeric(n)
  if (n1 == 1) A[1] <- -1
  for (p in myPrimes) {
    r <- n1 %% p                              # remainder modulo p
    if (r == 0) { i <- 1 } else { i <- p - r + 1 }  # find next divisible by p
    if (i <= n && N[i] == p) { i <- i + p }         # if it is p itself, skip
    while (i <= n) { A[i] <- 1; i <- i + p }        # mark those divisible by p
  }
  return(N[A == 0])
}
primeFactors <- function(n) {
  if (!is.numeric(n) || length(n) != 1 || n != round(n) || n < 1) {
    warning("Argument 'n' must be a positive integer.")
    return(NULL)
  }
  if (n < 4) return(n)
  if (n <= 2^53 - 1) {
    f <- c()
    p <- Primes(floor(sqrt(n)))
    d <- which(n %% p == 0)
    if (length(d) == 0) return(n)   # n is prime
    for (q in p[d]) {
      while (n %% q == 0) {
        f <- c(f, q)
        n <- n/q
      }
    }
    if (n > 1) f <- c(f, n)
  } else {
    warning("Argument 'n' too big: use 'gmp::factorize()' for this.")
    f <- NA
  }
  return(f)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/divisors.R
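# A brief sketch exercising both branches of divisors(): the direct modulo
# scan used for n <= 1000 and the prime-factorisation path used for larger n;
# the product of primeFactors(n) should reconstruct n.
divisors(60)    # 1 2 3 4 5 6 10 12 15 20 30 60 (scan branch)
divisors(1024)  # 1 2 4 ... 1024, powers of two (factorisation branch)
stopifnot(prod(primeFactors(7560)) == 7560)  # 2^3 * 3^3 * 5 * 7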
#' Downsample a genomic matrix, preserving coordinate labels.
#'
#' Downsamples a matrix by a specified factor, combining the genomic range labels of the merged bins.
#' @name downsample_genomic_matrix
#' @keywords downsample genomic matrix
#' @import reshape2 foreach
#' @importFrom OpenImageR down_sample_image
#' @param whole_matrix A matrix to be downsampled, on a single chromosome
#' @param downsamplefactor A factor by which to reduce the matrix. Must be something that both the rows and columns are divisible by.
#' @param singlechromosome Single chromosome mode; Multi-chromosome not yet implemented (leave T)
#' @return whole_matrix_dsamp A downsampled matrix.
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' downsample_genomic_matrix(whole_matrix=nbl_result_matrix_sign_small,
#' downsamplefactor=5,singlechromosome=TRUE)
#' @export
#lines to disable spurious CRAN notes below. i is obviously not global.
globalVariables("i")
dontCheck('i')
downsample_genomic_matrix<-function(whole_matrix,downsamplefactor,singlechromosome=T)
{
  i <- if(exists("i")){get("i")} else {NULL}
  if(singlechromosome)
  {
    if(nrow(whole_matrix)%%downsamplefactor==0 & ncol(whole_matrix)%%downsamplefactor==0 )
    {
      whole_matrix_dsamp<-down_sample_image(whole_matrix,factor=downsamplefactor,gaussian_blur = T)
    } else {return("downsample not a factor of rows and columns")}
    #combine labels for the downsampled matrix (for factor 5, take five ranges and combine them into 1.)
    #essentially take the first part of the matrix (line 1 chr1_1235, then concatenate it to the final point in the range of the last bin).
    downsampled_colnames<-unlist(foreach(i=seq(from=1,to=ncol(whole_matrix),by=downsamplefactor)) %do% {
      paste0(paste(reshape2::colsplit(colnames(whole_matrix)[i],"_",names=c("chrom","start","end"))[,c(1,2)],collapse="_"),"_",
             reshape2::colsplit(colnames(whole_matrix)[i+downsamplefactor-1],"_",names=c("chrom","start","end"))[,c(3)])
    })
    downsampled_rownames<-unlist(foreach(i=seq(from=1,to=nrow(whole_matrix),by=downsamplefactor)) %do% {
      paste0(paste(reshape2::colsplit(rownames(whole_matrix)[i],"_",names=c("chrom","start","end"))[,c(1,2)],collapse="_"),"_",
             reshape2::colsplit(rownames(whole_matrix)[i+downsamplefactor-1],"_",names=c("chrom","start","end"))[,c(3)])
    })
    colnames(whole_matrix_dsamp)<-downsampled_colnames
    rownames(whole_matrix_dsamp)<-downsampled_rownames
    return(whole_matrix_dsamp)
  }
  if(!singlechromosome)
  {
    return("multi-chromosome not yet implemented")
  }
}
#example usage, kept commented out so that nothing executes when the file is sourced or the package is built:
#test_mat<-matrix(runif(81),nrow=9)
#colnames(test_mat)<-paste0("chr1_",1:ncol(test_mat))
#rownames(test_mat)<-paste0("chr1_",1:nrow(test_mat))
#downsample_genomic_matrix(whole_matrix=test_mat,downsamplefactor=3,singlechromosome=T)
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/downsample_genomic_matrix.R
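# A small runnable sketch with full chr_start_end labels (the commented demo
# above uses two-part labels, which leave the end coordinate NA); the matrix
# and coordinates here are hypothetical. Factor 3 divides 9, factor 2 does
# not and yields the error string instead.
bin_labels <- paste0("chr1_", seq(0, 80, 10), "_", seq(10, 90, 10))
m <- matrix(runif(81), nrow = 9, dimnames = list(bin_labels, bin_labels))
downsample_genomic_matrix(whole_matrix = m, downsamplefactor = 3,
                          singlechromosome = TRUE)  # 3 x 3, labels merged
downsample_genomic_matrix(whole_matrix = m, downsamplefactor = 2,
                          singlechromosome = TRUE)  # returns the error string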
#' Find the negative log p-value of a pair of vectors.
#'
#' Finds the negative log p-value of a regression between a pair of vectors, if it exists.
#' Checks first to see if there is a p-value to return.
#' @keywords lm linear regression
#' @param x a vector that is regressed in the fashion y~x.
#' @param y a vector that is regressed in the fashion y~x.
#' @param repval the replacement value returned when the two vectors are identical and the regression cannot be performed, default 300.
#' @param lowrepval The low replacement value in the case that a regression p-value is undefined.
#' @param signed change the sign of the negative log p-value based on the sign of beta?
#' e.g. if the line has a negative slope, the returned value will also be negative.
#' If there is a positive slope, there will be a positive negative log p-value.
#' If this option is disabled, then no sign changes will happen based on the sign of the slope.
#' @return The negative log p-value or replacement value.
#' @examples
#' #small example
#' xval<-c(1,1,1,1,1)
#' yval<-c(1,2,3,4,5)
#' a<-c(3,4,5,6,7)
#' extractNegLogPval(x=xval,y=yval) #no possible p-value if one vector is constant.
#' #In some edge cases this may not be correct (if the data lies near a constant),
#' # but the individual sample data should reveal true trends.
#' suppressWarnings(cor(xval,yval)) #you can't get a correlation value either.
#' cor(a,a) #gives correlation of 1.
#' extractNegLogPval(a,a)
#' #gives replacement value.
#' suppressWarnings(extractNegLogPval(x=a,y=yval))
#' #gives 107.3909 and warns about a nearly perfect fit.
#' @export
extractNegLogPval<-function(x,y,repval=300,lowrepval=0,signed=F)
{
  #isTRUE() guards against all.equal() returning a character description of the difference.
  if(isTRUE(all.equal(x,y))){neglogpval<-repval} else{
    coef<-summary(lm(y~x))$coefficients
    if(nrow(coef)>=2&ncol(coef)>=4)
    {
      neglogpval<-(-log(coef[2,4]))
      if(signed){neglogpval<-sign(coef[2,1])*neglogpval}
    } else {neglogpval<-lowrepval}
  }
  return(neglogpval)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/extractNegLogPval.R
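# A short sketch of the signed option, assuming simple noisy ascending and
# descending responses: a negative slope flips the sign of the -log p-value.
set.seed(2)
x <- 1:10
y_up <- 2 * x + rnorm(10, sd = 0.1)
y_down <- -2 * x + rnorm(10, sd = 0.1)
extractNegLogPval(x, y_up, signed = TRUE)    # large positive value
extractNegLogPval(x, y_down, signed = TRUE)  # large negative value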
#' Form sample matrix from GDC copy number data files.
#'
#' Reads GDC segmentation files, adds sample information, and forms a data matrix of samples and bins of a specified size.
#' @name formSampleMatrixFromRawGDCData
#' @keywords segmentation GDC
#' @import doParallel
#' @importFrom data.table fread
#' @importFrom reshape2 colsplit
#' @importFrom tidyr drop_na unite
#' @importFrom stringr str_detect
#' @importFrom plyr ddply
#' @importFrom dplyr select mutate everything
#' @param tcga_files GDC files to be read
#' @param format file format, TCGA or TARGET.
#' @param binsize the binsize, in base pairs (default 1Mb or 1e6). This value provides a good balance of resolution and speed with memory sensitive applications.
#' @param freadskip the number of lines to skip in the GDC files, typically 14 (13 metadata lines plus a leading blank line in NBL data). Adjust as needed.
#' @param parallel Register a doParallel backend and process the samples in parallel? Defaults to FALSE.
#' @param debug debug mode enable (allows specific breakpoints to be checked).
#' @param chromosomes A vector of chromosomes to be used. Defaults to chr1-chrX,
#' but others can be added e.g. chrY or chrM for Y chromosome or mitochondrial DNA.
#' Format expected is a character vector, e.g. c("chr1", "chr2", "chr3").
#' @param sample_pat Pattern used to extract sample name from filename.
#' Use "" to use the filename.
#' @param sample_col The name of the sample column (for custom format input).
#' @param chrlabel The name of the chromosome column (for custom format input).
#' @param startlabel The name of the start column (for custom format input).
#' @param endlabel The name of the end column (for custom format input).
#' @param cnlabel The name of the copy number column (for custom format input).
#' @return A dataframe containing the aggregated copy number values,
#' based on the parameters provided.
#' @examples
#' #Pipeline examples would be too large to include in package checks.
#' #please see browseVignettes("CNVScope") for a demonstration.
#'
#' @export
globalVariables(c('begin','s',".","pos",'....relativeCvg','....sample','current_gr.....Segment_Mean','....uuid'),add=F)
formSampleMatrixFromRawGDCData<-function(tcga_files=NULL,format="TARGET",binsize=1e6, freadskip=NULL, parallel = F,debug=F, chromosomes=paste0("chr",c(seq(1:22),"X"),"_"),sample_pat="", sample_col="sample",chrlabel=">chr", startlabel="begin", endlabel="end",cnlabel="log2")
{
  if (!requireNamespace('BSgenome.Hsapiens.UCSC.hg19', quietly = TRUE)) {
    return("Please install BSgenome.Hsapiens.UCSC.hg19 to use this function")
  }
  #importFrom GenomicRanges tileGenome mcols
  #importFrom IRanges mergeByOverlaps IRanges
  #importFrom GenomeInfoDb seqinfo
  chr <- if(exists("chr")){get("chr")} else {NULL}
  cn <- if(exists("cn")){get("cn")} else {NULL}
  #chromosomes<-paste0("chr",c(seq(1:22),"X"),"_")
  # TCGA_CNV_data_with_sample_info<-ldply(tcga_files,
  # function(x) {input_csv<-fread(x,skip=freadskip)
  # sample_info_colsplit<-reshape2::colsplit(basename(x),"_|-|\\.",c("pre","project","num","sample","comparison","fn_ext"))
  # input_csv_with_sample_info<-dplyr::bind_cols(input_csv,sample_info_colsplit[rep(1,nrow(input_csv)),])
  # return(input_csv_with_sample_info)
  # }
  # )
  #creates the chromosomes object. This is necessary to clean out non-biological chromosomes of the form chrUn_gl000211.
  if(format=="TARGET" | format=="TCGA"){
    if(format=="TARGET") { if(is.null(freadskip)) {freadskip=14}
      TCGA_CNV_data_with_sample_info<-plyr::ldply(tcga_files,function(x) freadGDCfile(x,fread_skip=freadskip))
      #TCGA_CNV_data_with_sample_info <- tcga_files %>% purrr::map_dfr(freadGDCfile, format = "TCGA",fread_skip=freadskip)
      #combines the files in a dataframe with ldply and a modified fread.
    }
    if(format=="TCGA"){
      TCGA_CNV_data_with_sample_info<-plyr::ldply(tcga_files,freadGDCfile,format="TCGA")
      #TCGA format files are really simple to read, compared to TARGET files.
      #TCGA_CNV_data_with_sample_info$....sample<-tcga_files
      #TCGA_CNV_data_with_sample_info <- tcga_files %>% purrr::map_dfr(freadGDCfile, format = "TCGA",fread_skip=freadskip)
      #return(TCGA_CNV_data_with_sample_info)
      #combines the files in a dataframe with ldply and a modified fread.
    }
    #TCGA_CNV_data_with_sample_info_small<-ldply(tcga_files[1:100],freadGDCfile)
    #end testing
    TCGA_CNV_data<-TCGA_CNV_data_with_sample_info
    if(format=="TCGA"){
      if(!(all(stringr::str_detect(TCGA_CNV_data$Chromosome,"chr")))){
        TCGA_CNV_data$Chromosome<-paste0("chr",TCGA_CNV_data$Chromosome) #1->chr1, 9->chr9.
      }
      colnames(TCGA_CNV_data)<-gsub("Chromosome",">chr",gsub("End","end",gsub("Start","begin",colnames(TCGA_CNV_data))))
      #adds >chr, begin, and end columns if they come in a different form.
    }
    if(debug){browser()}
    TCGA_CNV_data_range_filtered<-TCGA_CNV_data %>% tidyr::drop_na(begin,end)
    #drops those with a missing begin and end.
    TCGA_CNV_data_dt<-data.table::as.data.table(TCGA_CNV_data_range_filtered)
    #converts to data table.
    TCGA_CNV_data_gr<-GenomicRanges::GRanges(seqnames = TCGA_CNV_data_range_filtered$`>chr`,ranges = IRanges::IRanges(start = TCGA_CNV_data_range_filtered$begin,end = TCGA_CNV_data_range_filtered$end),... = TCGA_CNV_data_range_filtered[,4:ncol(TCGA_CNV_data_range_filtered)])
    #creates GRanges object with other columns appended. These can be accessed using mcols()
    bins<-GenomicRanges::tileGenome(GenomeInfoDb::seqinfo(BSgenome.Hsapiens.UCSC.hg19::Hsapiens),tilewidth=binsize,cut.last.tile.in.chrom = T)
    #creates bins using the tileGenome function.
    if(debug){browser()}
    bins<-bins[as.character(bins@seqnames) %in% gsub("_","",chromosomes)]
    #removes Y and junk chromosomes.
    #modify chromosomes object at the top to add a Y if needed.
    #Y is best removed unless ALL of the participants are male, e.g. prostate cancer.
    rownames_gr = bins
    colnames_gr = bins
    #sets row and column bins
    if(format=="TARGET") {samples<-unique(GenomicRanges::mcols(TCGA_CNV_data_gr)$....sample)}
    if(format=="TCGA") {
      #samples <- dirname(tcga_files) %>% tibble::as_tibble() %>% tidyr::separate(value, sep="/",into=c("dir","uuid")) %>% dplyr::pull(uuid)
      #mcols(TCGA_CNV_data_gr)$sample<-samples #best not done here.
      samples<-unique(GenomicRanges::mcols(TCGA_CNV_data_gr)$....uuid)
      GenomicRanges::mcols(TCGA_CNV_data_gr)$....sample<-GenomicRanges::mcols(TCGA_CNV_data_gr)$....uuid
      #add in sample column, add in sample mcol
    }
    #gets samples, sets ....sample column
    options(scipen=999)
    bins_underscored<-GRanges_to_underscored_pos(bins)
    #convert bins to underscored strings.
if(parallel){doParallel::registerDoParallel()} if(!parallel){foreach::registerDoSEQ()} if(format=="TARGET"){ TCGA_CNV_data_gr_all_comparisons<-TCGA_CNV_data_gr TCGA_CNV_data_gr_single_comparison<-TCGA_CNV_data_gr[mcols(TCGA_CNV_data_gr)$....comparison=="NormalVsPrimary"] TCGA_CNV_data_gr<-TCGA_CNV_data_gr_single_comparison } sample_aggregated_segvals<-foreach(s=1:length(samples),.combine="cbind",.errorhandling = "stop",.export=ls(),.packages=c("magrittr","GenomicRanges","plyr","CNVScope")) %dopar% { #browser() if(format=="TCGA"){ current_gr<-TCGA_CNV_data_gr[mcols(TCGA_CNV_data_gr)$....uuid %in% samples[s]] #grabs the GRanges object for the sth uuid. } if(format == "TARGET") { current_gr<-TCGA_CNV_data_gr[mcols(TCGA_CNV_data_gr)$....sample %in% samples[s]] #grabs the current GRanges object for TARGET data. } current_merged_df<-as.data.frame(IRanges::mergeByOverlaps(bins,current_gr)) #takes the range and merges it over the bins. current_merged_df$pos<-unlist( tidyr::unite( current_merged_df[,c("bins.seqnames","bins.start","bins.end")],pos ) ) #takes three columns of chr, start, end and converts it to "chr_1000_2000" format. #sort(table(current_merged_df$pos),decreasing=T) if(format=="TARGET") { current_merged_df_bins_vals<-current_merged_df[,c("pos","....relativeCvg","....sample")] #,"....comparison" #grabs the BIN position, copy number, and sample of the current merged DF. current_merged_df_bins_vals$....relativeCvg<-as.numeric(as.character(current_merged_df_bins_vals$....relativeCvg)) #converts relative coverage to a number. current_merged_df_bins_vals<-na.omit(current_merged_df_bins_vals) #removes NAs current_merged_df_bins_aggregated<-plyr::ddply(na.omit(current_merged_df_bins_vals),.(pos),plyr::summarise,meanrelcvg=mean(....relativeCvg),samples=paste0(unique(....sample),collapse=",")) #removes NAs from the merged by overlaps df, grabs the position, mean copy number, and concatenated samples for each bin like "NAKKEF,PALHRL,ABCABC". #note that it removes nonunique samples from this new string of samples for each bin. } if(format=="TCGA") { current_merged_df_bins_vals <- current_merged_df[,c("pos","current_gr.....Segment_Mean","....uuid")] %>% na.omit() %>% dplyr::mutate(current_gr.....Segment_Mean = current_gr.....Segment_Mean %>% as.character() %>% as.numeric()) #grabs position, CN number, sample ID #drops NAs #converts the CN from factor to character to numeric. current_merged_df_bins_aggregated<-plyr::ddply(na.omit(current_merged_df_bins_vals),.(pos),plyr::summarise,segmentmean=mean(current_gr.....Segment_Mean),samples=paste0(unique(....uuid),collapse=",")) #removes NAs from the merged by overlaps df, grabs the position, mean copy number, and concatenated samples for each bin like "NAKKEF,PALHRL,ABCABC". #note that it removes nonunique samples from this new string of samples for each bin. } #current_merged_df_bins_aggregated_test<-ddply(na.omit(current_merged_df_bins_vals[1,]),.(pos),summarise,meanrelcvg=mean(current_merged_df_bins_vals$....relativeCvg))#,samples=list(unique(current_merged_df_bins_vals$....sample)) # #insert bins that are not represented. unused_bins<-bins_underscored[!(bins_underscored %in% current_merged_df_bins_aggregated$pos)] unused_bins_rows<-as.data.frame(cbind(unused_bins,rep(0,length(unused_bins)),rep(samples[s],length(unused_bins)))) #sets the unused bins to have zero CN. if(format=="TARGET"){colnames(unused_bins_rows)<-c("pos","meanrelcvg","samples") unused_bins_rows$meanrelcvg<-as.numeric(as.character(unused_bins_rows$meanrelcvg)) #converts CN to numeric. 
current_merged_df_bins_aggregated_with_unused<-rbind(current_merged_df_bins_aggregated[,c("pos","meanrelcvg","samples")],unused_bins_rows[,c("pos","meanrelcvg","samples")])
      #combines the empty bins with the rest.
      }
      if(format=="TCGA"){colnames(unused_bins_rows)<-c("pos","segmentmean","samples")
      unused_bins_rows$segmentmean<-as.numeric(as.character(unused_bins_rows$segmentmean))
      #converts CN to numeric.
      current_merged_df_bins_aggregated_with_unused<-rbind(current_merged_df_bins_aggregated[,c("pos","segmentmean","samples")],unused_bins_rows[,c("pos","segmentmean","samples")])
      #combines the empty bins with the rest.
      }
      current_merged_df_bins_aggregated_with_unused<-current_merged_df_bins_aggregated_with_unused[order(underscored_pos_to_GRanges(current_merged_df_bins_aggregated_with_unused$pos)),]
      #orders the combined bins once more.
      #end testing
      if(format=="TARGET"){
        relcvg_df<-as.data.frame(current_merged_df_bins_aggregated_with_unused$meanrelcvg)
        rownames(relcvg_df)<-current_merged_df_bins_aggregated_with_unused$pos
        colnames(relcvg_df)<-samples[s]
        print(paste0(samples[s]," complete"))
        #prints completion.
        #takes this merged dataframe, for the given sample, and returns it to the loop.
        return(relcvg_df)
      }
      if(format=="TCGA"){
        #TCGA follows precisely the same procedure as TARGET.
        #current_merged_df_bins_aggregated_with_unused[,cnlabel]
        segmentmean_df<-as.data.frame(current_merged_df_bins_aggregated_with_unused$segmentmean)
        rownames(segmentmean_df)<-current_merged_df_bins_aggregated_with_unused$pos
        colnames(segmentmean_df)<-samples[s]
        print(paste0(samples[s]," complete"))
        return(segmentmean_df)
      }
    }
    return(sample_aggregated_segvals)
    #each round of the loop gives each CN mean for each bin in a single sample
    #foreach combines the samples into a dataframe with cbind.
    #the column names are the samples and the rownames are the bin positions.
  }
  if(format=="custom")
  {
    #read in the files
    if(is.null(freadskip)) {freadskip=0}
    TCGA_CNV_data_with_sample_info<-plyr::ldply(tcga_files,function(x) freadGDCfile(x,format = "custom",fread_skip=freadskip,sample_pattern = sample_pat))
    #ensure labels of chr, begin, end are correctly specified.
    # chrlabel=">chr"
    # startlabel="begin"
    # endlabel="end"
    if(chrlabel!="chr")
    {
      colnames(TCGA_CNV_data_with_sample_info)[which(colnames(TCGA_CNV_data_with_sample_info)==chrlabel)]<-"chr"
    }
    if(startlabel!="start")
    {
      colnames(TCGA_CNV_data_with_sample_info)[which(colnames(TCGA_CNV_data_with_sample_info)==startlabel)]<-"start"
    }
    if(endlabel!="end")
    {
      colnames(TCGA_CNV_data_with_sample_info)[which(colnames(TCGA_CNV_data_with_sample_info)==endlabel)]<-"end"
    }
    TCGA_CNV_data<-TCGA_CNV_data_with_sample_info
    #drop na for start,end.
    #reorder to have chr,start,end in the first three columns (so that columns 4 to ncol can be appended to the table)
    #convert to data table from tibble.
    TCGA_CNV_data_range_filtered<-TCGA_CNV_data %>% tidyr::drop_na(start,end) %>% dplyr::select(chr,start,end,sample,dplyr::everything()) %>% data.table::as.data.table()
    #creates GRanges object with other columns appended. These can be accessed using mcols()
    TCGA_CNV_data_gr<-GenomicRanges::GRanges(seqnames = TCGA_CNV_data_range_filtered$`chr`,ranges = IRanges::IRanges(start = TCGA_CNV_data_range_filtered$start,end = TCGA_CNV_data_range_filtered$end),... = TCGA_CNV_data_range_filtered[,4:ncol(TCGA_CNV_data_range_filtered)])
    #creates bins using the tileGenome function.
bins<-GenomicRanges::tileGenome( GenomeInfoDb::seqinfo(BSgenome.Hsapiens.UCSC.hg19::Hsapiens),tilewidth=binsize,cut.last.tile.in.chrom = T) #removes Y and junk chromosomes. #modify chromosomes object at the top to add a Y if needed. #Y is best removed unless ALL of the participants are male, e.g. prostate cancer. bins<-bins[as.character(bins@seqnames) %in% gsub("_","",chromosomes)] #sets row and column bins rownames_gr = bins colnames_gr = bins #Gets samples & sets sample column #Four dots are added because the GRanges constructor adds 4 dots at the beginning of imported #metadata columns. samples<-unique(GenomicRanges::mcols(TCGA_CNV_data_gr)[[paste0("....",sample_col)]]) #set decimal places to maximum to ensure that positions are not truncated. options(scipen=999) bins_underscored<-GRanges_to_underscored_pos(bins) #convert bins to underscored strings. #register parallel backend, if in parallel mode. if(parallel){doParallel::registerDoParallel()} #Deregister existing parallel backends if parallel flag set to false. if(!parallel){foreach::registerDoSEQ()} #be sure that your sample includes the comparison, if each sample has multiple comparison types #e.g. IF PALGGG has a NormalVsPrimary and another comparison, be sure to only include one or the #other. if(debug){browser()} sample_aggregated_segvals<-foreach(s=1:length(samples),.combine="cbind",.errorhandling = "stop",.export=ls(),.packages=c("magrittr","GenomicRanges","plyr","CNVScope")) %dopar% { #Grab the sth sample. current_gr<-TCGA_CNV_data_gr[mcols(TCGA_CNV_data_gr)[,paste0("....",sample_col),drop=T] %in% samples[s]] current_merged_df<-as.data.frame(IRanges::mergeByOverlaps(bins,current_gr)) #takes the range and merges it over the bins. current_merged_df$pos<-unlist( tidyr::unite( current_merged_df[,c("bins.seqnames","bins.start","bins.end")],pos ) ) #takes three columns of chr, start, end and converts it to "chr_1000_2000" format. current_merged_df_bins_vals <- current_merged_df[,c("pos",paste0("....",cnlabel),paste0("....",sample_col))] %>% na.omit() #grabs position, CN number, sample ID current_merged_df_bins_vals[,paste0("....",cnlabel)]<-as.numeric(as.character(current_merged_df_bins_vals[,paste0("....",cnlabel),drop=T])) #converts the CN from factor to character to numeric. current_merged_df_bins_vals<-na.omit(current_merged_df_bins_vals) #removes NAs # current_merged_df_bins_aggregated<-na.omit(plyr::ddply(current_merged_df_bins_vals,plyr::.(pos),summarise,cn=mean(current_merged_df_bins_vals[,paste0("....",cnlabel)]),samples=paste0(unique(current_merged_df_bins_vals[,paste0("....",sample_col)]),collapse=","))) cnlabel_with_dots<-sym(paste0("....",cnlabel)) sample_with_dots<-sym(paste0("....",sample_col)) sample_with_dots_q<-enquo(sample_with_dots) # current_merged_df_bins_aggregated<-na.omit(plyr::ddply(current_merged_df_bins_vals,plyr::.(pos),summarise,cn=mean(rlang::sym(paste0("....",cnlabel))),samples=paste0(unique(current_merged_df_bins_vals[,paste0("....",sample_col)]),collapse=","))) # current_merged_df_bins_aggregated<-plyr::ddply(na.omit(current_merged_df_bins_vals),.(pos),summarise,meanrelcvg=mean(....relativeCvg),samples=paste0(unique(....sample),collapse=",")) #removes NAs from the merged by overlaps df, grabs the position, mean copy number, and concatenated samples for each bin like "NAKKEF,PALHRL,ABCABC". #note that it removes nonunique samples from this new string of samples for each bin. current_merged_df_bins_aggregated <- current_merged_df_bins_vals %>% dplyr::group_by(pos,!! 
sample_with_dots_q) %>% dplyr::summarize(cn :=mean(!!as.name(paste0("....",cnlabel)))) %>% na.omit() %>% as.data.frame() #insert bins that are not represented. unused_bins<-bins_underscored[!(bins_underscored %in% current_merged_df_bins_aggregated$pos)] options(stringsAsFactors = F) unused_bins_rows<-as.data.frame(cbind(as.character(unused_bins), as.character(rep(samples[s],length(unused_bins))), rep(0,length(unused_bins)) )) #sets the unused bins to have zero CN. colnames(unused_bins_rows)<-c("pos","samples",paste0(cnlabel)) unused_bins_rows[,cnlabel]<-as.numeric(as.character(unused_bins_rows[,cnlabel])) #converts CN to numeric. colnames(current_merged_df_bins_aggregated)<-c("pos","samples",cnlabel) current_merged_df_bins_aggregated_with_unused<-rbind(current_merged_df_bins_aggregated[,c("pos","samples",cnlabel)],unused_bins_rows[,c("pos","samples",cnlabel)]) #combines the empty bins with the rest. current_merged_df_bins_aggregated_with_unused<-current_merged_df_bins_aggregated_with_unused[order(underscored_pos_to_GRanges(current_merged_df_bins_aggregated_with_unused$pos)),] #orders the combined bins once more. #current_merged_df_bins_aggregated_with_unused[,cnlabel] cn_df<-as.data.frame(current_merged_df_bins_aggregated_with_unused[,cnlabel]) rownames(cn_df)<-current_merged_df_bins_aggregated_with_unused$pos colnames(cn_df)<-samples[s] print(paste0(samples[s]," complete")) return(cn_df) } return(sample_aggregated_segvals) } }
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/formSampleMatrixFromRawGDCData.R
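# A hedged, non-runnable sketch of the custom-format path; the directory,
# file pattern, and column names below are hypothetical, and the call
# requires BSgenome.Hsapiens.UCSC.hg19 plus a registered backend if
# parallel = TRUE.
# seg_files <- list.files("segs", pattern = "\\.seg\\.txt$", full.names = TRUE)
# cn_mat <- formSampleMatrixFromRawGDCData(tcga_files = seg_files,
#   format = "custom", binsize = 1e6, sample_pat = "[^_]+",
#   chrlabel = "chrom", startlabel = "loc.start", endlabel = "loc.end",
#   cnlabel = "seg.mean")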
#' Read GDC segmentation datafile for low-pass sequencing data.
#'
#' Reads a GDC segmentation file and extracts the segmentation data.
#' @keywords read file
#' @import magrittr
#' @importFrom data.table fread
#' @importFrom reshape2 colsplit
#' @importFrom dplyr bind_cols
#' @param file GDC file to be read
#' @param fread_skip The number of metadata lines to be skipped (typically 14)
#' @param format The format of the files (TCGA, TARGET, or custom).
#' @param CN_colname The name of the column containing the copy number values.
#' @param sample_pattern Regex pattern to obtain the sample ID from the filename.
#' @param sample_colname Alternatively, a column can be specified with the sample ID on each line.
#' @references https://docs.gdc.cancer.gov/Encyclopedia/pages/TCGA_Barcode/
#' @return input_tsv_with_sample_info A data frame containing the sample information extracted
#' from the filename, including sample name & comparison type.
#' @examples
#' freadGDCfile(file =
#' system.file("extdata","somaticCnvSegmentsDiploidBeta_TARGET-30-PANRVJ_NormalVsPrimary.tsv",
#' package = "CNVScope"))
#' @export
#utils::globalVariables(c('....uuid','barcode1','barcode2','current_gr.....Segment_Mean','fn', 'sep', 'uuid'), add=F)
#global variables calls were put in to make it pass CRAN checks. Feel free to disable as needed.
freadGDCfile<-function(file,fread_skip=NULL, format = "TARGET",CN_colname="log2", sample_pattern="[^_]+",sample_colname=NULL)
{
  #importFrom tibble as_tibble
  fn <- if(exists("fn")){get("fn")} else {NULL}
  barcode1 <- if(exists("barcode1")){get("barcode1")} else {NULL}
  barcode2 <- if(exists("barcode2")){get("barcode2")} else {NULL}
  value <- if(exists("value")){get("value")} else {NULL}
  uuid <- if(exists("uuid")){get("uuid")} else {NULL}
  sep <- if(exists("sep")){get("sep")} else {NULL}
  if(format=="TARGET"){
    if(is.null(fread_skip)){ fread_skip=14}
    input_tsv<-data.table::fread(file,skip=fread_skip)
    sample_info_colsplit<-reshape2::colsplit(basename(file),"_|-|\\.",c("pre","project","num","sample","comparison","fn_ext"))
    input_tsv_with_sample_info<-dplyr::bind_cols(input_tsv,sample_info_colsplit[rep(1,nrow(input_tsv)),])
    if(length(na.omit(unlist(sample_info_colsplit)))!=6){return(NULL)}
  }
  if(format=="TCGA")
  {
    if(!requireNamespace("tibble",quietly = T)){return("tibble package is required for processing TCGA format files.")}
    if(is.null(fread_skip)){ fread_skip=0}
    input_tsv_with_sample_info<-data.table::fread(file,skip=fread_skip) %>% dplyr::mutate(fn = basename(file),sep="--") %>%
      tidyr::separate(fn,sep="---", into = c("barcode1","barcode2","dataformat")) %>%
      tidyr::separate(barcode1,sep="-|_", into = c("project1","tss1","participant1","sample1","portion_analyte1","plate1","center1","barcode_id1"),remove=FALSE) %>%
      tidyr::separate(barcode2,sep="-|_", into = c("project2","tss2","participant2","sample2","portion_analyte2","plate2","center2","barcode_id2"),remove=FALSE) %>%
      dplyr::mutate( uuid = dirname(file) %>% tibble::as_tibble() %>% tidyr::separate(value, sep="/",into=c("dir","uuid")) %>% dplyr::pull(uuid)) %>%
      dplyr::select( -sep)
  }
  if(format=="custom")
  {
    if(is.null(fread_skip)){ fread_skip=0}
    #read data in
    input_tsv_with_sample_info<-data.table::fread(file,skip=fread_skip)
    #get basename and extract sample information.
    if(is.null(sample_colname))
    {
      input_tsv_with_sample_info$sample<-stringr::str_extract(string = basename(file), pattern=sample_pattern)
    }
    if(!is.null(sample_colname))
    {
      #use [[ ]] so that the column is extracted as a vector from the data.table.
      input_tsv_with_sample_info$sample<-input_tsv_with_sample_info[[sample_colname]]
    }
    input_tsv_with_sample_info$fn<-basename(file)
  }
  return(input_tsv_with_sample_info)
}
#for reference, see TCGA barcode documentation.
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/freadGDCfile.R
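# A short sketch of the custom-format path, reusing the packaged TARGET file
# as a generic tab-delimited input; the regex below is an illustrative way
# to pull the barcode out of the filename.
f <- system.file("extdata",
  "somaticCnvSegmentsDiploidBeta_TARGET-30-PANRVJ_NormalVsPrimary.tsv",
  package = "CNVScope")
seg <- freadGDCfile(f, format = "custom", fread_skip = 14,
                    sample_pattern = "TARGET-[0-9]+-[A-Z0-9]+")
unique(seg$sample)  # "TARGET-30-PANRVJ"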
#' Get the genes in the genomic ranges indicated by the row and column labels.
#'
#' Gets the genes in the ranges within each cell of the matrix.
#' @keywords genomic matrix
#' @importFrom biomaRt useMart getBM
#' @import foreach doParallel
#' @param genomic_matrix A matrix with row and column names of the format chr1_100_200 (chr,start,end)
#' @param prot_only Include only the protein coding genes from ensembl?
#' @param sequential Turn off parallelism with doParallel?
#' @param flip_row_col Give column genes along the rows and row genes down columns?
#' @return concatenated_gene_matrix A matrix with row and column genes
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' load(system.file("extdata","ensembl_gene_tx_table_prot.rda",package = "CNVScope"))
#' load(system.file("extdata","grch37.rda",package = "CNVScope"))
#' getAnnotationMatrix(genomic_matrix=nbl_result_matrix_sign_small[1:5,1:5],sequential=TRUE,
#' prot_only=TRUE)
#' @export
getAnnotationMatrix<-function(genomic_matrix,prot_only=T,sequential=F,flip_row_col=F)
{
  #importFrom GenomicRanges GRanges seqnames mcols
  #importFrom IRanges IRanges subsetByOverlaps
  i <- if(exists("i")){get("i")} else {NULL}
  if(!exists("grch37")){
    grch37 = biomaRt::useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
  }
  if(!exists("ensembl_gene_tx_table_prot") & prot_only==TRUE)
  {
    #use || so the gene_biotype column check short-circuits when the table itself is absent.
    if(!exists("ensembl_gene_tx_table") || is.null(ensembl_gene_tx_table[["gene_biotype"]]))
    {
      ensembl_gene_tx_table <- biomaRt::getBM(attributes = c("ensembl_gene_id", "ensembl_transcript_id","chromosome_name","transcript_start","transcript_end","start_position","end_position", "strand", "percentage_gene_gc_content","external_gene_name","gene_biotype"), mart = grch37)
      ensembl_gene_gr<-GenomicRanges::GRanges(seqnames = paste0("chr",ensembl_gene_tx_table$chromosome_name),ranges = IRanges::IRanges(start = ensembl_gene_tx_table$start_position,end=ensembl_gene_tx_table$end_position),strand = ensembl_gene_tx_table$strand,...=ensembl_gene_tx_table)
    }
    ensembl_gene_tx_table_prot<-ensembl_gene_tx_table[ensembl_gene_tx_table$gene_biotype=="protein_coding",]
  }
  ensembl_gene_gr_prot<-GenomicRanges::GRanges(seqnames = paste0("chr",ensembl_gene_tx_table_prot$chromosome_name),
                                               ranges = IRanges::IRanges(start = ensembl_gene_tx_table_prot$start_position,
                                                                         end=ensembl_gene_tx_table_prot$end_position),
                                               strand = ensembl_gene_tx_table_prot$strand,
                                               ...=ensembl_gene_tx_table_prot)
  rownames_gr_genomic_matrix<-underscored_pos_to_GRanges(rownames(genomic_matrix))
  colnames_gr_genomic_matrix<-underscored_pos_to_GRanges(colnames(genomic_matrix))
  if(sequential){foreach::registerDoSEQ()} else {registerDoParallel()}
  if(prot_only)
  {
    print("prot_only")
    row_gene_strings_genomic_matrix<-foreach(i=1:length(rownames_gr_genomic_matrix),.inorder=T) %dopar% {
      print(i)
      outputstring<-paste( unique(gsub("\\..*[[:space:]]","",unique(GenomicRanges::mcols(IRanges::subsetByOverlaps(ensembl_gene_gr_prot,rownames_gr_genomic_matrix[i]))$....external_gene_name))) ,collapse=" ")
      if(is.null(outputstring) | anyNA(outputstring) | length(outputstring)==0) {outputstring<-""}
      outputstring
    }
    col_gene_strings_genomic_matrix<-foreach(i=1:length(colnames_gr_genomic_matrix),.inorder=T) %dopar% {
      print(i)
      outputstring<-paste( unique(gsub("\\..*[[:space:]]","",unique(GenomicRanges::mcols(IRanges::subsetByOverlaps(ensembl_gene_gr_prot,colnames_gr_genomic_matrix[i]))$....external_gene_name))) ,collapse=" ")
if(is.null(outputstring) | anyNA(outputstring) | length(outputstring)==0) {outputstring<-""} outputstring } } else { print("all_genes") row_gene_strings_genomic_matrix<-foreach(i=1:length(rownames_gr_genomic_matrix),.inorder=T) %dopar% { print(i) outputstring<-paste( unique(gsub("\\..*[[:space:]]","",unique(GenomicRanges::mcols(IRanges::subsetByOverlaps(ensembl_gene_gr,rownames_gr_genomic_matrix[i]))$....external_gene_name))) ,collapse=" ") if(is.null(outputstring) | anyNA(outputstring) | length(outputstring)==0) {outputstring<-""} outputstring } col_gene_strings_genomic_matrix<-foreach(i=1:length(colnames_gr_genomic_matrix),.inorder=T) %dopar% { print(i) outputstring<-paste( unique(gsub("\\..*[[:space:]]","",unique(GenomicRanges::mcols(IRanges::subsetByOverlaps(ensembl_gene_gr,colnames_gr_genomic_matrix[i]))$....external_gene_name))) ,collapse=" ") if(is.null(outputstring) | anyNA(outputstring) | length(outputstring)==0) {outputstring<-""} outputstring } } if(!flip_row_col) { col_gene_strings_matrix_genomic_matrix_alt<-matrix(rep((unlist(col_gene_strings_genomic_matrix)),nrow(genomic_matrix)),ncol=ncol(genomic_matrix),nrow=nrow(genomic_matrix),byrow = T) row_gene_strings_matrix_genomic_matrix<-matrix(rep(unlist(row_gene_strings_genomic_matrix),ncol(genomic_matrix)),ncol=ncol(genomic_matrix),nrow=nrow(genomic_matrix)) #essential } if(flip_row_col) { row_gene_strings_matrix_genomic_matrix<-matrix(rep((unlist(col_gene_strings_genomic_matrix)),nrow(genomic_matrix)),ncol=ncol(genomic_matrix),nrow=nrow(genomic_matrix),byrow = T) col_gene_strings_matrix_genomic_matrix_alt<-matrix(rep(unlist(row_gene_strings_genomic_matrix),ncol(genomic_matrix)),ncol=ncol(genomic_matrix),nrow=nrow(genomic_matrix)) #essential } concatenated_gene_matrix<-matrix( paste0("row_genes:",row_gene_strings_matrix_genomic_matrix ,"\ncol genes:",col_gene_strings_matrix_genomic_matrix_alt,"\noriginal value:",as.matrix(genomic_matrix)) ,ncol=ncol(col_gene_strings_matrix_genomic_matrix_alt), nrow=nrow(row_gene_strings_matrix_genomic_matrix)) return(concatenated_gene_matrix) } #genomic_matrix=genomic_matrix #prot_only=T #sequential=F # subset(expressiondata,strsplit(col_gene_strings_matrix_genomic_matrix_alt[1],split=" ")[[1]] # expression_data<-data.table::fread(paste0(groupdir,"dalgleishjl/hicnv/cor_OS_discovery_exp-cgh.txt")) #i<-1 #col_gene_strings_matrix_genomic_matrix_alt_split<-sapply(col_gene_strings_matrix_genomic_matrix_alt,function(x) strsplit(x,split=" ")) #single_point_expression<-expression_data[expression_data$gene %in% unlist(col_gene_strings_matrix_genomic_matrix_alt_split[i]) & !(expression_data$chrom %in% c("chrY,chrM")),]
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/getAnnotationMatrix.R
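# A hedged sketch of the flip_row_col option, reusing the packaged annotation
# tables so no biomaRt query is needed; it swaps which axis's genes lead each
# cell's label.
load(system.file("extdata", "nbl_result_matrix_sign_small.rda", package = "CNVScope"))
load(system.file("extdata", "ensembl_gene_tx_table_prot.rda", package = "CNVScope"))
load(system.file("extdata", "grch37.rda", package = "CNVScope"))
ann_flipped <- getAnnotationMatrix(genomic_matrix = nbl_result_matrix_sign_small[1:3, 1:3],
                                   sequential = TRUE, prot_only = TRUE, flip_row_col = TRUE)
ann_flipped[1, 1]  # "row_genes: ... col genes: ... original value: ..."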
#' Get Block Indices from an asymmetric (or symmetric) matrix.
#'
#' This function segments a matrix, including asymmetric matrices, using multiple imputation (MI) techniques and a segmentation algorithm to generate breakpoints for columns and rows.
#'
#' @keywords HiCseg MI multiple imputation Hi-C CNV breakpoints jointseg
#' @param genomicmatrix the large, whole matrix from which blocks are taken
#' @param algorithm Algorithm to be used: HiCseg or jointSeg.
#' @param nb_change_max the maximal number of changepoints, passed to HiCseg (if this algorithm is used). Note: HiCseg doesn't actually obey this limit. Rather, use it as a parameter to increase/decrease segmentation extent.
#' @param distrib Passed to HiCseg_linkC_R, from their documentation: Distribution of the data: "B" is for Negative Binomial distribution, "P" is for the Poisson distribution and "G" is for the Gaussian distribution."
#' @param model Passed on to HiCseg_linkC_R: "Type of model: "D" for block-diagonal and "Dplus" for the extended block-diagonal model."
#' @param MI_strategy strategy to make the matrix temporarily symmetric. "average" adds a number of values equal to the average of the matrix, while "copy" copies part of the matrix to the shorter side, making a square matrix.
#' @param transpose transpose the matrix and output the breakpoints? Some segmentation algorithms (e.g. HiCseg) produce different results when used against the transposed version of the matrix, as they expect symmetry. This allows the output of additional breakpoints. Users can choose to take intersect() or union() on the results to get conserved changepoints or additional changepoints, depending on need.
#' @return An output list of the following:
#' @return breakpoints_col A vector of breakpoints for the columns.
#' @return breakpoints_row A vector of breakpoints for the rows.
#' @return t_breakpoints_col A vector of breakpoints for columns on the transposed genomic matrix.
#' @return t_breakpoints_row A vector of breakpoints for the rows on the transposed genomic matrix.
#' @examples
#'
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' submatrix_tiny<-nbl_result_matrix_sign_small
#' tiny_test<-getAsymmetricBlockIndices(submatrix_tiny,nb_change_max=10,algorithm="jointSeg")
#' \dontrun{
#' submatrix_wide<-submatrix_tiny[1:5,]
#' submatrix_narrow<-submatrix_tiny[,1:5]
#' wide_test<-getAsymmetricBlockIndices(submatrix_wide,distrib = "G",model = "Dplus",
#' nb_change_max = 1e4)
#' #the below work, but the time to run all of these would be greater than 10 seconds.
#' random_wide<-matrix(runif(n = 400*200),ncol=400,nrow=200)
#' random_narrow<-matrix(runif(n = 400*200),ncol=200,nrow=400)
#' random_wide_test_avg<-getAsymmetricBlockIndices(random_wide,
#' distrib = "G",model = "Dplus",nb_change_max = 1e4)
#' random_narrow_test_avg<-getAsymmetricBlockIndices(random_narrow,
#' distrib = "G",model = "Dplus",nb_change_max = 1e4)
#' random_wide_test_copy<-getAsymmetricBlockIndices(random_wide,
#' distrib = "G",model = "Dplus",nb_change_max = 1e4,MI_strategy = "copy")
#' random_narrow_test_copy<-getAsymmetricBlockIndices(random_narrow,
#' distrib = "G",model = "Dplus",nb_change_max = 1e4,MI_strategy = "copy")
#' genomicmatrix=random_narrow
#' nb_change_max=100
#' model = "D"
#' distrib = "G"
#' MI_strategy="copy"
#' #question-- does it pick different breakpoints if transposed first?
#' #Answer: yes, at least in Dplus model.
#' rm(genomicmatrix)
#' rm(model)
#' rm(distrib)
#' rm(MI_strategy)
#' random_wide_test_copy<-getAsymmetricBlockIndices(genomicmatrix = random_wide,
#' distrib = "G",
#' model = "Dplus",nb_change_max = 1e2,MI_strategy = "copy")
#' random_narrow_test_copy<-getAsymmetricBlockIndices(random_narrow,distrib = "G",
#' model = "Dplus",
#' nb_change_max = 1e2,MI_strategy = "copy")
#' random_wide_test_copy_t<-getAsymmetricBlockIndices(genomicmatrix = t(random_wide),
#' distrib = "G",model = "Dplus",
#' nb_change_max = 1e2,MI_strategy = "copy")
#' random_narrow_test_copy_t<-getAsymmetricBlockIndices(genomicmatrix = t(random_narrow),
#' distrib = "G",model = "Dplus",
#' nb_change_max = 1e2,MI_strategy = "copy")
#' length(intersect(random_wide_test_copy$breakpoints_col,
#' random_wide_test_copy_t$breakpoints_row))/length(unique(c(random_wide_test_copy$breakpoints_col,
#' random_wide_test_copy_t$breakpoints_row)))
#' random_wide_test_copy_with_transpose<-getAsymmetricBlockIndices(genomicmatrix = random_wide,
#' distrib = "G",model = "Dplus",nb_change_max = 1e2,MI_strategy = "copy",transpose = T)
#' random_narrow_test_copy_with_transpose<-getAsymmetricBlockIndices(genomicmatrix = random_narrow,
#' distrib = "G",model = "Dplus",nb_change_max = 1e2,MI_strategy = "copy",transpose = T) #passes tests
#' conserved_breakpoints_col<-intersect(random_narrow_test_copy_with_transpose$breakpoints_col,
#' random_narrow_test_copy_with_transpose$t_breakpoints_row)
#' conserved_breakpoints_row<-intersect(random_narrow_test_copy_with_transpose$breakpoints_row,
#' random_narrow_test_copy_with_transpose$t_breakpoints_col)
#' random_wide_test_copy_with_transpose<-getAsymmetricBlockIndices(genomicmatrix = random_wide,
#' distrib = "G",model = "Dplus",nb_change_max = 1e2,MI_strategy = "copy",transpose = T)
#' conserved_breakpoints_col<-intersect(random_wide_test_copy_with_transpose$breakpoints_col,
#' random_wide_test_copy_with_transpose$t_breakpoints_row)
#' conserved_breakpoints_row<-intersect(random_wide_test_copy_with_transpose$breakpoints_row,
#' random_wide_test_copy_with_transpose$t_breakpoints_col)
#' }
#' @export
getAsymmetricBlockIndices<-function(genomicmatrix=NULL,algorithm="HiCseg",nb_change_max=100,distrib = "G",model = "D",MI_strategy="average",transpose=T)
{
  # if(Sys.info()["sysname"]=="Darwin"){algorithm="jointSeg"}
  if(algorithm=="jointSeg"){
    #compute the segmentation once and reuse it for rows and columns.
    bestBkp<-jointseg::jointSeg(genomicmatrix,K=nb_change_max)$bestBkp
    breakpoints_col<-bestBkp
    breakpoints_row<-bestBkp
    #define the untransposed output first, so that transpose = F still returns a result.
    output_list<-list(breakpoints_col,breakpoints_row)
    names(output_list)<-c("breakpoints_col","breakpoints_row")
    if(transpose)
    {
      t_bestBkp<-jointseg::jointSeg(t(genomicmatrix),K=nb_change_max)$bestBkp
      t_breakpoints_col<-t_bestBkp
      t_breakpoints_row<-t_bestBkp
      output_list<-list(breakpoints_col,breakpoints_row,t_breakpoints_col,t_breakpoints_row)
      names(output_list)<-c("breakpoints_col","breakpoints_row","t_breakpoints_col","t_breakpoints_row")
    }
    return(output_list)}
  if(nrow(genomicmatrix)==ncol(genomicmatrix))
  {
    hicsegresults<-HiCseg::HiCseg_linkC_R(size_mat=dim(genomicmatrix)[1],nb_change_max = nb_change_max,distrib = distrib,mat_data = as.matrix(genomicmatrix),model = model)
    indices<-c(hicsegresults$t_hat)
    zerosremoved<-indices[!indices == 0]
    #truncating the block indices for the shorter dimension
    #returning the row and column breakpoints (instead of just one set of them, as HiCseg typically does).
    return(zerosremoved)
  } else {
    #extending the matrix for rows
    if(ncol(genomicmatrix) > nrow(genomicmatrix))
    {
      if(MI_strategy=="average")
      {
        extended_matrix<-rbind(genomicmatrix,rep(rep(mean(as.numeric(unlist(genomicmatrix)))),ncol(genomicmatrix)),c(ncol(genomicmatrix)-nrow(genomicmatrix)))
      }
      if(MI_strategy=="copy")
      { extended_matrix<-rbind(genomicmatrix,genomicmatrix[1:(ncol(genomicmatrix)-nrow(genomicmatrix)),])}
      if(transpose){
        if(MI_strategy=="average")
        {
          t_extended_matrix<-cbind(t(genomicmatrix), matrix(rep(rep(mean(as.numeric(unlist(t(genomicmatrix)))),ncol(t(genomicmatrix))),c(nrow(t(genomicmatrix))-ncol(t(genomicmatrix)))),nrow = nrow(t(genomicmatrix)),ncol=c(nrow(t(genomicmatrix))-ncol(t(genomicmatrix))) ))
        }
        if(MI_strategy=="copy")
        {
          t_extended_matrix<-cbind(t(genomicmatrix),t(genomicmatrix)[,1:(nrow(t(genomicmatrix))-ncol(t(genomicmatrix)))])
        }
      }
    }
    if(ncol(genomicmatrix) < nrow(genomicmatrix))
    {
      if(MI_strategy=="average")
      {
        extended_matrix<-cbind(genomicmatrix,rep(rep(mean(as.numeric(unlist(genomicmatrix)))),nrow(genomicmatrix)),c(nrow(genomicmatrix)-ncol(genomicmatrix)))
      }
      if(MI_strategy=="copy")
      {
        extended_matrix<-cbind(genomicmatrix,genomicmatrix[,1:(nrow(genomicmatrix)-ncol(genomicmatrix))])
      }
      if(transpose){
        if(MI_strategy=="average")
        {
          t_extended_matrix<-rbind(t(genomicmatrix),matrix(rep( rep(mean(as.numeric(unlist(t(genomicmatrix)))) #a single mean
                                                                ,nrow(t(genomicmatrix))) #a single row
                                                            ,(ncol(t(genomicmatrix))-nrow(t(genomicmatrix)))),ncol=ncol(t(genomicmatrix)),nrow=(ncol(t(genomicmatrix))-nrow(t(genomicmatrix))))) } #the missing piece
        if(MI_strategy=="copy")
        {
          t_extended_matrix<-rbind(t(genomicmatrix),t(genomicmatrix)[1:(ncol(t(genomicmatrix))-nrow(t(genomicmatrix))),])
        }
      }
    }
    if(transpose){
      t_hicsegresults<-HiCseg::HiCseg_linkC_R(size_mat=dim(t_extended_matrix)[1],nb_change_max = nb_change_max,distrib = distrib,mat_data = as.matrix(t_extended_matrix),model = model)
      t_indices<-c(t_hicsegresults$t_hat)
      t_zerosremoved<-t_indices[!t_indices == 0]
      t_breakpoints_col<-t_zerosremoved[t_zerosremoved < ncol(t(genomicmatrix))]
      t_breakpoints_row<-t_zerosremoved[t_zerosremoved < nrow(t(genomicmatrix))]
    }
    hicsegresults<-HiCseg::HiCseg_linkC_R(size_mat=dim(extended_matrix)[1],nb_change_max = nb_change_max,distrib = distrib,mat_data = as.matrix(extended_matrix),model = model)
  }
  indices<-c(hicsegresults$t_hat)
  zerosremoved<-indices[!indices == 0]
  #truncating the block indices for the shorter dimension
  breakpoints_col<-zerosremoved[zerosremoved < ncol(genomicmatrix)]
  breakpoints_row<-zerosremoved[zerosremoved < nrow(genomicmatrix)]
  #returning the row and column breakpoints (instead of just one set of them, as HiCseg typically does).
  output_list<-list(breakpoints_col,breakpoints_row)
  #it's computationally more efficient to return these as a pair as they will almost always be needed in pairs.
  names(output_list)<-c("breakpoints_col","breakpoints_row")
  if(transpose)
  {
    output_list<-list(breakpoints_col,breakpoints_row,t_breakpoints_col,t_breakpoints_row)
    names(output_list)<-c("breakpoints_col","breakpoints_row","t_breakpoints_col","t_breakpoints_row")
  }
  #genomicmatrix<-t(genomicmatrix)
  return(output_list)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/getAsymmetricBlockIndices.R
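# A brief sketch mirroring the intersect/union idea from the roxygen examples
# above, using the fast jointSeg path on the packaged matrix; with the default
# transpose = TRUE the t_* fields are populated for comparison.
load(system.file("extdata", "nbl_result_matrix_sign_small.rda", package = "CNVScope"))
bp <- getAsymmetricBlockIndices(nbl_result_matrix_sign_small,
                                nb_change_max = 10, algorithm = "jointSeg")
conserved_bp <- intersect(bp$breakpoints_col, bp$t_breakpoints_row)  # conserved changepoints
all_bp <- union(bp$breakpoints_col, bp$t_breakpoints_row)            # additional changepoints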
#' Calculate block averages and areas in a matrix given breakpoints.
#'
#' This function produces several matrix outputs of averages and areas of matrix blocks, given a pair of vectors for breakpoints.
#' @name getBlockAverageMatrixFromBreakpoints
#' @keywords block average matrix breakpoints segmentation
#' @import foreach doParallel
#' @importFrom jointseg jointSeg
#' @param whole_matrix the large, whole matrix from which blocks are taken
#' @param breakpoints_col An integer vector of column breakpoints, including 1 and the number of columns in the whole matrix.
#' @param breakpoints_row An integer vector of row breakpoints, including 1 and the number of rows in the whole matrix.
#' @param outputs A list of the following possible outputs (default all): "blockaverages_reformatted_by_index","blockaverages_reformatted_by_label","blockaverages_matrix_idx_area","blockaverages_matrix_idx_avg","blockaverages_matrix_label_avg", or "blockaverages_matrix_label_area"
#' @return An output list of the following:
#' @return blockaverages_reformatted_by_index a matrix of the block averages and areas, in long format, with indexes used to generate the averages.
#' @return blockaverages_reformatted_by_label a matrix of the block averages and areas, in long format, with labels of the indexes used to generate the averages.
#' @return blockaverages_matrix_idx_area a matrix of the block areas, with indexes based on the original row/col index used to generate the data.
#' @return blockaverages_matrix_idx_avg a matrix of the block averages, with indexes based on the original row/col index used to generate the data.
#' @return blockaverages_matrix_label_area a matrix of the block areas, with indexes based on the original row/col label used to generate the data.
#' @return blockaverages_matrix_label_avg a matrix of the block averages, with indexes based on the original row/col label used to generate the data.
#' @examples #' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope")) #' set.seed(303) #' mat<-matrix(data=runif(n = 25),nrow=5,ncol=5,dimnames = list(c("chr1_0_5000", #' "chr1_5000_10000","chr1_10000_15000","chr1_15000_20000","chr1_20000_25000"), #' c("chr1_0_5000","chr1_5000_10000","chr1_10000_15000","chr1_15000_20000","chr1_20000_25000"))) #' breakpoints_col<-c(1,2,4,5) #' breakpoints_row<-c(1,2,4,5) #' foreach::registerDoSEQ() #' getBlockAverageMatrixFromBreakpoints(whole_matrix=mat,breakpoints_col=breakpoints_col, #' breakpoints_row=breakpoints_row) #' \dontrun{ #extra examples #' mat<-matrix(data=round(runif(min = 0,max=100,n = 25)),nrow=5,ncol=5, #' dimnames = list(c("chr1_0_5000","chr1_5000_10000","chr1_10000_15000","chr1_15000_20000", #' "chr1_20000_25000"),c("chr2_0_50000","chr2_50000_100000", #' "chr2_100000_150000","chr2_150000_200000","chr2_200000_250000"))) #' breakpoints_col<-c(1,2,4,5) #' breakpoints_row<-c(1,2,4,5) #' avg_results<-getBlockAverageMatrixFromBreakpoints(whole_matrix=mat, #' breakpoints_col=breakpoints_col,breakpoints_row=breakpoints_row) #' avg_results$blockaverages_reformatted_by_label #' avg_results$blockaverages_reformatted_by_index #' whole_matrix=mat #' mat<-matrix(data=round(runif(min = 0,max=100,n = 25)),nrow=5,ncol=5, #' dimnames = list(c("chr1_0_5000","chr1_5000_10000","chr1_10000_15000", #' "chr1_15000_20000","chr1_20000_25000"),c("chr2_0_50000", #' "chr2_50000_100000","chr2_100000_150000", #' "chr2_150000_200000","chr2_200000_250000"))) #' breakpoints_col<-c(1,2,4,5) #' breakpoints_row<-c(1,2,4,5) #' avg_results<-getBlockAverageMatrixFromBreakpoints(whole_matrix=mat, #' breakpoints_col=breakpoints_col,breakpoints_row=breakpoints_row) #' avg_results$blockaverages_reformatted_by_label #' avg_results$blockaverages_reformatted_by_index #' whole_matrix=mat #' submatrix<-nbl_result_matrix_sign_small #' breakpoints_row_jointseg<-jointseg::jointSeg(submatrix,K=5)$bestBkp #' breakpoints_col_jointseg<-jointseg::jointSeg(t(submatrix),K=5)$bestBkp #' submatrix_avg_results<-getBlockAverageMatrixFromBreakpoints(whole_matrix=submatrix, #' breakpoints_col=breakpoints_col_jointseg,breakpoints_row=breakpoints_row_jointseg) #' } #' @export globalVariables(c("j")) getBlockAverageMatrixFromBreakpoints<-function(whole_matrix,breakpoints_col,breakpoints_row,outputs=c("blockaverages_reformatted_by_index","blockaverages_reformatted_by_label","blockaverages_matrix_idx_area","blockaverages_matrix_idx_avg","blockaverages_matrix_label_avg","blockaverages_matrix_label_area")) { i <- if(exists("i")){get("i")} else {NULL} breakpoints_col<-as.integer(unique( gsub("^0$",1,c(0,breakpoints_col,ncol(whole_matrix))) )) breakpoints_row<-as.integer(unique( gsub("^0$",1,c(0,breakpoints_row,nrow(whole_matrix))) )) blockaverages<-foreach(j=1:(length(breakpoints_col)-1),.combine="rbind",.inorder=T) %do% { foreach(i=1:(length(breakpoints_row)-1),.combine="rbind",.inorder=T) %dopar% { #whole_matrix[i,j] print(paste0("i",i,"j",j)) t(as.data.frame(c( breakpoints_row[i],breakpoints_col[j],breakpoints_row[i+1],breakpoints_col[j+1],mean(as.numeric(unlist(whole_matrix[breakpoints_row[i]:breakpoints_row[i+1],breakpoints_col[j]:breakpoints_col[j+1]]))), as.numeric(abs(breakpoints_row[i]-breakpoints_row[i+1])*abs(breakpoints_col[j]-breakpoints_col[j+1])) ))) } } #blockaverages_reformatted<-matrix(as.numeric(blockaverages),ncol=3,nrow=((length(rowindices_subset)-1)*3)) colnames(blockaverages)<-c("rowstart","colstart","rowend","colend","average","area") 
rownames(blockaverages)<-NULL
  #blockaverages[blockaverages[,"area"]==0,]<-NULL
  blockaverages_reformatted_by_index<-blockaverages[(blockaverages[,"area"]!=0),]
  #labels each block by the row/column bin names at its start and end, carrying the average and area values over from the index version.
  blockaverages_reformatted_by_label<-foreach(i=1:nrow(blockaverages_reformatted_by_index),.combine="rbind") %do% {
    outputline<-c( rownames(whole_matrix)[as.integer(gsub("^0$","1",blockaverages_reformatted_by_index[i,"rowstart"]))],
                   colnames(whole_matrix)[as.integer(gsub("^0$","1",blockaverages_reformatted_by_index[i,"colstart"]))],
                   rownames(whole_matrix)[as.integer(blockaverages_reformatted_by_index[i,"rowend"])],
                   colnames(whole_matrix)[as.integer(blockaverages_reformatted_by_index[i,"colend"])],
                   blockaverages_reformatted_by_index[i,"average"],
                   blockaverages_reformatted_by_index[i,"area"])
    outputline }
  colnames(blockaverages_reformatted_by_label)<-c("rowstart","colstart","rowend","colend","average","area")
  #still need to convert this into a matrix
  blockaverages_reformatted_by_index_df<-as.data.frame(blockaverages_reformatted_by_index)
  blockaverages_reformatted_by_label_df<-as.data.frame(blockaverages_reformatted_by_label)
  blockaverages_matrix_idx_avg<-reshape2::dcast(blockaverages_reformatted_by_index_df,rowstart ~ colstart,value.var = "average")
  blockaverages_matrix_idx_area<-reshape2::dcast(blockaverages_reformatted_by_index_df,rowstart ~ colstart,value.var = "area")
  blockaverages_matrix_label_avg<-blockaverages_matrix_idx_avg
  blockaverages_matrix_label_area<-blockaverages_matrix_idx_area
  #breakpoints_col_zero_corr<-gsub(0,1,breakpoints_col)
  #breakpoints_row_zero_corr<-gsub(0,1,breakpoints_row)
  colnames(blockaverages_matrix_label_avg)[2:length(colnames(blockaverages_matrix_label_avg))]<-colnames(whole_matrix)[as.integer(colnames(blockaverages_matrix_label_avg)[2:length(colnames(blockaverages_matrix_label_avg))])]
  #the row labels come from the rowstart column, since the dcast rows follow the breakpoint indices rather than 1:n.
  rownames(blockaverages_matrix_label_avg)<-rownames(whole_matrix)[as.integer(blockaverages_matrix_label_avg$rowstart)]
  colnames(blockaverages_matrix_label_area)[2:length(colnames(blockaverages_matrix_label_area))]<-colnames(whole_matrix)[as.integer(colnames(blockaverages_matrix_label_area)[2:length(colnames(blockaverages_matrix_label_area))])]
  rownames(blockaverages_matrix_label_area)<-rownames(whole_matrix)[as.integer(blockaverages_matrix_label_area$rowstart)]
  # blockaverages_matrix_label<-dcast(blockaverages_reformatted_by_label_df[,c("rowstart","colstart","average")],rowstart ~ colstart,value.var = "average",fun.aggregate = mean)
  outputlist<-list(blockaverages_reformatted_by_index,blockaverages_reformatted_by_label,blockaverages_matrix_idx_area,blockaverages_matrix_idx_avg,blockaverages_matrix_label_avg,blockaverages_matrix_label_area)
  names(outputlist)<-c("blockaverages_reformatted_by_index","blockaverages_reformatted_by_label","blockaverages_matrix_idx_area","blockaverages_matrix_idx_avg","blockaverages_matrix_label_avg","blockaverages_matrix_label_area")
  return(outputlist)
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/getBlockAverageMatrixFromBreakpoints.R
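# A short, self-contained sketch mirroring the roxygen example above; the bin
# labels are hypothetical, and the labelled outputs can be inspected directly.
set.seed(303)
bins <- paste0("chr1_", seq(0, 20000, 5000), "_", seq(5000, 25000, 5000))
mat <- matrix(runif(25), nrow = 5, ncol = 5, dimnames = list(bins, bins))
foreach::registerDoSEQ()
res <- getBlockAverageMatrixFromBreakpoints(whole_matrix = mat,
                                            breakpoints_col = c(1, 2, 4, 5),
                                            breakpoints_row = c(1, 2, 4, 5))
res$blockaverages_reformatted_by_label  # long format, labelled by bin
res$blockaverages_matrix_label_avg      # block means, labelled by bin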
#' Calculate several base statistics for color rescaling.
#'
#' Calculates several statistics from a large matrix that can then be applied to smaller submatrices without needing to load the entire matrix into memory.
#' @keywords rescale color stats
#' @import stats
#' @param whole_matrix the whole matrix to get stats for.
#' @param saveToDisk Save the statistics to disk as an RDS file in the local directory?
#' @param output_fn the name of the output file.
#' @return A list of the output statistics, including:
#' the global min, max, length, sigma (matrix standard deviation), pos_sigma (standard deviation of the positive values), neg_sigma (standard deviation of the negative values), global mean (global_mu),
#' est_max_cap (global_mu+global_sigma_pos*2), as well as the number of rows and columns of the matrix.
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' getGlobalRescalingStats(nbl_result_matrix_sign_small)
#' @export
getGlobalRescalingStats<-function(whole_matrix,saveToDisk=F,output_fn=NULL)
{
  if(is.null(output_fn)){output_fn<-"whole_matrix_stats.rds"}
  whole_matrix<-as.matrix(whole_matrix)
  global_max<-max(whole_matrix)
  global_min<-min(whole_matrix)
  global_length<-length(whole_matrix)
  global_sigma<-sqrt(var(as.numeric(whole_matrix)))
  global_sigma_pos<-sqrt(var(whole_matrix[whole_matrix>0]))
  if(length(whole_matrix[whole_matrix<0])==0)
  {
    global_sigma_neg=NULL
  } else {
    global_sigma_neg<-sqrt(var(whole_matrix[whole_matrix<0]))
  }
  global_mu<-mean(whole_matrix)
  est_max_cap<-global_mu+global_sigma_pos*2
  nrow_mat<-nrow(whole_matrix)
  ncol_mat<-ncol(whole_matrix)
  output<-list(global_max=global_max,global_min=global_min,global_sigma=global_sigma,global_sigma_pos=global_sigma_pos,global_sigma_neg=global_sigma_neg,global_mu=global_mu,est_max_cap=est_max_cap,nrow_mat=nrow_mat,ncol_mat=ncol_mat,global_length=global_length)
  #saves the statistics list (not the character string "output") to disk.
  if(saveToDisk){saveRDS(object = output,file=paste0(output_fn))}
  return(output)
}
#global_stats<-getGlobalRescalingStats(all_conc_cleaned_common_coords_linreg)
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/getGlobalRescalingStats.R
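# A minimal sketch, assuming the packaged example matrix: the precomputed
# statistics can be applied to any submatrix (here, capping extreme positive
# values at est_max_cap) without reloading the whole matrix.
load(system.file("extdata", "nbl_result_matrix_sign_small.rda", package = "CNVScope"))
whole_stats <- getGlobalRescalingStats(nbl_result_matrix_sign_small)
sub <- as.matrix(nbl_result_matrix_sign_small[1:5, 1:5])
capped_sub <- pmin(sub, whole_stats$est_max_cap)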
#' Create an HTML widget for use in shiny or webshot for a given pair of chromosomes.
#'
#' This function requires a matrix with genomic coordinates in the row and column names, and produces a heatmap with a tooltip.
#' @name getInterchromosomalInteractivePlot
#' @keywords CNV heatmap HTML widget data.table readr
#' @importFrom biomaRt getBM useMart
#' @importFrom ggplot2 scale_fill_gradient2
#' @param whole_matrix the large, whole genomic matrix from which the submatrix is taken.
#' @param chrom1 The first chromosome used for the map (rows).
#' @param chrom2 The second chromosome used for the map (columns).
#' @return An HTML widget.
#' @examples
#' \dontrun{
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' getInterchromosomalInteractivePlot(whole_matrix=nbl_result_matrix_sign_small,chrom1=1,
#' chrom2=1)
#' }
#' @export
globalVariables(c('chromosomes'))
getInterchromosomalInteractivePlot<-function(whole_matrix,chrom1,chrom2)
{
  #importFrom GenomicRanges GRanges seqnames mcols
  #importFrom IRanges subsetByOverlaps
  #importFrom heatmaply heatmaply
  i <- if(exists("i")){get("i")} else {NULL}
  #if(rownames(whole_matrix)==colnames(whole_matrix))
  chromosomes<-paste0("chr",c(seq(1:22),"X"),"_")
  submatrix<-whole_matrix[grep(chromosomes[chrom1],rownames(whole_matrix)),grep(chromosomes[chrom2],colnames(whole_matrix))]
  grch37 = biomaRt::useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
  ensembl_gene_tx_table <- biomaRt::getBM(attributes = c("ensembl_gene_id", "ensembl_transcript_id","chromosome_name","transcript_start","transcript_end","start_position","end_position", "strand", "percentage_gene_gc_content","external_gene_name"), # filters = "ensembl_transcript_id", values = "ENST00000296026",
  mart = grch37)
  ensembl_gene_gr<-GenomicRanges::GRanges(seqnames = paste0("chr",ensembl_gene_tx_table$chromosome_name),ranges = IRanges::IRanges(start = ensembl_gene_tx_table$start_position,end=ensembl_gene_tx_table$end_position),strand = ensembl_gene_tx_table$strand,...=ensembl_gene_tx_table)
  if(substr(chrom1,start = nchar(chrom1),stop = nchar(chrom1))!="_"){chrom1<-paste0(chrom1,"_")}
  if(substr(chrom2,start = nchar(chrom2),stop = nchar(chrom2))!="_"){chrom2<-paste0(chrom2,"_")}
  rownames_gr_submatrix<-underscored_pos_to_GRanges(rownames(submatrix))
  colnames_gr_submatrix<-underscored_pos_to_GRanges(colnames(submatrix))
  row_gene_strings_submatrix<-foreach(i=1:length(rownames_gr_submatrix),.inorder=T) %do% {
    print(i)
    outputstring<-paste( unique(gsub("\\..*[[:space:]]","",unique(GenomicRanges::mcols(IRanges::subsetByOverlaps(ensembl_gene_gr,rownames_gr_submatrix[i]))$....external_gene_name))) ,collapse=" ")
    if(is.null(outputstring) | anyNA(outputstring) | length(outputstring)==0) {outputstring<-""}
    outputstring
  }
  col_gene_strings_submatrix<-foreach(i=1:length(colnames_gr_submatrix),.inorder=T) %do% {
    print(i)
    outputstring<-paste( unique(gsub("\\..*[[:space:]]","",unique(GenomicRanges::mcols(IRanges::subsetByOverlaps(ensembl_gene_gr,colnames_gr_submatrix[i]))$....external_gene_name))) ,collapse=" ")
    if(is.null(outputstring) | anyNA(outputstring) | length(outputstring)==0) {outputstring<-""}
    outputstring
  }
  #col_gene_strings_matrix_submatrix<-matrix(rep(unlist(col_gene_strings_submatrix),nrow(submatrix)),ncol=nrow(submatrix),nrow=ncol(submatrix))
  #col_gene_strings_matrix_submatrix_transposed<-t(col_gene_strings_matrix_submatrix)
col_gene_strings_matrix_submatrix_alt<-matrix(rep((unlist(col_gene_strings_submatrix)),nrow(submatrix)),ncol=ncol(submatrix),nrow=nrow(submatrix),byrow = T) #necessary row_gene_strings_matrix_submatrix<-matrix(rep(unlist(row_gene_strings_submatrix),ncol(submatrix)),ncol=ncol(submatrix),nrow=nrow(submatrix)) #necessary concatenated_gene_matrix<-matrix( paste0("row_genes:",row_gene_strings_matrix_submatrix ,"\ncol genes:",col_gene_strings_matrix_submatrix_alt,"\noriginal value:",as.matrix(submatrix)) ,ncol=ncol(col_gene_strings_matrix_submatrix_alt), nrow=nrow(row_gene_strings_matrix_submatrix)) #if(Sys.info()['sysname']=="Windows"){groupdir<-"W:/"} else {groupdir<-"/data/CCRBioinfo/"} htmlwidget<-heatmaply::heatmaply(signedRescale(submatrix),Rowv = F,Colv = F,showticklabels=F,custom_hovertext = concatenated_gene_matrix, #file=paste0(groupdir,"dalgleishjl/hicnv/inter/fixed_rescale/largemem2/chr",chromosomes[chrom1],chromosomes[chrom2],"withrow_and_colgenes_genes_fixed_rescale.html"), scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))) return(htmlwidget) }
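#Illustrative sketch of the tooltip construction above: the per-bin gene strings are
#recycled across rows and columns, then pasted together with the matrix values.
#With hypothetical two-bin labels:
# row_genes <- c("MYCN", "ALK")
# col_genes <- c("TP53", "NF1")
# vals <- matrix(c(1.2, -0.3, 0.8, 2.1), nrow = 2)
# tooltip <- matrix(paste0("row_genes:", rep(row_genes, times = 2),
#                          "\ncol genes:", rep(col_genes, each = 2),
#                          "\noriginal value:", vals),
#                   nrow = 2)
# tooltip[1, 2] # "row_genes:MYCN\ncol genes:NF1\noriginal value:0.8"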
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/getInterchromosomalInteractivePlot.R
#' Import a breakpoint BED file. #' #' Imports a BED file with breakpoints or other interactions, in a dual position format. #' #' @name importBreakpointBed #' @keywords bed #' @rawNamespace import(GenomicInteractions, except = c(start,end)) #' @importFrom rtracklayer import.bed #' @importFrom reshape2 colsplit #' @param breakpoint_fn the filename of the breakpoint bed file #' @return a Genomic Interactions Object #' @examples #' importBreakpointBed(breakpoint_fn = system.file("extdata", #' "sample_breakpoints.bed",package = "CNVScope")) #' @export globalVariables(c("mcols","mcols<-")) importBreakpointBed<-function(breakpoint_fn) { imported_bed<-rtracklayer::import.bed(breakpoint_fn) colsplit_locations<-reshape2::colsplit(string = GenomicRanges::mcols(imported_bed)$name,pattern = "_R_",names=c("otherdata","pos"))[,2] colsplit_locations_gr<-GenomicRanges::GRanges(colsplit_locations) colsplit_id<-reshape2::colsplit(string=reshape2::colsplit(string = GenomicRanges::mcols(imported_bed)$name,pattern = "Id:",names=c("otherdata","id_containing"))[,2],pattern="_",names=c("id","other"))[,1] gint<-GenomicInteractions(imported_bed,colsplit_locations_gr,... = colsplit_id) colsplit_bed<-reshape2::colsplit(string = GenomicRanges::mcols(imported_bed)$name,pattern = "_",names=c("otherdata","id_containing"))[,1] S4Vectors::mcols(gint)$id<-colsplit_id S4Vectors::mcols(gint)$bed<-colsplit_bed closeAllConnections() rm(imported_bed) return(gint) }
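#Illustrative sketch: the colsplit() calls above assume BED 'name' fields of the
#(hypothetical) form "Id:123_..._R_chr2:1000-2000", i.e. an interaction id after "Id:"
#and the partner locus after "_R_":
# nm <- "Id:123_sampleA.bed_R_chr2:1000-2000"
# reshape2::colsplit(nm, pattern = "_R_", names = c("otherdata", "pos"))$pos
# # [1] "chr2:1000-2000"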
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/importBreakpointBed.R
#' Gets a small piece of a matrix (top left corner) for viewing, rather than pulling the first n rows.
#'
#' Gives a small square from the top left corner of a matrix to get an idea of its content, rather than grabbing entire rows.
#' When a row is thousands of numbers long, head() alone can be a problem.
#' @keywords head corner submatrix matrix
#' @param mat A matrix.
#' @param n The length and width of the piece to view.
#' @return a small n by n matrix from the top left corner.
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' mathead(nbl_result_matrix_sign_small)
#' @export
mathead<-function(mat,n=6L)
{
  n<-min(n,nrow(mat),ncol(mat)) #clamp n so small matrices do not cause out-of-bounds indexing
  return(mat[1:n,1:n])
}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/mathead.R
#' Neuroblastoma sample CNV relationship matrix
#'
#' The first 25 Mb of chromosome 1 of the
#' neuroblastoma copy number signed relationship matrix.
#'
#'
#' @format A matrix with 25 rows and 25 columns
#' @source \url{https://gdc.cancer.gov/}
#' @name nbl_result_matrix_sign_small
NULL
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/nbl_result_matrix_sign_small.R
#' Postprocess linear regression matrix.
#'
#' Takes a linear regression matrix, sets infinities to a finite value, and changes the sign of each value to match the sign of the corresponding correlation.
#' @keywords lm linear regression matrix correlation
#' @import stats
#' @importFrom matrixStats colSds
#' @param input_matrix The input matrix, which consists of bins and samples (no LM or correlation has been done on the segmentation values)
#' @param LM_mat The linear regression matrix, with rows and columns consisting of bins and the values being the negative log p-value between them.
#' @param cor_type The correlation type ("pearson" (linear), "spearman" (rank), "kendall" (also rank-based)).
#' Rank correlations capture nonlinear relationships as well as linear. Passed to stats::cor's method parameter.
#' @param inf_replacement_val the value with which infinities are replaced, by default 300.
#' @return The output matrix, or if using slurm, the slurm job object (which should be saved as an rds file and reloaded when creating the output matrix).
#' @examples
#' inputmat<-matrix(runif(15),nrow=3)
#' colnames(inputmat)<-c("chr2_1_1000","chr2_1001_2000","chr2_2001_3000","chr2_3001_4000",
#' "chr2_4001_5000")
#' rownames(inputmat)<-c("PAFPJK","PAKKAT","PUFFUM")
#' outputmat<-matrix(runif(15),nrow=3)
#' outputmat<-cor(inputmat)*matrix(runif(25,-30,500),nrow=5)
#' diag(outputmat)<-Inf
#' postProcessLinRegMatrix(input_matrix=t(inputmat),LM_mat=outputmat,cor_type="pearson",
#' inf_replacement_val=300)
#' @export
postProcessLinRegMatrix<-function(input_matrix,LM_mat,cor_type="pearson",inf_replacement_val=300)
{
  #removing empty columns:
  #input_matrix_zeros_removed<-as.data.frame(t(input_matrix))[,colSums(as.data.frame(t(input_matrix)))>0]
  input_matrix_zeros_removed<-as.data.frame(t(input_matrix))[,matrixStats::colSds(as.matrix(t(input_matrix)))!=0] #this will take care of zero bins and invariant bins.
  #input_matrix_zeros<-as.data.frame(t(input_matrix))[,colSums(as.data.frame(t(input_matrix)))==0]
  #correcting infinities
  LM_mat[is.infinite(unlist(LM_mat))]<-inf_replacement_val
  TCGA_low_pass_matrix<-LM_mat
  #input_matrix<-input_matrix_zeros_removed
  #adding column names
  #this has to take account of the constant columns that are removed in the LM process.
  #These will not be represented, as a linear regression p-value does not exist for two vectors where one is all the same value.
  rownames(TCGA_low_pass_matrix)<-colnames(input_matrix_zeros_removed)
  colnames(TCGA_low_pass_matrix)<-colnames(input_matrix_zeros_removed)
  #removes unnecessary substructure of the matrix/df.
  TCGA_low_pass_matrix2<- matrix(as.numeric(unlist(TCGA_low_pass_matrix)),ncol=ncol(TCGA_low_pass_matrix))
  colnames(TCGA_low_pass_matrix2)<-colnames(TCGA_low_pass_matrix)
  rownames(TCGA_low_pass_matrix2)<-rownames(TCGA_low_pass_matrix)
  #fixing sign
  input_matrix_mat<-as.matrix(input_matrix_zeros_removed)
  input_matrix_cor<-cor(as.matrix(input_matrix_mat), use = "pairwise.complete.obs",method=cor_type)
  input_matrix_cor[input_matrix_cor>0]<-1
  input_matrix_cor[input_matrix_cor<0]<- -1
  TCGA_low_pass_matrix_sign_corrected<-(TCGA_low_pass_matrix2*input_matrix_cor)
  return(TCGA_low_pass_matrix_sign_corrected)
}
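#Illustrative sketch of the sign correction above: the -log p-value matrix is unsigned,
#so each entry is multiplied by +1/-1 according to the corresponding pairwise
#correlation (thresholding to +/-1 is equivalent to sign() for nonzero correlations):
# pvals <- matrix(c(Inf, 3, 3, Inf), nrow = 2) # -log p-values, Inf on the diagonal
# cors <- matrix(c(1, -0.8, -0.8, 1), nrow = 2)
# pvals[is.infinite(pvals)] <- 300             # cap infinities, as in the function
# pvals * sign(cors)
# #      [,1] [,2]
# # [1,]  300   -3
# # [2,]   -3  300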
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/postProcessLinRegMatrix.R
#' Assign GenomicInteractions to a predefined series of bins for row and column, corresponding to a genomic matrix.
#'
#' This function allows the user to assign a set of GenomicInteractions to a pre-existing matrix with known dimensions and column/row names. It finds the row/column index of each point and produces a merged dataframe with the original annotation columns that correspond to each bin in the matrix, with appropriate labels & indexes.
#' @name rebinGenomicInteractions
#' @param gint A GenomicInteractions object needing to be binned.
#' @param whole_genome_matrix A matrix with underscored positions for column and rownames e.g. chr1_1_5000,chr1_5001_10000. If this is provided, it will override row/column names and GRanges objects.
#' @param rownames_gr A Genomic Ranges object created from the whole genome matrix row names in chr_start_end format, e.g. chr1_1_5000. No effect if whole_genome_matrix is specified.
#' @param colnames_gr A Genomic Ranges object created from the whole genome matrix column names in chr_start_end format. No effect if whole_genome_matrix is specified.
#' @param rownames_mat The row names of the whole_genome_matrix in chr_start_end format.
#' @param colnames_mat The column names of the whole_genome_matrix in chr_start_end format.
#' @param method Method to rebin with; can use "overlap" and "nearest" methods. Default: nearest.
#' @keywords GenomicInteractions bin matrix colnames rownames binning bin
#' @import GenomicInteractions
#' @importFrom GenomicInteractions anchorOne anchorTwo
#' @import foreach doParallel
#' @examples
#' foreach::registerDoSEQ()
#' gint_small_chr1<-importBreakpointBed(breakpoint_fn = system.file("extdata",
#' "sample_breakpoints_chr1.bed",package = "CNVScope"))
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' rebinGenomicInteractions(gint=gint_small_chr1,whole_genome_matrix=NULL,
#' rownames_gr=underscored_pos_to_GRanges(rownames(nbl_result_matrix_sign_small)),
#' colnames_gr=underscored_pos_to_GRanges(colnames(nbl_result_matrix_sign_small)),
#' rownames_mat = rownames(nbl_result_matrix_sign_small),
#' colnames_mat = colnames(nbl_result_matrix_sign_small),
#' method="nearest")
#' @export
globalVariables("mcols")
rebinGenomicInteractions<-function(gint=NULL,whole_genome_matrix=NULL,rownames_gr=NULL,colnames_gr=NULL,rownames_mat=NULL,colnames_mat=NULL,method="nearest")
{
  #importFrom GenomicRanges nearest GRanges
  #importFrom InteractionSet findOverlaps
  #importFrom S4Vectors mcols mcols<-
  if (!requireNamespace('InteractionSet', quietly = TRUE)) {
    return("Please install InteractionSet to use this function")
  }
  i <- if(exists("i")){get("i")} else {NULL}
  if(is.null(gint)){return("No GenomicInteractions to rebin!")}
  if(!is.null(whole_genome_matrix)){
    rownames_mat<-rownames(whole_genome_matrix)
    colnames_mat<-colnames(whole_genome_matrix)
    #derive the bin GRanges from the matrix names as well, so the matrix fully overrides them, as documented.
    rownames_gr<-underscored_pos_to_GRanges(rownames_mat)
    colnames_gr<-underscored_pos_to_GRanges(colnames_mat)
  }
  output<-foreach(i=1:length(gint),.inorder = T,.combine="rbind",.errorhandling = "pass",.export = ls()) %dopar%
  {
    #current_int_df<-as.data.table(gint[i])
    current_int_df<-as.data.frame(cbind(as.data.frame(GenomicInteractions::anchorOne(gint)[i]),as.data.frame(GenomicInteractions::anchorTwo(gint)[i]),as.data.frame(S4Vectors::mcols(gint[i]))))
    print(paste0(i/length(gint)*100,"%"))
    if(method=="overlap")
    {
      row_bin_index<-InteractionSet::findOverlaps(rownames_gr,GenomicInteractions::anchorOne(gint[i]))@from
      col_bin_index<-InteractionSet::findOverlaps(colnames_gr,GenomicInteractions::anchorTwo(gint[i]))@from}
if(method=="nearest") { row_bin_index<-GenomicRanges::nearest(GenomicInteractions::anchorOne(gint[i]),rownames_gr) col_bin_index<-GenomicRanges::nearest(GenomicInteractions::anchorTwo(gint[i]),colnames_gr) } if(length(row_bin_index)==0 | length(col_bin_index)==0) { print(paste0("no match pair for row",i));return(NULL)} col_bin_label<-colnames_mat[col_bin_index] row_bin_label<-rownames_mat[row_bin_index] outputline<-c(row_bin_index,col_bin_index,row_bin_label,col_bin_label,sapply(current_int_df,as.character)) outputnames<-c("row_bin_index","col_bin_index","row_bin_label","col_bin_label",colnames(current_int_df)) if(length(row_bin_index)==0 | length(col_bin_index)==0) {outputline<-rep("",length(outputnames))} names(outputline)<-outputnames outputline } if(class(output)[1]=="character" & is.null(nrow(output))){ output_df<-as.data.frame(t(output)) colnames(output_df)<-c("row_bin_index","col_bin_index","row_bin_label","col_bin_label",as.character(names(gint))) } else { print("arrived in else condition") output_df<-as.data.frame(matrix(as.character(output),nrow=nrow(output))) colnames(output_df)<-c("row_bin_index","col_bin_index","row_bin_label","col_bin_label",as.character(colnames(as.data.frame(gint[1])))) } return(output_df) }
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/rebinGenomicInteractions.R
#' Runs the CNVScope plotly shiny application. #' #' Runs the interactive suite of tools locally. #' @name runCNVScopeLocal #' @keywords CNV heatmap shiny plotly #' @return none. Runs the application if the correct files are present. #' @examples #' \dontrun{ #' CNVScope::runCNVScopeLocal() #' } #' @export runCNVScopeLocal<-function(){ CNVScope::runCNVScopeShiny(useCNVScopePublicData = T) }
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/runCNVScopeLocal.R
#' Runs the CNVScope plotly shiny application.
#'
#' Runs the interactive suite of tools locally or on a server if called in a script file (e.g. App.R).
#' Data sources are required.
#' For a simple installation, please use the runCNVScopeLocal function.
#' @name runCNVScopeShiny
#' @keywords CNV heatmap shiny plotly
#' @rawNamespace import(GenomicInteractions, except = c(start,end))
#' @import ggplot2 magrittr
#' @rawNamespace import(RCurl, except = reset)
#' @rawNamespace import(shiny, except = c(runExample,renderDataTable))
#' @rawNamespace import(data.table, except = c(melt, dcast))
#' @param baseurl the url of the source files for the application (e.g. the contents of plotly_dashboard_ext). This will be pulled remotely.
#' @param basefn the linux file path of the same source files.
#' @param osteofn the linux file path of the osteosarcoma (OS) data files.
#' @param debug Enable debugging output.
#' @param useCNVScopePublicData Use files from the CNVScopePublicData package.
#' @return none. Runs the application if the correct files are present.
#' @examples
#' #see runCNVScopeLocal().
#' \dontrun{
#' runCNVScopeShiny(useCNVScopePublicData=T)
#' }
#' @export
#globalVariables(c("common_coords_linreg","expression_data_gr","chrom.pairs","."), add=F)
runCNVScopeShiny<-function(baseurl=NULL,basefn=NULL, osteofn=NULL,debug=F, useCNVScopePublicData=F) {
  #  if(requireNamespace("plotly",quietly = T)){
  #rawNamespace import(GenomicFeatures ,except = show)
  #importFrom logging logerror
  #importFrom shinythemes shinytheme
  #importFrom BiocManager repositories
  #importFrom shinycssloaders withSpinner
  #rawNamespace import(shinyjs, except = runExample)
  #import htmltools htmlwidgets
  menu <- if(exists("menu")){get("menu")} else {NULL}
  browse <- if(exists("browse")){get("browse")} else {NULL}
  if(useCNVScopePublicData)
  {
    if (!file.exists(system.file("plotly_dashboard_ext","censushg19.rds",
                                 package = "CNVScopePublicData")) )
    {
      cat("CNVScopePublicData package not detected. 
Install now?") install <- utils::menu(c("yes", "no")) if(install==1){remotes::install_github("jamesdalg/CNVscope_public_data")} } basefn=paste0(system.file("plotly_dashboard_ext/",package = "CNVScopePublicData"),"/") } chrom.pairs<-NULL options(scipen=999) #if(getRversion() >= "2.15.1") utils::globalVariables(c("."),add=F) head.matrix<-function(mat,n=6L) { return(mat[1:n,1:n]) } delete.isolates <- function(graph, mode = 'all') { isolates <- which(igraph::degree(graph, mode = mode) == 0) igraph::delete.vertices(graph, isolates) } options(repos = BiocManager::repositories()) options(shiny.error = browser) options(shiny.fullstacktrace = TRUE) getOption("repos") options(shiny.sanitize.errors = F) if(is.null(baseurl)&is.null(basefn)) { if(Sys.info()["nodename"]=="ncias-d2037-v.nci.nih.gov" | Sys.info()["nodename"]=="plotly.nci.nih.gov") { baseurl<-"file:///srv/shiny-server/plotly_dashboard/" basefn<-"/srv/shiny-server/plotly_dashboard/" } else { # baseurl<-"ftp://helix.nih.gov/pub/dalgleishjl/" } if(Sys.info()["nodename"]=="NCI-02105037-L") { #baseurl<-"file:///W|/dalgleishjl/hicnv/" #baseurl<-"ftp://helix.nih.gov/pub/dalgleishjl/" baseurl<-"http://alps.nci.nih.gov/james/" #baseurl<-"file:///W:/dalgleishjl/hicnv/" #basefn<-"W:/dalgleishjl/hicnv/" #osteofn<-"W:/dalgleishjl/hicnv/plotly_dashboard_ext/osteo/" } } if(debug){browser()} baseurl<<-baseurl basefn<<-basefn osteofn<<-osteofn #tryCatch(bin_data<-readRDS((url(paste0(baseurl,"bin_data.rds")))),error = function(e) NULL) #tryCatch(bin_data<-readRDS((paste0(basefn,"bin_data.rds"))),error = function(e) NULL) chromosomes<<-paste0("chr",c(seq(1:22),"X"),"_") options(shiny.error = function() { logging::logerror(sys.calls() %>% as.character %>% paste(collapse = ", ")) }) #tryCatch(load(url(paste0(paste0(baseurl,"common_coords_linreg.RData")))),error = function(e) NULL) #tryCatch(load(paste0(paste0(basefn,"common_coords_linreg.RData"))),error = function(e) NULL) #chromstarts_linreg<-unlist(foreach(i=1:length(1:length(chromosomes))) %do% {grep(chromosomes[i],common_coords_linreg)[1]}) swap_row_col_genes=F chrom.pairs<<-expand.grid(1:length(chromosomes),1:length(chromosomes)) chromosomes<-paste0("chr",c(seq(1:22),"X"),"_") if(debug){browser()} if(exists("basefn")) {#local objects: tryCatch(freq_data<-data.table::fread(paste0(osteofn,"OS_freq_data.txt")),error = function(e) NULL) # tryCatch(breakpoint_gint_full<-readRDS(paste0(basefn,"breakpoint_gint_full.rds")),error = function(e) NULL) tryCatch(expression_data_gr<-readRDS(paste0(osteofn,"expression_data_gr.rds")),error = function(e) NULL) tryCatch(expression_data_gr_nbl<-readRDS(paste0(basefn,"tcga_nbl_expression.rds")),error = function(e) NULL) if(debug){browser()} # tryCatch(bin_data_gr<-readRDS(paste0(basefn,"bin_data_gr.rds")),error = function(e) NULL) #tryCatch(census_data_gr<-readRDS(paste0(basefn,"census_data_gr.rds")),error = function(e) NULL) tryCatch(census_data_gr<-readRDS(paste0(basefn,"censushg19.rds")),error = function(e) NULL) tryCatch(ensembl_gene_tx_data_gr<<-readRDS(paste0(basefn,"ensembl_gene_tx_table_gr.rds")),error = function(e) NULL) } else { if(exists("baseurl")) {tryCatch(freq_data<-data.table::fread(paste0(baseurl,"OS_freq_data.txt")),error = function(e) NULL) tryCatch(breakpoint_gint_full<-readRDS(url(paste0(baseurl,"breakpoint_gint_full.rds"))),error = function(e) NULL) if(debug){browser()} #tryCatch(expression_data_gr<-readRDS(url(paste0(baseurl,"expression_data_gr.rds"))),error = function(e) NULL) 
tryCatch(expression_data_gr_nbl<-readRDS(url(paste0(baseurl,"tcga_nbl_expression.rds"))),error = function(e) NULL) tryCatch(bin_data_gr<-readRDS(url(paste0(baseurl,"bin_data_gr.rds"))),error = function(e) NULL) #tryCatch(census_data_gr<-readRDS(url(paste0(baseurl,"census_data_gr.rds"))),error = function(e) NULL) tryCatch(census_data_gr<-readRDS(paste0(basefn,"censushg19.rds")),error = function(e) NULL) tryCatch(ensembl_gene_tx_data_gr<-readRDS(url(paste0(baseurl,"ensembl_gene_tx_table_gr.rds"))),error = function(e) NULL) } } CNVScopeui<-fluidPage(theme=shinythemes::shinytheme("flatly"), #shinythemes::themeSelector() tags$style(type="text/css", ".shiny-output-error { visibility: hidden; }", ".shiny-output-error:before { visibility: hidden; }"), # Application title titlePanel("CNVScope Interchromosomal Heatmaps"), uiOutput("privpol"), # Sidebar with a slider input for number of bins tabsetPanel(id = "tabs",tabPanel("Controls",fluidRow(column(width=2,offset = 0, #sidebarPanel(position="right", selectInput('data_source', 'data source', c("linreg_osteosarcoma_CNVkit","TCGA_NBL_low_pass","TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"), selected = "TCGA_NBL_low_pass"), #"TCGA_SARC_SNP6","TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass" ,"TCGA_PRAD_low_pass" selectInput('chrom2', 'Chromosome (rows)', chromosomes, selected = "chr17_"), selectInput('chrom1', 'Chromosome (columns)', chromosomes, selected = "chr19_"), sliderInput('max_cap',"saturation limit",min=0.1,max=300,value = 75), #input$sample_hist_alpha sliderInput('heatmapHeight',"heatmap height",min=640,max=2048,value = 1024), #sliderInput('n_nodes',"number of nodes (top/bottom)",min=5,max=200,value = 50), conditionalPanel("input.data_source== 'linreg_osteosarcoma_CNVkit'", checkboxInput('plot_points_toggle',"Plot Structural Variants",value = FALSE), checkboxInput('lumpy_points_toggle',"Plot Lumpy SVs",value = FALSE)), conditionalPanel("input.data_source== 'TCGA_NBL_low_pass'", checkboxInput('pval_filter_toggle',"P-value filter",value = FALSE)), checkboxInput("genes_toggle","Show Genes on Tooltip",value=TRUE) ),column(width=2,offset = 0,conditionalPanel("input.data_source== 'TCGA_NBL_low_pass'", selectInput('fdr_correction', 'FDR p-value correction', c("chromosome_pair","genome"), selected = "chromosome_pair"), selectInput('cor_method', 'Correlation Method', c("pearson","spearman","kendall","spearman - pearson"), selected = "pearson"), selectInput('visval', 'Visualized Relationship Metric', c("-log(Linear Regression P-value) * correlation sign","Correlation"), selected = "Correlation") ), #end conditional panel textInput('gene_input_row',"row_gene",NULL), textInput('loc_input_row',"row_location",NULL), textInput('gene_input_col',"col_gene",NULL), textInput('loc_input_col',"col_location",NULL) )),fluidRow(column(width=4,offset=0,align="center", actionButton("geneSearch", "find genes"), actionButton("goButton", "create plots") ))), tabPanel("Plots",fluidRow(column(2,DT::dataTableOutput("row_gene_data")),column(2,DT::dataTableOutput("col_gene_data")), column(5, h2("interactive chromosomal heatmap and minimap"), shinycssloaders::withSpinner(plotly::plotlyOutput("plotlyChromosomalHeatmap",height = "100%"))#, # visNetwork::visNetworkOutput("network",height="1024") ),column(1,offset=2,plotly::plotlyOutput("minimap",height=1024)#,verbatimTextOutput("shiny_return") )) ) ,#tabPanel("Top Network interactions", h2("Interactive Chromosomal Interaction network 
for top positive and negative relationships"), # fluidRow(dataTableOutput("shiny_return"),fluidRow(visNetwork::visNetworkOutput("network",height="1024"))#paste0(round(isolate(input$heatmapHeight)/1.25),"px")) # )), tabPanel("gain/loss frequency", conditionalPanel("!is.null(event_data('plotly_click')) & is.null(output$freq_table)", fluidRow(h2("gain/loss frequency"), # shinycssloaders::withSpinner(DT::dataTableOutput("freq_table"))))), tabPanel("COSMIC cancer gene census",h2("Cancer Gene Census Data"), fluidRow( shinycssloaders::withSpinner(DT::dataTableOutput("census_data")))), #end tabpanel tabPanel("sample info", fluidRow(column(2,offset=1,h3("sample histogram for row and column values at clicked point"),sliderInput('sample_hist_alpha',"histogram opacity",min=0.1,max=1,value = 0.6), shinycssloaders::withSpinner(plotly::plotlyOutput("sample_info"))), column(2,offset=1,h3("sample scatterplot for row and column segmentation values at clicked point"),shinycssloaders::withSpinner(plotly::plotlyOutput("sample_info_scatter"))), column(2,offset=1,h3("sample regression scatterplot for values at clicked point, colored by sample to show clustering"),shinycssloaders::withSpinner(plotly::plotlyOutput("sample_info_scatter2")))) ), tabPanel("expression_data", h2("expression data table for clicked point"), fluidRow( shinycssloaders::withSpinner(DT::dataTableOutput("expression_data"))) ), tabPanel("Whole Genome View",fluidRow(column(11,offset=2,conditionalPanel("input.data_source== 'linreg_osteosarcoma_CNVkit' | input.data_source=='TCGA_NBL_low_pass'",h2("whole genome view"), sliderInput(inputId = "whole_genome_max_cap",label = "Whole Genome p-value Saturation Cap",value = 75, min = 5,max=75,step = 5), shinycssloaders::withSpinner(plotOutput("whole_genome_image"))) )#end column )#end fluidRow ) #end tabPanel for Whole Genome view ) #end tabset panel # Show a plot of the generated distribution ) shinyApp(ui = CNVScopeui, server = CNVScopeserver) } #}
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/runCNVScopeShiny.R
#' Rescale positive and negative data, preserving sign information.
#'
#' Performs a signed rescale on the data, shrinking the negative and positive ranges into the [0,1] space, such that negative is always less than 0.5 and positive is always greater.
#' @keywords signed rescale positive negative matrix
#' @param matrix A matrix to be transformed
#' @param global_max the global maximum (used if scaling using statistics from a large matrix upon a submatrix).
#' @param global_min the global minimum
#' @param global_sigma the global sigma (matrix standard deviation)
#' @param global_mu the global mu (matrix mean)
#' @param max_cap the maximum saturation-- decreases the ceiling considered for the scaling function.
#' Useful to see greater differences if an image is too white; increase it if there is too much color to tell apart domains.
#' @param method method to perform the rescaling.
#' Options are "minmax" (default), "tan" for tangent, and "sd" for standard deviation
#' @param tan_transform apply a tangent transformation?
#' @param global_sigma_pos The positive global sigma. See getGlobalRescalingStats.
#' @param global_sigma_neg The negative global sigma. See getGlobalRescalingStats.
#' @param asymptotic_max make the maximum value in the matrix not 1, but rather something slightly below.
#' @return transformedmatrix A transformed matrix.
#' @examples
#' mat<-matrix(c(5,10,15,20,0,40,-45,300,-50),byrow=TRUE,nrow=3)
#' rescaled_mat<-signedRescale(mat)
#' mat
#' rescaled_mat<-signedRescale(abs(mat))
#' @export
signedRescale<-function(matrix,global_max=NULL,global_min=NULL,global_sigma=NULL,global_mu=NULL,max_cap=NULL,method="minmax",tan_transform=F,global_sigma_pos=NULL,global_sigma_neg=NULL,asymptotic_max=T)
{
  #matrix<-as.matrix(matrix)
  transformedmatrix<-as.matrix(matrix)
  transformedmatrix[transformedmatrix==0]<-(0.5+1e9*.Machine$double.eps)
  if(!is.null(max_cap)){transformedmatrix[transformedmatrix>max_cap]<-max_cap}
  if(is.null(global_max)){global_max<-max(transformedmatrix[transformedmatrix>0])}
  if(is.null(global_min)){
    if(length(transformedmatrix[transformedmatrix<0])!=0){
      global_min<-min(transformedmatrix[transformedmatrix<0])
    } else {
      global_min <- -Inf
      #this is only used in the case that there are no negative values, which stores no data.
      #essentially, this is a fix to remove warnings. The scaling will happen correctly.
    }
  }
  if(is.null(global_sigma_pos)){global_sigma_pos<-sd(transformedmatrix[transformedmatrix>0])}
  if(is.null(global_sigma_neg)){global_sigma_neg<-sd(transformedmatrix[transformedmatrix<0])}
  if(is.null(global_sigma)){global_sigma<-sd(transformedmatrix)}
  if(is.null(global_mu)){global_mu<-mean(transformedmatrix)}
  #if(tan_transform){transformedmatrix<-atan(transformedmatrix)/pi*2}
  if(method=="minmax"){
    #browser()
    transformedmatrix[transformedmatrix>0 & transformedmatrix!=(0.5+1e9*.Machine$double.eps)]<-((transformedmatrix[transformedmatrix>0 & transformedmatrix!=(0.5+1e9*.Machine$double.eps)]/(global_max*2))+0.5) #divide by global max * 2, store into transformed matrix.
    transformedmatrix[matrix<0]<-((transformedmatrix[transformedmatrix<0]/(global_min*2))) #divide by global minimum * 2, store into transformed matrix.
    transformedmatrix[transformedmatrix<=0.5 & transformedmatrix>0]<-abs(0.5-transformedmatrix[transformedmatrix<=0.5 & transformedmatrix>0]) #(0,0.5), negative numbers in original matrix.
#transformedmatrix[transformedmatrix==0]<-(0.5-1e9*.Machine$double.eps) } if(method=="tan"){transformedmatrix<-atan(transformedmatrix)/pi*2} if(method=="sd") { transformedmatrix[transformedmatrix>0]<-((transformedmatrix[transformedmatrix>0]/(global_max*2))+0.5) transformedmatrix[matrix<0]<-((transformedmatrix[transformedmatrix<0]/(global_min*2))) transformedmatrix[transformedmatrix<0.5 & transformedmatrix>0]<-abs(0.5-transformedmatrix[transformedmatrix<0.5 & transformedmatrix>0]) transformedmatrix[transformedmatrix==0]<-(0.5-1e9*.Machine$double.eps) } if(asymptotic_max) { #browser() transformedmatrix[transformedmatrix==1]<-(1-1e9*.Machine$double.eps) } return(transformedmatrix) } #all_conc_cleaned_common_coords_linreg_tiny<-all_conc_cleaned_common_coords_linreg[1:25,1:25] # all_conc_cleaned_common_coords_linreg_tiny.m<-melt(as.matrix(all_conc_cleaned_common_coords_linreg[1:5,1:5])) # signedRescale(all_conc_cleaned_common_coords_linreg_tiny)==CNVScope::signedRescale(all_conc_cleaned_common_coords_linreg_tiny) # bins<-fread(paste0(groupdir,"dalgleishjl/hicnv/binfile.txt")) # bins_t<-t(bins[,2:ncol(bins)]) # colnames(bins_t)<-bins$probe # #head(bins_t) # ComplexHeatmap::Heatmap(signedRescale(all_conc_cleaned_common_coords_linreg_tiny,tan_transform = F),cluster_rows = F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # ComplexHeatmap::Heatmap(CNVScope::signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),tan_transform = F),cluster_rows = F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),tan_transform = F),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # CNVScope::signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:85,1:85])) # matrix=all_conc_cleaned_common_coords_linreg[1:85,1:85] # setwd(paste0(groupdir,"dalgleishjl/hicnv/color_scale_test_plots/")) # # ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="tan",tan_transform = F),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # # ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="minmax",tan_transform = F,max_cap = 200),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="minmax",tan_transform = F,max_cap = 100),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="minmax",tan_transform = F,max_cap = 50),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="minmax",tan_transform = F,max_cap = 25),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))) # foreach(i=c(10,25,50,75,100,200)) 
%dopar% # { # png(paste0("chr1_max_cap",i,".png")) # print(ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="minmax",tan_transform = F,max_cap = i),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red")))) # dev.off() # } # png(paste0("chr1_tan_transform",".png")) # print(ComplexHeatmap::Heatmap(signedRescale(as.matrix(all_conc_cleaned_common_coords_linreg[1:1107,1:1107]),method="tan",tan_transform = F),cluster_rows = F,show_row_names=F,show_column_names=F,cluster_columns = F,circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red")))) # dev.off()
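#Illustrative check of the "minmax" mapping above: positives are scaled by twice the
#global max into (0.5, 1], negatives by twice the global min into [0, 0.5), and zeros
#sit just above 0.5:
# m <- matrix(c(-50, 0, 25, 50), nrow = 2)
# signedRescale(m)
# # -50 -> 0 (global min), 0 -> ~0.5, 25 -> 0.75, 50 -> ~1 (asymptotic max)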
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/signedRescale.R
#' Convert coordinates in underscored format to a GRanges object.
#'
#' This function creates a GRanges object from a character vector of coordinates of the form "chr1_0_5000".
#' @keywords CNV GRanges Genomic Ranges position
#' @param underscored_positions A vector of positions of the form c("chr1_0_5000","chr1_7500_10000","chr1_10000_15000")
#' @param extended_data Optional metadata columns. These columns cannot be named "start", "end", "width", or "element". Passed to GRanges object as ...
#' @param zeroToOneBasedStart Converts a set of underscored positions that begin with zero to GRanges where the lowest positional value on a chromosome is 1. Essentially adds 1 to start.
#' @param zeroToOneBasedEnd Adds 1 to the end of the underscored positions.
#' @return A GRanges object
#' @examples
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' underscored_pos_to_GRanges(colnames(nbl_result_matrix_sign_small))
#' @export
underscored_pos_to_GRanges<-function(underscored_positions=NULL,extended_data=NULL,zeroToOneBasedStart=T,zeroToOneBasedEnd=F)
{
  #importFrom GenomicRanges GRanges
  #importFrom IRanges IRanges
  #importFrom plyr ldply
  #split each "chr_start_end" string once and reuse the pieces, rather than re-splitting for every field
  split_positions<-plyr::ldply(sapply(underscored_positions, function(x) strsplit(as.character(x),"_",fixed=T)),rbind)
  seqnames_vec<-as.character(split_positions["1"][,1])
  start_vec<-as.numeric(as.character(split_positions["2"][,1])) + as.numeric(zeroToOneBasedStart)
  end_vec<-as.numeric(as.character(split_positions["3"][,1])) + as.numeric(zeroToOneBasedEnd)
  GenomicRanges::GRanges(seqnames = seqnames_vec,
                         IRanges::IRanges(start = start_vec, end = end_vec),
                         ... = extended_data)
}
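#Illustrative round trip (the start is shifted to 1-based by default):
# underscored_pos_to_GRanges(c("chr1_0_5000", "chr1_5000_10000"))
# # GRanges with ranges chr1:1-5000 and chr1:5001-10000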
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/underscored_pos_to_GRanges.R
#' Write a matrix, with genes, of a submatrix of a whole genome interaction matrix to disk.
#'
#' Writes an RData file containing a melted, ggplot2-ready matrix.
#' @keywords ggplot2 heatmap plotly ggiraph genomic matrix
#' @import magrittr
#' @param whole_genome_matrix A matrix to have edges averaged with genomic coordinates in the form chr1_50_100 set as the column and row names.
#' @param chrom1 first chromosome of the two which will subset the matrix. (this is done in row-column fashion).
#' @param chrom2 second chromosome of the two which will subset the matrix. (this is done in row-column fashion).
#' @param extra_data_matrix A matrix with additional variables about each point, one position per row with as many variables as remaining columns.
#' @param transpose transpose the matrix?
#' @param sequential disable parallelization with registerDoSEQ()?
#' @param debug extra output
#' @param desired_range_start start of range for width and height of matrix for downsampling
#' @param desired_range_end end of range for width and height of matrix for downsampling
#' @param saveToDisk saves the matrix to disk
#' @param max_cap maximum saturation cap, passed to signedRescale
#' @param rescale perform signedRescale() on matrix?
#' @return ggplotmatrix a matrix with values sufficient to create a ggplot2 heatmap with geom_tile() or with ggiraph's geom_tile_interactive()
#' @examples
#' load(system.file("extdata","grch37.rda",package = "CNVScope"))
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' load(system.file("extdata","ensembl_gene_tx_table_prot.rda",package = "CNVScope"))
#' writeAsymmetricMeltedChromosomalMatrixToDisk(whole_genome_matrix =
#' nbl_result_matrix_sign_small,
#' chrom1 = 1,chrom2 = 1,desired_range_start = 25, desired_range_end = 25)
#' file.remove("chr1_chr1_melted.RData")
#' @export
writeAsymmetricMeltedChromosomalMatrixToDisk<-function(whole_genome_matrix,chrom1,chrom2,extra_data_matrix=NULL,transpose=F,sequential=T,debug=T,desired_range_start=50,desired_range_end=300,saveToDisk=T,max_cap=NULL,rescale=T)
{
  #importFrom utils head
  if(!is.null(extra_data_matrix))
  {
    extra_data_matrix_fn<-extra_data_matrix
    extra_data_df<-data.table::fread(extra_data_matrix,data.table=F)
  }
  chromosomes<-paste0("chr",c(seq(1:22),"X"),"_")
  submatrix<-whole_genome_matrix[grep(chromosomes[chrom1],rownames(whole_genome_matrix)),grep(chromosomes[chrom2],colnames(whole_genome_matrix))]
  #insert intra code here for compatibility, remembering the bit at the end of the while loops.
downsample_factor<-NULL
desired_range<-IRanges::IRanges(desired_range_start,desired_range_end)
downsample_factor_row<-NULL
downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix))))
colnames(downsample_outcomes_row)<-c("factor","downsampled_size")
downsample_factor_col<-NULL
downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix))))
colnames(downsample_outcomes_col)<-c("factor","downsampled_size")
while(length(intersect(downsample_factor_col,downsample_factor_row))==0 & (nrow(submatrix)>desired_range_end | ncol(submatrix)>desired_range_end))
{
downsample_factor<-NULL
downsample_factor_row<-NULL
downsample_factor_col<-NULL
downsample_outcomes<-NULL
while(length(downsample_factor_col)==0 & ncol(submatrix)>desired_range_end)
{
downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix))))
colnames(downsample_outcomes_row)<-c("factor","downsampled_size")
downsample_factor_row<-downsample_outcomes_row[downsample_outcomes_row$downsampled_size>=desired_range@start & downsample_outcomes_row$downsampled_size<=(desired_range@start+desired_range@width),"factor"]
downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix))))
colnames(downsample_outcomes_col)<-c("factor","downsampled_size")
downsample_factor_col<-downsample_outcomes_col[downsample_outcomes_col$downsampled_size>=desired_range@start & downsample_outcomes_col$downsampled_size<=(desired_range@start+desired_range@width),"factor"]
submatrix_temp<-submatrix
if(debug){
print(paste0("col factors:",downsample_factor_col))
print(paste0("row factors in desired range:",downsample_factor_row))
print(paste0("current submatrix dimensions:",paste0(dim(submatrix))))
print(paste0("row factors (including those outside desired range):",paste(downsample_outcomes_row$factor)))
print(paste0("col factors (including those outside desired range):",paste(downsample_outcomes_col$factor)))}
#browser()
#temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1]
if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1)
{submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else {
# if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {
# temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1]
# if(ncol(submatrix)/temp_ds_factor>=desired_range_start & nrow(submatrix)/temp_ds_factor>=desired_range_start){
# submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=temp_ds_factor)
# }
# rm(temp_ds_factor)
# } else {
if(length(downsample_factor_col)==0 & ncol(submatrix)>(desired_range@start+1)){submatrix<-averageMatrixEdges(submatrix,dimension = "column")}
}
#only average edges if there isn't another way to downsample first.
#I should change this such that it doesn't downsample too much. This will likely be another edge case.
} #downsample_outcomes_col<-downsample_outcomes #downsample_factor_col<-downsample_factor downsample_factor<-NULL if(debug){ print(paste0("col factors:",downsample_factor_col)) print(paste0("row factors:",downsample_factor_row)) print(paste0("current submatrix dimensions:",paste0(dim(submatrix)))) } while(length(downsample_factor_row)==0 & nrow(submatrix)>desired_range_end) { downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix)))) colnames(downsample_outcomes_col)<-c("factor","downsampled_size") downsample_factor_col<-downsample_outcomes_col[downsample_outcomes_col$downsampled_size>=desired_range@start & downsample_outcomes_col$downsampled_size<=(desired_range@start+desired_range@width),"factor"] downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix)))) colnames(downsample_outcomes_row)<-c("factor","downsampled_size") downsample_factor_row<-downsample_outcomes_row[downsample_outcomes_row$downsampled_size>=desired_range@start & downsample_outcomes_row$downsampled_size<=(desired_range@start+desired_range@width),"factor"] submatrix_temp<-submatrix if(debug){ print(paste0("row factors:",downsample_factor_row)) print(paste0("col factors:",downsample_factor_col)) print(paste0("row factors in desired range:",downsample_factor_row)) print(paste0("current submatrix dimensions:",paste0(dim(submatrix)))) print(paste0("row factors (including those outside desired range):",paste(downsample_outcomes_row$factor))) print(paste0("col factors (including those outside desired range):",paste(downsample_outcomes_col$factor)))} #browser() if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else { # if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) { # temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1] # if(ncol(submatrix)/temp_ds_factor>=desired_range_start & nrow(submatrix)/temp_ds_factor>=desired_range_start){ # submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=temp_ds_factor) # } # rm(temp_ds_factor) # } else { if(length(downsample_factor_row)==0 & nrow(submatrix)>(desired_range@start+1)){submatrix<-averageMatrixEdges(submatrix,dimension = "row")} } } #downsample_outcomes_row<-downsample_outcomes #downsample_factor_row<-downsample_factor #browser() downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix)))) colnames(downsample_outcomes_col)<-c("factor","downsampled_size") downsample_factor_col<-downsample_outcomes_col[downsample_outcomes_col$downsampled_size>=desired_range@start & downsample_outcomes_col$downsampled_size<=(desired_range@start+desired_range@width),"factor"] downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix)))) colnames(downsample_outcomes_row)<-c("factor","downsampled_size") downsample_factor_row<-downsample_outcomes_row[downsample_outcomes_row$downsampled_size>=desired_range@start & downsample_outcomes_row$downsampled_size<=(desired_range@start+desired_range@width),"factor"] submatrix_temp<-submatrix #browser() # if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) { 
# temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1] # if(ncol(submatrix)/temp_ds_factor>=desired_range_start & nrow(submatrix)/temp_ds_factor>=desired_range_start){ # submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=temp_ds_factor) # } # rm(temp_ds_factor) # } else { if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else { # if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else { if(length(intersect(downsample_factor_col,downsample_factor_row))==0) { if(length(downsample_factor_row)>length(downsample_factor_col)){ submatrix<-averageMatrixEdges(submatrix,dimension="column")} else { if(length(downsample_factor_row)<length(downsample_factor_col)){submatrix<-averageMatrixEdges(submatrix,dimension="row")} if(length(downsample_factor_row)==length(downsample_factor_col) & nrow(downsample_outcomes_row)>=nrow(downsample_outcomes_col)){submatrix<-averageMatrixEdges(submatrix,dimension="column")} if(length(downsample_factor_row)==length(downsample_factor_col) & nrow(downsample_outcomes_row)<nrow(downsample_outcomes_col)){submatrix<-averageMatrixEdges(submatrix,dimension="row")} }#end else } } #end outer else if(debug){print(paste0("col factors:",downsample_factor_col)) print(paste0("row factors:",downsample_factor_row)) print(paste0("current submatrix dimensions:",paste0(dim(submatrix))))} } #end while if(nrow(submatrix)==ncol(submatrix)){if(length(downsample_factor_col)>0){downsample_factor<-min(downsample_factor_col)}} if(nrow(submatrix)!=ncol(submatrix)){if(length(intersect(downsample_factor_row,downsample_factor_col))>0){downsample_factor<-min(intersect(downsample_factor_row,downsample_factor_col))}} if(debug){ print(paste0("final col:",downsample_factor_col)) print(paste0("final row:",downsample_factor_row))} if(length(downsample_factor)==1) { submatrix_downsample<-downsample_genomic_matrix(submatrix,downsample_factor,singlechromosome = T) } else { submatrix_downsample<-submatrix } if(transpose){concatenated_gene_matrix<-getAnnotationMatrix(t(submatrix_downsample),prot_only = T,flip_row_col=T,sequential=T)} else{ concatenated_gene_matrix<-getAnnotationMatrix(submatrix_downsample,prot_only = T, flip_row_col=T,sequential = T)} concatenated_gene_matrix.m<-melt(concatenated_gene_matrix) concatenated_gene_matrix.m$Var1<-rownames(submatrix_downsample)[concatenated_gene_matrix.m$Var1] concatenated_gene_matrix.m$Var2<-colnames(submatrix_downsample)[concatenated_gene_matrix.m$Var2] if(transpose){ if(rescale==T){ggplotmatrix<-t(submatrix_downsample) %>% as.matrix() %>% signedRescale() %>% melt() %>% dplyr::bind_cols(concatenated_gene_matrix.m)} if(rescale==F) {ggplotmatrix<-t(submatrix_downsample) %>% as.matrix() %>% melt() %>% dplyr::bind_cols(concatenated_gene_matrix.m)} } else { if(rescale==T){ ggplotmatrix<-submatrix_downsample %>% as.matrix() %>% signedRescale() %>% melt() %>% dplyr::bind_cols(concatenated_gene_matrix.m)} if(rescale==F){ ggplotmatrix<-submatrix_downsample %>% as.matrix() %>% melt() %>% 
dplyr::bind_cols(concatenated_gene_matrix.m)} } if(!is.null(extra_data_matrix)) { #need to also downsample the extra_data_df to match the positions of the ggplotmatrix$Var1 and Var2 (which should be equal). ggplotmatrix$Var1<-as.character(ggplotmatrix$Var1) ggplotmatrix$Var2<-as.character(ggplotmatrix$Var2) row_merged_ggplotmatrix<-merge(ggplotmatrix,extra_data_df,by.x="Var1",by.y="pos",suffixes=c("","row")) #var1 row Var2 Column, y x. row_col_merged_ggplotmatrix<-merge(row_merged_ggplotmatrix,extra_data_df,by.x="Var2",by.y="pos",suffixes=c("","col")) print(utils::head(row_col_merged_ggplotmatrix,n=1)) if(transpose){if(saveToDisk){save("row_col_merged_ggplotmatrix",file=paste0(chromosomes[chrom1],chromosomes[chrom2],"melted_with_external_data_transposed",".RData"))}} else {if(saveToDisk){save("row_col_merged_ggplotmatrix",file=paste0(chromosomes[chrom1],chromosomes[chrom2],"melted_with_external_data",".RData"))}} } else { #write.csv(ggplotmatrix,paste0(chromosomes[chrom1],chromosomes[chrom2],"melted",".csv"),row.names = F) if(saveToDisk){save("ggplotmatrix",file=paste0(chromosomes[chrom1],chromosomes[chrom2],"melted",".RData"))} #rm("ggplotmatrix") } return(ggplotmatrix) }
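#Illustrative sketch of the factor search above: candidate downsampling factors are the
#divisors of the current dimension, kept only when the downsampled size lands inside
#[desired_range_start, desired_range_end]. divisors() is assumed here to be the same
#helper the function uses (e.g. numbers::divisors):
# n <- 1107                          # e.g. number of chr1 bins
# d <- numbers::divisors(n)          # 1 3 9 27 41 123 369 1107
# d[n / d >= 50 & n / d <= 300]
# # [1] 9   (downsampling by a factor of 9 gives 123 bins)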
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/writeAsymmetricMeltedMatrixToDisk.R
#' Write a matrix, with genes, of a submatrix of a whole genome interaction matrix to disk.
#'
#' Writes an RData file containing a melted, ggplot2-ready matrix in the current directory.
#' @keywords ggplot2 plotly ggiraph genomic matrix
#' @import magrittr
#' @param whole_genome_matrix A matrix to have edges averaged with genomic coordinates in the form chr1_50_100 set as the column and row names.
#' @param chrom1 first chromosome of the two which will subset the matrix. (this is done in row-column fashion).
#' @param chrom2 second chromosome of the two which will subset the matrix. (this is done in row-column fashion).
#' @param extra_data_matrix A matrix with additional variables about each point, one position per row with as many variables as remaining columns.
#' @param filename the filename to be written
#' @param transpose transpose the matrix?
#' @param sequential Disable parallelization with doParallel? registerDoSEQ() is used for this.
#' @param debug verbose output for debugging
#' @param desired_range_start the downsampled matrix must be of this size (rows & cols) at minimum
#' @param desired_range_end the downsampled matrix must be of this size (rows & cols) at maximum
#' @return ggplotmatrix a matrix with values sufficient to create a ggplot2 heatmap with geom_tile() or with ggiraph's geom_tile_interactive()
#' @examples
#' load(system.file("extdata","grch37.rda",package = "CNVScope"))
#' load(system.file("extdata","nbl_result_matrix_sign_small.rda",package = "CNVScope"))
#' load(system.file("extdata","ensembl_gene_tx_table_prot.rda",package = "CNVScope"))
#' writeMeltedChromosomalMatrixToDisk(whole_genome_matrix = nbl_result_matrix_sign_small,
#' chrom1 = 1,chrom2 = 1,desired_range_start = 25, desired_range_end = 25)
#' file.remove("chr1_chr1_melted.RData")
#' @export
writeMeltedChromosomalMatrixToDisk<-function(whole_genome_matrix,chrom1,chrom2,filename,extra_data_matrix=NULL,transpose=F,sequential=T,debug=T,desired_range_start=50,desired_range_end=300)
{
  #importFrom IRanges IRanges
  if(!is.null(extra_data_matrix))
  {
    extra_data_matrix_fn<-extra_data_matrix
    extra_data_df<-data.table::fread(extra_data_matrix,data.table=F)
  }
  chromosomes<-paste0("chr",c(seq(1:22),"X"),"_")
  submatrix<-whole_genome_matrix[grep(chromosomes[chrom1],rownames(whole_genome_matrix)),grep(chromosomes[chrom2],colnames(whole_genome_matrix))]
  #insert intra code here for compatibility, remembering the bit at the end of the while loops.
downsample_factor<-NULL
desired_range<-IRanges::IRanges(desired_range_start,desired_range_end)
downsample_factor_row<-NULL
downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix))))
colnames(downsample_outcomes_row)<-c("factor","downsampled_size")
downsample_factor_col<-NULL
downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix))))
colnames(downsample_outcomes_col)<-c("factor","downsampled_size")
while(length(intersect(downsample_factor_col,downsample_factor_row))==0 & (nrow(submatrix)>desired_range_start & ncol(submatrix)>desired_range_start))
{
downsample_factor<-NULL
downsample_factor_row<-NULL
downsample_factor_col<-NULL
downsample_outcomes<-NULL
while(length(downsample_factor_col)==0)
{
downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix))))
colnames(downsample_outcomes_row)<-c("factor","downsampled_size")
downsample_factor_row<-downsample_outcomes_row[downsample_outcomes_row$downsampled_size>=desired_range@start & downsample_outcomes_row$downsampled_size<=(desired_range@start+desired_range@width),"factor"]
downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix))))
colnames(downsample_outcomes_col)<-c("factor","downsampled_size")
downsample_factor_col<-downsample_outcomes_col[downsample_outcomes_col$downsampled_size>=desired_range@start & downsample_outcomes_col$downsampled_size<=(desired_range@start+desired_range@width),"factor"]
submatrix_temp<-submatrix
if(debug){
print(paste0("col factors:",downsample_factor_col))
print(paste0("row factors in desired range:",downsample_factor_row))
print(paste0("current submatrix dimensions:",paste0(dim(submatrix))))
print(paste0("row factors (including those outside desired range):",paste(downsample_outcomes_row$factor)))
print(paste0("col factors (including those outside desired range):",paste(downsample_outcomes_col$factor)))}
#browser()
#temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1]
if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1)
{submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else {
# if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {
# temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1]
# if(ncol(submatrix)/temp_ds_factor>=desired_range_start & nrow(submatrix)/temp_ds_factor>=desired_range_start){
# submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=temp_ds_factor)
# }
# rm(temp_ds_factor)
# } else {
if(length(downsample_factor_col)==0 & ncol(submatrix)>(desired_range@start+1)){submatrix<-averageMatrixEdges(submatrix,dimension = "column")}
}
#only average edges if there isn't another way to downsample first.
#I should change this such that it doesn't downsample too much. This will likely be another edge case.
} #downsample_outcomes_col<-downsample_outcomes #downsample_factor_col<-downsample_factor downsample_factor<-NULL if(debug){ print(paste0("col factors:",downsample_factor_col)) print(paste0("row factors:",downsample_factor_row)) print(paste0("current submatrix dimensions:",paste0(dim(submatrix)))) } while(length(downsample_factor_row)==0) { downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix)))) colnames(downsample_outcomes_col)<-c("factor","downsampled_size") downsample_factor_col<-downsample_outcomes_col[downsample_outcomes_col$downsampled_size>=desired_range@start & downsample_outcomes_col$downsampled_size<=(desired_range@start+desired_range@width),"factor"] downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix)))) colnames(downsample_outcomes_row)<-c("factor","downsampled_size") downsample_factor_row<-downsample_outcomes_row[downsample_outcomes_row$downsampled_size>=desired_range@start & downsample_outcomes_row$downsampled_size<=(desired_range@start+desired_range@width),"factor"] submatrix_temp<-submatrix if(debug){ print(paste0("row factors:",downsample_factor_row)) print(paste0("col factors:",downsample_factor_col)) print(paste0("row factors in desired range:",downsample_factor_row)) print(paste0("current submatrix dimensions:",paste0(dim(submatrix)))) print(paste0("row factors (including those outside desired range):",paste(downsample_outcomes_row$factor))) print(paste0("col factors (including those outside desired range):",paste(downsample_outcomes_col$factor)))} #browser() if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else { # if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) { # temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1] # if(ncol(submatrix)/temp_ds_factor>=desired_range_start & nrow(submatrix)/temp_ds_factor>=desired_range_start){ # submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=temp_ds_factor) # } # rm(temp_ds_factor) # } else { if(length(downsample_factor_row)==0 & nrow(submatrix)>(desired_range@start+1)){submatrix<-averageMatrixEdges(submatrix,dimension = "row")} } } #downsample_outcomes_row<-downsample_outcomes #downsample_factor_row<-downsample_factor #browser() downsample_outcomes_col<-as.data.frame(cbind(divisors(ncol(submatrix)),ncol(submatrix)/divisors(ncol(submatrix)))) colnames(downsample_outcomes_col)<-c("factor","downsampled_size") downsample_factor_col<-downsample_outcomes_col[downsample_outcomes_col$downsampled_size>=desired_range@start & downsample_outcomes_col$downsampled_size<=(desired_range@start+desired_range@width),"factor"] downsample_outcomes_row<-as.data.frame(cbind(divisors(nrow(submatrix)),nrow(submatrix)/divisors(nrow(submatrix)))) colnames(downsample_outcomes_row)<-c("factor","downsampled_size") downsample_factor_row<-downsample_outcomes_row[downsample_outcomes_row$downsampled_size>=desired_range@start & downsample_outcomes_row$downsampled_size<=(desired_range@start+desired_range@width),"factor"] submatrix_temp<-submatrix #browser() # if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) { # 
temp_ds_factor<-as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1] # if(ncol(submatrix)/temp_ds_factor>=desired_range_start & nrow(submatrix)/temp_ds_factor>=desired_range_start){ # submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=temp_ds_factor) # } # rm(temp_ds_factor) # } else { if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else { # if(length(intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor))>1) {submatrix<-downsample_genomic_matrix(whole_matrix=submatrix,downsamplefactor=as.integer(grep(1,intersect(downsample_outcomes_row$factor,downsample_outcomes_col$factor),value=T,invert=T))[1])} else { if(length(intersect(downsample_factor_col,downsample_factor_row))==0) { if(length(downsample_factor_row)>length(downsample_factor_col)){ submatrix<-averageMatrixEdges(submatrix,dimension="row")} else { if(length(downsample_factor_row)<length(downsample_factor_col)){submatrix<-averageMatrixEdges(submatrix,dimension="column")} if(length(downsample_factor_row)==length(downsample_factor_col) & nrow(downsample_outcomes_row)>=nrow(downsample_outcomes_col)){submatrix<-averageMatrixEdges(submatrix,dimension="column")} if(length(downsample_factor_row)==length(downsample_factor_col) & nrow(downsample_outcomes_row)<nrow(downsample_outcomes_col)){submatrix<-averageMatrixEdges(submatrix,dimension="column")} }#end else } } #end outer else if(debug){print(paste0("col factors:",downsample_factor_col)) print(paste0("row factors:",downsample_factor_row)) print(paste0("current submatrix dimensions:",paste0(dim(submatrix))))} } #end while if(nrow(submatrix)==ncol(submatrix)){if(length(downsample_factor_col)>0){downsample_factor<-min(downsample_factor_col)}} if(nrow(submatrix)!=ncol(submatrix)){if(length(intersect(downsample_factor_row,downsample_factor_col))>0){downsample_factor<-min(intersect(downsample_factor_row,downsample_factor_col))}} if(debug){ print(paste0("final col:",downsample_factor_col)) print(paste0("final row:",downsample_factor_row))} if(is.null(downsample_factor)){downsample_factor<-1} submatrix_downsample<-downsample_genomic_matrix(submatrix,downsample_factor,singlechromosome = T) if(transpose){ concatenated_gene_matrix<-getAnnotationMatrix(t(submatrix_downsample),prot_only = T,flip_row_col=T,sequential=T)} else{ concatenated_gene_matrix<-getAnnotationMatrix(submatrix_downsample,prot_only = T,flip_row_col=T,sequential=T)} concatenated_gene_matrix.m<-melt(concatenated_gene_matrix) concatenated_gene_matrix.m$Var1<-rownames(submatrix_downsample)[concatenated_gene_matrix.m$Var1] concatenated_gene_matrix.m$Var2<-colnames(submatrix_downsample)[concatenated_gene_matrix.m$Var2] if(transpose){ ggplotmatrix<-t(submatrix_downsample) %>% as.matrix() %>% signedRescale() %>% melt() %>% dplyr::bind_cols(concatenated_gene_matrix.m) } else { ggplotmatrix<-submatrix_downsample %>% as.matrix() %>% signedRescale() %>% melt() %>% dplyr::bind_cols(concatenated_gene_matrix.m) } if(!is.null(extra_data_matrix)) { #need to also downsample the extra_data_df to match the positions of the ggplotmatrix$Var1 and Var2 (which should be equal). 
ggplotmatrix$Var1<-as.character(ggplotmatrix$Var1) ggplotmatrix$Var2<-as.character(ggplotmatrix$Var2) row_merged_ggplotmatrix<-merge(ggplotmatrix,extra_data_df,by.x="Var1",by.y="pos",suffixes=c("","row")) #var1 row Var2 Column, y x. row_col_merged_ggplotmatrix<-merge(row_merged_ggplotmatrix,extra_data_df,by.x="Var2",by.y="pos",suffixes=c("","col")) print(utils::head(row_col_merged_ggplotmatrix,n=1)) if(transpose){save("row_col_merged_ggplotmatrix",file=paste0(chromosomes[chrom1],chromosomes[chrom2],"melted_with_external_data_transposed",".RData"))} else {save("row_col_merged_ggplotmatrix",file=paste0(chromosomes[chrom1],chromosomes[chrom2],"melted_with_external_data",".RData"))} } else { #write.csv(ggplotmatrix,paste0(chromosomes[chrom1],chromosomes[chrom2],"melted",".csv"),row.names = F) save("ggplotmatrix",file=paste0(chromosomes[chrom1],chromosomes[chrom2],"melted",".RData")) #rm("ggplotmatrix") } return(ggplotmatrix) }
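## --- Illustrative sketch (editor's addition, not part of the original function) ---
## The loop above searches for a divisor shared by both matrix dimensions that
## yields a downsampled size inside the desired range. A standalone illustration,
## assuming divisors() from the numbers package and hypothetical dimensions/range
## (wrapped in if (FALSE) so that sourcing this file does not execute it):
if (FALSE) {
  n_rows <- 400; n_cols <- 600   # hypothetical matrix dimensions
  lo <- 100; hi <- 150           # hypothetical desired size range
  common <- setdiff(intersect(numbers::divisors(n_rows),
                              numbers::divisors(n_cols)), 1)  # drop the no-op factor 1
  # keep only factors whose downsampled sizes land inside [lo, hi] for both dimensions
  common[n_rows/common >= lo & n_rows/common <= hi &
         n_cols/common >= lo & n_cols/common <= hi]           # yields 4 for these dimensions
}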
/scratch/gouwar.j/cran-all/cranData/CNVScope/R/writeMeltedChromosomalMatrixToDisk.R
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_knit$set(root.dir = '.')
library(CNVScope)
options(scipen=999)
library(magrittr)

## ----aml_files,eval=F,echo=T--------------------------------------------------
#  if(!dir.exists("extracted_aml_data")){dir.create("extracted_aml_data")}
#  untar("gdc_download_aml.tar.gz",exdir = "./extracted_aml_data")
#  target_files_aml<-list.files(path = "extracted_aml_data",pattern=glob2rx("*NormalVsPrimary.tsv"),recursive=T,full.names = T)
#  print(target_files_aml)

## ----eval=F,echo=T------------------------------------------------------------
#  sample_aggregated_segvals_aml<-formSampleMatrixFromRawGDCData(tcga_files = target_files_aml,format = "TARGET")
#  saveRDS(sample_aggregated_segvals_aml,"aml_sample_matched_input_matrix.rds")

## ----aml_plots, eval=T,echo=T-------------------------------------------------
sample_aggregated_segvals_aml<-readRDS("aml_sample_matched_input_matrix.rds")
invariant_bins<-which((sample_aggregated_segvals_aml[stringr::str_detect(rownames(sample_aggregated_segvals_aml),"chr7"),] %>% t() %>% as.data.frame() %>% sapply(sd))==0)
chr_7_mat<-sample_aggregated_segvals_aml[(stringr::str_detect(rownames(sample_aggregated_segvals_aml),"chr7") & rownames(sample_aggregated_segvals_aml) %in% setdiff(rownames(sample_aggregated_segvals_aml),names(invariant_bins))),] %>% t()

## ----chr7_cor------------------------------------------------------------------
chr_7_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
             y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
             fill=value)) + geom_raster() +
  theme(axis.text.x = element_blank(),axis.text.y=element_blank(),axis.title = element_blank()) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))

## ----breakpoints---------------------------------------------------------------
if((Sys.info()['sysname'] == "Linux" | Sys.info()['sysname'] == "Windows")&requireNamespace("HiCseg",quietly = T)){
colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))]
breakpoints<-colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))] %>% stringr::str_split_fixed(string = .,pattern="_",n=3) %>% as.matrix() %>% .[,2] %>% as.numeric()
breakpoint_labels <- colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))]
breakpoint_labels} else {
colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col]
breakpoints<-colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col] %>% stringr::str_split_fixed(string = .,pattern="_",n=3) %>% as.matrix() %>% .[,2] %>% as.numeric()
breakpoint_labels <- colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col]
breakpoint_labels
}

## ----breakpoint_plot-----------------------------------------------------------
chr_7_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
             y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
             fill=value)) + geom_raster() +
  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank(),axis.title = element_blank()) +
  scale_x_continuous(breaks=breakpoints,labels=breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))

## ----probdist------------------------------------------------------------------
if(requireNamespace("smoothie",quietly=T)){
chr_7_probdist <- CNVScope::calcCNVKernelProbDist(cor(chr_7_mat,use="pairwise.complete.obs"),parallel=F)$percentile_matrix
js_breakpoints<-jointseg::jointSeg(chr_7_probdist,K=20)$bestBkp
js_breakpoint_labels<-colnames(chr_7_mat)[js_breakpoints]
} else{
print("Please install smoothie in order to run this example.")
}

## ----plot_probdist-------------------------------------------------------------
if(requireNamespace("smoothie",quietly=T)){
chr_7_probdist %>%
#  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=Var1,
             y=Var2,
             fill=value)) + geom_tile() +
#  theme(axis.title = element_blank()) + #axis.text.x = element_blank(),axis.text.y=element_blank(),
  theme(axis.text.x = element_text(angle=90, hjust=1),
        axis.text.y = element_text(angle=0, hjust=1)
        ,axis.title = element_blank()) +
#  scale_x_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
#  scale_y_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
  scale_x_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
  scale_y_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
} else{
print("Please install smoothie in order to run this example.")
}

## ----census_data,eval=F--------------------------------------------------------
#  census_data <- readRDS(system.file("censushg19.rds",package = "CNVScope"))
#  census_data[census_data@seqnames %in% "chr7"] %>% sort() %>% tibble::as_tibble() %>% janitor::clean_names() %>% dplyr::select(seqnames,start,end,gene_symbol,tumour_types_somatic,tumour_types_germline) %>% dplyr::filter(start>60e6,stringr::str_detect(string = tumour_types_somatic,pattern="AML") | stringr::str_detect(string = tumour_types_germline,pattern="AML"))

## ----blca_files,eval=F,echo=T--------------------------------------------------
#  if(!dir.exists("extracted_blca_data")){dir.create("extracted_blca_data")
#  untar("gdc_download_blca.tar.gz",exdir = "./extracted_blca_data")}
#  tcga_files_blca<-list.files(path = "extracted_blca_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
#  print(tcga_files_blca)

## ----eval=F,echo=T-------------------------------------------------------------
#  sample_aggregated_segvals_blca<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_blca,format = "TCGA",parallel=T)
#  saveRDS(sample_aggregated_segvals_blca,"blca_sample_matched_input_matrix.rds")

## ----blca_plots, eval=T,echo=T-------------------------------------------------
sample_aggregated_segvals_blca<-readRDS("blca_sample_matched_input_matrix.rds")
invariant_bins<-which((sample_aggregated_segvals_blca[stringr::str_detect(rownames(sample_aggregated_segvals_blca),"chr17"),] %>% t() %>% as.data.frame() %>% sapply(sd))==0)
chr_17_mat<-sample_aggregated_segvals_blca[(stringr::str_detect(rownames(sample_aggregated_segvals_blca),"chr17") & rownames(sample_aggregated_segvals_blca) %in%
setdiff(rownames(sample_aggregated_segvals_blca),names(invariant_bins))),] %>% t()

## ----chr17_cor-----------------------------------------------------------------
chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
             y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
             fill=value)) + geom_raster() +
  theme(axis.text.x = element_blank(),axis.text.y=element_blank(),axis.title = element_blank()) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))

## ----probdist_chr17------------------------------------------------------------
if(requireNamespace("smoothie",quietly=T)){
chr_17_probdist <- CNVScope::calcCNVKernelProbDist(cor(chr_17_mat,use="pairwise.complete.obs"),parallel=F)$percentile_matrix
colnames(chr_17_probdist)<-colnames(chr_17_mat)
rownames(chr_17_probdist)<-colnames(chr_17_mat)
chr_17_js_breakpoints<-jointseg::jointSeg(chr_17_probdist,K=40)$bestBkp
chr_17_js_breakpoint_labels<-colnames(cor(chr_17_mat))[chr_17_js_breakpoints]
chr_17_js_breakpoint_labels
} else{
print("Please install smoothie in order to run this example.")
}

## ----breakpoint_plot_chr17,eval=F----------------------------------------------
#  breakpoint_plot_probdist <- chr_17_probdist %>%
#  #  cor(use="pairwise.complete.obs",method="pearson") %>%
#  CNVScope::signedRescale(max_cap=1) %>%
#  reshape2::melt() %>%
#  dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
#  row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
#  rel_prob=value) %>%
#  ggplot(aes(x=col_pos,
#  y=row_pos,
#  fill=rel_prob)) + geom_raster() +
#  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) +
#  scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) +
#  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) +
#  labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 relationship probability") +
#  geom_contour(binwidth = .395, aes(z = value))
#  breakpoint_plot <- chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
#  CNVScope::signedRescale(max_cap=1) %>%
#  reshape2::melt() %>%
#  dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
#  row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
#  correlation=value) %>%
#  ggplot(aes(x=col_pos,
#  y=row_pos,
#  fill=correlation)) + geom_raster() +
#  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) +
#  scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) +
#  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) +
#  labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 linear relationship domains") +
#  geom_contour(binwidth = .395, aes(z = value))
#  breakpoint_plot_corr_diff <- ((chr_17_mat %>% cor(use="pairwise.complete.obs",method="spearman"))-(chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson"))) %>%
#  CNVScope::signedRescale(max_cap=1) %>%
#  reshape2::melt() %>%
#  dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
#  row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
#  corr_diff=value) %>%
#  ggplot(aes(x=col_pos,
#  y=row_pos,
#  fill=corr_diff)) + geom_raster() +
#  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) +
#  scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) +
#  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) +
#  labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 nonlinear (red) relationship regions, inferred by nonlinear-linear correlation difference") +
#  geom_contour(binwidth = .395, aes(z = value))
#  breakpoint_plot
#  breakpoint_plot_probdist
#  breakpoint_plot_corr_diff

## ----plotly_blca,eval=F--------------------------------------------------------
#  library(plotly)
#  breakpoint_plot %>% plotly::ggplotly()

## ----3D_blca,eval=F------------------------------------------------------------
#  chr_17_long <- chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
#  CNVScope::signedRescale(max_cap=1) %>%
#  reshape2::melt() %>%
#  dplyr::mutate(col_pos=as.numeric(reshape2::colsplit(Var1,"_",c("chr","start","end"))$start),
#  row_pos=as.numeric(reshape2::colsplit(Var2,"_",c("chr","start","end"))$start),
#  correlation=value) %>% dplyr::select(col_pos,row_pos,correlation)
#  plot_ly(data = chr_17_long, x=chr_17_long$col_pos,y=chr_17_long$row_pos,z=chr_17_long$correlation,color=c(0,0.5,1),colors=circlize::colorRamp(c("blue","white","red")),intensity=chr_17_long$correlation,type = "mesh3d")

## ----skcm_files,eval=F,echo=T--------------------------------------------------
#  if(!dir.exists("extracted_skcm_data")){dir.create("extracted_skcm_data")}
#  untar("gdc_download_skcm.tar.gz",exdir = "./extracted_skcm_data")
#  tcga_files_skcm<-list.files(path = "extracted_skcm_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
#  print(tcga_files_skcm)

## ----eval=F,echo=T-------------------------------------------------------------
#  #ptm <- proc.time()
#  #doMC::registerDoMC()
#  #doParallel::registerDoParallel()
#  sample_aggregated_segvals_skcm<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_skcm,format = "TCGA",parallel = T)
#  #proc.time() - ptm
#  saveRDS(sample_aggregated_segvals_skcm,"skcm_sample_matched_input_matrix.rds")

## ----eval=F,echo=T-------------------------------------------------------------
#  if(!dir.exists("extracted_prad_data")){dir.create("extracted_prad_data")
#  untar("gdc_download_prad.tar.gz",exdir = "extracted_prad_data")}
#  tcga_files_prad<-list.files(path = "extracted_prad_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
#  print(tcga_files_prad)

## ----eval=F,echo=T-------------------------------------------------------------
#  sample_aggregated_segvals_output_full_prad<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_prad,format = "TCGA",binsize=1e6)
#  saveRDS(sample_aggregated_segvals_output_full_prad,"PRAD_sample_matched_input_matrix.rds")
#
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/additonal_examples.R
<style>
.main-container {
  max-width: 1200px !important;
}
</style>
<style type="text/css">
.main-container {
  max-width: 1200px;
  margin-left: auto;
  margin-right: auto;
}
</style>
---
title: "Additional Examples"
author: "James Dalgleish"
date: "7/9/2019"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Additional Visualization Examples}
  %\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_knit$set(root.dir = '.')
library(CNVScope)
options(scipen=999)
library(magrittr)
```
This vignette was created to showcase additional cancers and to highlight lesser-known features of the package.

Additional examples:

[TCGA-BLCA](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TCGA-BLCA%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D)

[TARGET-AML](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TARGET-AML%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D)

[TCGA-SKCM](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TCGA-SKCM%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D)

[TCGA-PRAD](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TCGA-PRAD%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D)

# Acute Myeloid Leukemia (AML)
There are fewer TARGET datasets available than TCGA, so we'll do AML first. We've named our downloaded archive gdc_download_aml.tar.gz.
```{r aml_files,eval=F,echo=T}
if(!dir.exists("extracted_aml_data")){dir.create("extracted_aml_data")}
untar("gdc_download_aml.tar.gz",exdir = "./extracted_aml_data")
target_files_aml<-list.files(path = "extracted_aml_data",pattern=glob2rx("*NormalVsPrimary.tsv"),recursive=T,full.names = T)
print(target_files_aml)
```
```{r,eval=F,echo=T}
sample_aggregated_segvals_aml<-formSampleMatrixFromRawGDCData(tcga_files = target_files_aml,format = "TARGET")
saveRDS(sample_aggregated_segvals_aml,"aml_sample_matched_input_matrix.rds")
```
Now that we've created and saved the AML matrix, let's visualize it with a quick correlation map of a single chromosome: chromosome 7, the location of the BRAF gene and the nearby EZH2 gene. The BRAF gene (chr7:140419127-140624564) is a locus for recurrent copy number aberrations (Reference: Tarlock et al., Recurrent Copy Number Variants Are Highly Prevalent in Acute Myeloid Leukemia. Blood 2017; 130 (Supplement 1): 3800).

Some of the bins are invariant, and correlation requires a nonzero standard deviation (otherwise the correlation cannot be calculated). We will remove these bins in a couple of steps and transpose the matrix, making the bins the columns and the samples the rows.
```{r aml_plots, eval=T,echo=T}
sample_aggregated_segvals_aml<-readRDS("aml_sample_matched_input_matrix.rds")
invariant_bins<-which((sample_aggregated_segvals_aml[stringr::str_detect(rownames(sample_aggregated_segvals_aml),"chr7"),] %>% t() %>% as.data.frame() %>% sapply(sd))==0)
chr_7_mat<-sample_aggregated_segvals_aml[(stringr::str_detect(rownames(sample_aggregated_segvals_aml),"chr7") & rownames(sample_aggregated_segvals_aml) %in% setdiff(rownames(sample_aggregated_segvals_aml),names(invariant_bins))),] %>% t()
```
Now we'll compute the correlation matrix and plot it.
```{r chr7_cor}
chr_7_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
             y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
             fill=value)) + geom_raster() +
  theme(axis.text.x = element_blank(),axis.text.y=element_blank(),axis.title = element_blank()) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
```
We can then utilize our domain-finding function to find the borders of domains in the matrix. There's an obvious disruption near the left center of the matrix (note the blue streaks of anticorrelation).
```{r breakpoints}
if((Sys.info()['sysname'] == "Linux" | Sys.info()['sysname'] == "Windows")&requireNamespace("HiCseg",quietly = T)){
colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))]
breakpoints<-colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))] %>% stringr::str_split_fixed(string = .,pattern="_",n=3) %>% as.matrix() %>% .[,2] %>% as.numeric()
breakpoint_labels <- colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))]
breakpoint_labels} else {
colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col]
breakpoints<-colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col] %>% stringr::str_split_fixed(string = .,pattern="_",n=3) %>% as.matrix() %>% .[,2] %>% as.numeric()
breakpoint_labels <- colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col]
breakpoint_labels
}
```
Now we'll make another plot, labeling only the breakpoints.
```{r breakpoint_plot}
chr_7_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
             y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
             fill=value)) + geom_raster() +
  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank(),axis.title = element_blank()) +
  scale_x_continuous(breaks=breakpoints,labels=breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
```
Finally, let's try our probability weighting function on the matrix and see if we can find clearer regions of association. We'll also try another segmentation algorithm, from the jointseg package. In most cases, you can achieve a definite speed increase with the parallel=T option; we have disabled it here to build the vignette without warnings.
```{r probdist}
if(requireNamespace("smoothie",quietly=T)){
chr_7_probdist <- CNVScope::calcCNVKernelProbDist(cor(chr_7_mat,use="pairwise.complete.obs"),parallel=F)$percentile_matrix
js_breakpoints<-jointseg::jointSeg(chr_7_probdist,K=20)$bestBkp
js_breakpoint_labels<-colnames(chr_7_mat)[js_breakpoints]
} else{
print("Please install smoothie in order to run this example.")
}
```
We'll notice that using this combination of techniques, we've managed to pick up an AML driver between domain endpoints, in a large region where the association is less than expected (as determined by the calcCNVKernelProbDist function). Within the region between 115 and 120Mb lies the MET gene, where LOH was detected in the paper. It's a pretty obvious signature, perhaps the most obvious in the whole plot. Further, we suggest that there is a minor signal off the diagonal between this region and the location of BRAF (chr7:140419127-140624564, another recurrent CNV in the paper), associated with the border of a region near 16-24Mb. Within this area are PMS2 (a tumor suppressor), RAC1 (an oncogene), and ETV1 (an oncogene). PMS2 alteration has been implicated in AML previously (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3734905/).
We don't claim that this tool is perfect or that it will rapidly reveal all cancer drivers associated with CNVs, but we do suggest that it can, alongside other tools, add to your investigative toolbox to substantiate known drivers and to elucidate new ones.
```{r plot_probdist}
if(requireNamespace("smoothie",quietly=T)){
chr_7_probdist %>%
#  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=Var1,
             y=Var2,
             fill=value)) + geom_tile() +
#  theme(axis.title = element_blank()) + #axis.text.x = element_blank(),axis.text.y=element_blank(),
  theme(axis.text.x = element_text(angle=90, hjust=1),
        axis.text.y = element_text(angle=0, hjust=1)
        ,axis.title = element_blank()) +
#  scale_x_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
#  scale_y_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
  scale_x_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
  scale_y_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
} else{
print("Please install smoothie in order to run this example.")
}
```
Using the code below, we can find a few more genes to explore that are known to be associated with AML in the COSMIC cancer gene census. This requires the CNVScope public data package to be installed properly.
```{r census_data,eval=F}
census_data <- readRDS(system.file("censushg19.rds",package = "CNVScope"))
census_data[census_data@seqnames %in% "chr7"] %>% sort() %>% tibble::as_tibble() %>% janitor::clean_names() %>% dplyr::select(seqnames,start,end,gene_symbol,tumour_types_somatic,tumour_types_germline) %>% dplyr::filter(start>60e6,stringr::str_detect(string = tumour_types_somatic,pattern="AML") | stringr::str_detect(string = tumour_types_germline,pattern="AML"))
```

# TCGA Bladder Cancer (BLCA)

Now for a TCGA set, let's try a bladder cancer dataset:
```{r blca_files,eval=F,echo=T}
if(!dir.exists("extracted_blca_data")){dir.create("extracted_blca_data")
untar("gdc_download_blca.tar.gz",exdir = "./extracted_blca_data")}
tcga_files_blca<-list.files(path = "extracted_blca_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
print(tcga_files_blca)
```
```{r,eval=F,echo=T}
sample_aggregated_segvals_blca<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_blca,format = "TCGA",parallel=T)
saveRDS(sample_aggregated_segvals_blca,"blca_sample_matched_input_matrix.rds")
```
For bladder cancer (BLCA), we'll look at ERBB2, mentioned in [this article](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3962515/) as having amplifications in up to 5% of samples. We may not be able to detect it in our sample, but we can show that our method does indeed process the TCGA data format.
```{r blca_plots, eval=T,echo=T}
sample_aggregated_segvals_blca<-readRDS("blca_sample_matched_input_matrix.rds")
invariant_bins<-which((sample_aggregated_segvals_blca[stringr::str_detect(rownames(sample_aggregated_segvals_blca),"chr17"),] %>% t() %>% as.data.frame() %>% sapply(sd))==0)
chr_17_mat<-sample_aggregated_segvals_blca[(stringr::str_detect(rownames(sample_aggregated_segvals_blca),"chr17") & rownames(sample_aggregated_segvals_blca) %in%
setdiff(rownames(sample_aggregated_segvals_blca),names(invariant_bins))),] %>% t()
```
Now we'll compute the correlation matrix and plot it.
```{r chr17_cor}
chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
             y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
             fill=value)) + geom_raster() +
  theme(axis.text.x = element_blank(),axis.text.y=element_blank(),axis.title = element_blank()) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
```
We can follow the same procedure in this case, though it may not yield quite as dramatic a result; as with most investigative tools, we do not guarantee a winning result every time. Here, we are primarily showing that the tool DOES work on TCGA data: we can confidently say that we created a matched sample matrix with the TCGA format as well as the TARGET format. We recommend setting parallel=T on the next line for speed, but for compatibility with all systems, we have set the code to run without additional cores in this example.
```{r probdist_chr17}
if(requireNamespace("smoothie",quietly=T)){
chr_17_probdist <- CNVScope::calcCNVKernelProbDist(cor(chr_17_mat,use="pairwise.complete.obs"),parallel=F)$percentile_matrix
colnames(chr_17_probdist)<-colnames(chr_17_mat)
rownames(chr_17_probdist)<-colnames(chr_17_mat)
chr_17_js_breakpoints<-jointseg::jointSeg(chr_17_probdist,K=40)$bestBkp
chr_17_js_breakpoint_labels<-colnames(cor(chr_17_mat))[chr_17_js_breakpoints]
chr_17_js_breakpoint_labels
} else{
print("Please install smoothie in order to run this example.")
}
```
With the coordinates for *ERBB2* at chr17:37844167-37886679, we find a precise match in the second breakpoint bin: the gene lies exactly within it. All the breakpoints appear in the plot below. Beyond that, the centromere is precisely identifiable; the map not only shows pathophysiology, but clearly marks this chromosomal landmark as the border between the largest domains. [Click here](https://grch37.ensembl.org/Homo_sapiens/Location/View?r=17:23000001-25000000) to view the Ensembl 75 view of this position, which precisely matches the location of the centromere. Contours have been added to highlight the domains, and we have also plotted the weighted probability of relationships. Finally, we investigated the difference between Spearman (rank, nonlinear) and Pearson (linear) correlation, which tends to highlight a specific region around 37Mb (near ERBB2).
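As a quick sanity check of this claim (an editor's sketch, not part of the original analysis), the breakpoint labels can be split back into coordinates to confirm which breakpoint bin contains *ERBB2*:
```{r erbb2_check, eval=F, echo=T}
# Editor's sketch: find the breakpoint bin containing ERBB2 (chr17:37844167-37886679, hg19).
bkp_coords <- reshape2::colsplit(chr_17_js_breakpoint_labels, "_", c("chr", "start", "end"))
chr_17_js_breakpoint_labels[bkp_coords$start <= 37844167 & bkp_coords$end >= 37886679]
```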
```{r breakpoint_plot_chr17,eval=F}
breakpoint_plot_probdist <- chr_17_probdist %>%
#  cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
                row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
                rel_prob=value) %>%
  ggplot(aes(x=col_pos,
             y=row_pos,
             fill=rel_prob)) + geom_raster() +
  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) +
  scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) +
  labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 relationship probability") +
  geom_contour(binwidth = .395, aes(z = value))
breakpoint_plot <- chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
                row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
                correlation=value) %>%
  ggplot(aes(x=col_pos,
             y=row_pos,
             fill=correlation)) + geom_raster() +
  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) +
  scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) +
  labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 linear relationship domains") +
  geom_contour(binwidth = .395, aes(z = value))
breakpoint_plot_corr_diff <- ((chr_17_mat %>% cor(use="pairwise.complete.obs",method="spearman"))-(chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson"))) %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
                row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
                corr_diff=value) %>%
  ggplot(aes(x=col_pos,
             y=row_pos,
             fill=corr_diff)) + geom_raster() +
  theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) +
  scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) +
  ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) +
  labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 nonlinear (red) relationship regions, inferred by nonlinear-linear correlation difference") +
  geom_contour(binwidth = .395, aes(z = value))
breakpoint_plot
breakpoint_plot_probdist
breakpoint_plot_corr_diff
```
![BLCA, chromosome 17, with contours](./chr17_contour_blca.png)
![BLCA, chromosome 17, probdist](./chr17_probdist_blca.png)
![BLCA, chromosome 17, nonlinear-linear relationship differences](./chr17_corr_diff_blca.png)

For an interactive plot of the bladder cancer interactome (in contour and 3D), try the following. WebGL is required for this exercise. Enable it under the general->advanced RStudio options if you have not already (see https://community.rstudio.com/t/webgl-is-not-supported-by-your-browser-plotly/13962/7).
```{r plotly_blca,eval=F}
library(plotly)
breakpoint_plot %>% plotly::ggplotly()
```
```{r 3D_blca,eval=F}
chr_17_long <- chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
  CNVScope::signedRescale(max_cap=1) %>%
  reshape2::melt() %>%
  dplyr::mutate(col_pos=as.numeric(reshape2::colsplit(Var1,"_",c("chr","start","end"))$start),
                row_pos=as.numeric(reshape2::colsplit(Var2,"_",c("chr","start","end"))$start),
                correlation=value) %>% dplyr::select(col_pos,row_pos,correlation)
plot_ly(data = chr_17_long, x=chr_17_long$col_pos,y=chr_17_long$row_pos,z=chr_17_long$correlation,color=c(0,0.5,1),colors=circlize::colorRamp(c("blue","white","red")),intensity=chr_17_long$correlation,type = "mesh3d")
```
We'll briefly demonstrate that SKCM also works with our relationship modeling toolkit.
```{r skcm_files,eval=F,echo=T}
if(!dir.exists("extracted_skcm_data")){dir.create("extracted_skcm_data")}
untar("gdc_download_skcm.tar.gz",exdir = "./extracted_skcm_data")
tcga_files_skcm<-list.files(path = "extracted_skcm_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
print(tcga_files_skcm)
```
```{r,eval=F,echo=T}
#ptm <- proc.time()
#doMC::registerDoMC()
#doParallel::registerDoParallel()
sample_aggregated_segvals_skcm<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_skcm,format = "TCGA",parallel = T)
#proc.time() - ptm
saveRDS(sample_aggregated_segvals_skcm,"skcm_sample_matched_input_matrix.rds")
```
We have timed this at 142 seconds on a dual-core i7, even with the recent performance-reducing patches. The parallel flag will automatically register the parallel backend for you using the doParallel framework and use the maximum number of cores.

We will also show another TCGA example, for PRAD:
```{r,eval=F,echo=T}
if(!dir.exists("extracted_prad_data")){dir.create("extracted_prad_data")
untar("gdc_download_prad.tar.gz",exdir = "extracted_prad_data")}
tcga_files_prad<-list.files(path = "extracted_prad_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
print(tcga_files_prad)
```
With the full list of input files from the GDC, these can then simply be loaded into a function that reads all of them, sample-matches them, and aggregates the data into a bin-sample matrix. This matrix can then be saved in the fast, space-efficient RDS format.
```{r,eval=F,echo=T}
sample_aggregated_segvals_output_full_prad<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_prad,format = "TCGA",binsize=1e6)
saveRDS(sample_aggregated_segvals_output_full_prad,"PRAD_sample_matched_input_matrix.rds")
```
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/additonal_examples.Rmd
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_knit$set(root.dir = '.')
library(magrittr)

## ----nbl_data_files_png,echo=F------------------------------------------------
knitr::include_graphics("NBL_data_files.png", dpi = 10)

## ----download_from_cart,echo=F------------------------------------------------
knitr::include_graphics("download_from_cart.png", dpi = 10)

## ----nbl_files,eval=F,echo=T--------------------------------------------------
#  if(!dir.exists("extracted_nbl_data")){dir.create("extracted_nbl_data")}
#  untar("gdc_download_20180801_160142.tar.gz",exdir = "extracted_nbl_data")
#  target_files_nbl<-list.files(path = "extracted_nbl_data",pattern=glob2rx("*NormalVsPrimary.tsv"),recursive=T,full.names = T)
#  print(target_files_nbl)

## ----eval=F,echo=T------------------------------------------------------------
#  sample_aggregated_segvals_output_full<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,format = "TARGET")
#  saveRDS(sample_aggregated_segvals_output_full,"NBL_sample_matched_input_matrix.rds")

## ---- eval=F,echo=T------------------------------------------------------------
#  nbl_custom_input_matrix<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
#  format = "custom",binsize = 1e6,freadskip = 14,parallel=F,debug=F,
#  sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
#  startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
#  saveRDS(nbl_custom_input_matrix,"NBL_custom_sample_matched_input_matrix.rds")

## ---- echo=T,eval=F------------------------------------------------------------
#  nbl_custom_input_matrix_hd<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
#  format = "custom",binsize = 2.5e5,freadskip = 14,parallel=T,debug=F,
#  sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
#  startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
#  saveRDS(nbl_custom_input_matrix_hd,"NBL_custom_sample_matched_input_matrix_2.5e5binsize_parallel.rds")

## ---- echo=T,eval=F------------------------------------------------------------
#  nbl_custom_input_matrix_ld<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
#  format = "custom",binsize = 1e8,freadskip = 14,parallel=F,debug=F,
#  sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
#  startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
#  # Note: the original saved nbl_custom_input_matrix (the 1e6-binsize object) here;
#  # the low-density object appears to be the intended one.
#  saveRDS(nbl_custom_input_matrix_ld,"NBL_custom_sample_matched_input_matrix_1e8binsize.rds")
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/create_input_matrix.R
<style>
.main-container {
  max-width: 1200px !important;
}
</style>
<style type="text/css">
.main-container {
  max-width: 1200px;
  margin-left: auto;
  margin-right: auto;
}
</style>
---
title: "Creating Neuroblastoma Input Matrices from public GDC (TARGET) data"
author: "James Dalgleish"
date: "August 1, 2018"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Creating the TARGET Input matrix from public data}
  %\VignetteEncoding{UTF-8}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_knit$set(root.dir = '.')
library(magrittr)
```
We begin by obtaining TARGET low-pass neuroblastoma (NBL) data from the GDC archive. Please note: TARGET_NBL_WGS_CNVLOH.tsv is a clinical metadata file and is therefore not compatible with the ensuing functions that extract segment data. We have also chosen to use only a single comparison type (NormalVsPrimary) to ensure comparability and compatibility with the data. Users can download the tar.gz file and move the tsv files into a single folder; we have already done that here. The source for these files is located [here](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TARGET-NBL%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:101,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D)
```{r nbl_data_files_png,echo=F}
knitr::include_graphics("NBL_data_files.png", dpi = 10)
```
The user simply adds all the files to the cart, then clicks the black cart button in the top right-hand corner.
```{r download_from_cart,echo=F}
knitr::include_graphics("download_from_cart.png", dpi = 10)
```
On the cart page, click download, then cart. The data will be downloaded as a tar.gz archive. You can untar it with R, but the files will be in a complex set of directories, so it is best to list the files recursively with criteria that obtain the segment files in tsv format, restricted to the single comparison of interest.
```{r nbl_files,eval=F,echo=T}
if(!dir.exists("extracted_nbl_data")){dir.create("extracted_nbl_data")}
untar("gdc_download_20180801_160142.tar.gz",exdir = "extracted_nbl_data")
target_files_nbl<-list.files(path = "extracted_nbl_data",pattern=glob2rx("*NormalVsPrimary.tsv"),recursive=T,full.names = T)
print(target_files_nbl)
```
With the full list of input files from the GDC, these can then simply be loaded into a function that reads all of them, sample-matches them, and aggregates the data into a bin-sample matrix. This matrix can then be saved in the fast, space-efficient RDS format.
```{r,eval=F,echo=T}
sample_aggregated_segvals_output_full<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,format = "TARGET")
saveRDS(sample_aggregated_segvals_output_full,"NBL_sample_matched_input_matrix.rds")
```

# Custom Data

For those who wish to add custom data, we have provided a series of options, provided that the file has a single column with copy number values (an illustrative sketch of such a file follows).
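As an illustration only (an editor's sketch, not an actual TARGET file), the tabular section of a compatible custom file might look like the following, using the column names passed to the reader in the example calls below; the 14 metadata lines skipped by freadskip=14 are omitted, and the sample IDs and values are hypothetical:
```
>chr    begin      end        sample     relativeCvg
chr1    0          1000000    SAMPLE1    1.02
chr1    1000000    2000000    SAMPLE1    0.34
```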
We will show that the same result can be obtained with the correct options, specifying the columns of interest, using a completely different section of the function. Use the format="custom" option and be sure to specify the names of the following columns in your text file:

* copy number column
* chromosome
* start position
* end position

In this case, we have also selected freadskip=14, as there are 14 lines before the tabular data begins. The sample pattern is a regular expression set to grab the sample name within the filename; setting sample_pat="" will grab the complete filename and let it denote unique samples (we suggest shorter sample names than this, however). A sample column name (sample_col) can also be given to take sample identifiers from a column on each line of the input files, instead of from the filename or a portion thereof. For further information, please review the help files by typing: ?CNVScope::formSampleMatrixFromRawGDCData
```{r, eval=F,echo=T}
nbl_custom_input_matrix<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
format = "custom",binsize = 1e6,freadskip = 14,parallel=F,debug=F,
sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
saveRDS(nbl_custom_input_matrix,"NBL_custom_sample_matched_input_matrix.rds")
```
With this, your input matrix is complete. Next, please look at the [next vignette, which details recursive linear regression and postprocessing.](create_output_matrix.html)

Optional example for advanced use: a high-definition version is quite simple to create, using the above options, on TARGET, TCGA, or custom format data. Simply decrease the binsize to increase the resolution. In chromosome 2, we have found that the median bin size is 731069 and that smaller chromosomes benefit from binsize reduction more than larger chromosomes (except for chromosome 1). In this case, we decrease our binsize by a factor of 4, increasing the total number of values in the matrix by a factor of 16; it will take longer than the typical time.
```{r, echo=T,eval=F}
nbl_custom_input_matrix_hd<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
format = "custom",binsize = 2.5e5,freadskip = 14,parallel=T,debug=F,
sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
saveRDS(nbl_custom_input_matrix_hd,"NBL_custom_sample_matched_input_matrix_2.5e5binsize_parallel.rds")
```
In the opposite manner, if you want to quickly create a low-density version, this is the way to go about it. It makes the input matrix creation process proceed a great deal faster and requires less RAM to display an interactive map.
```{r, echo=T,eval=F}
nbl_custom_input_matrix_ld<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
format = "custom",binsize = 1e8,freadskip = 14,parallel=F,debug=F,
sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
saveRDS(nbl_custom_input_matrix_ld,"NBL_custom_sample_matched_input_matrix_1e8binsize.rds")
```
A demonstration of varying resolutions, using correlation-based maps (for space reasons), is below. Examples of chr11 are shown in increasing order of resolution (1e8, 1e7, 1e6, 2.5e5, and 1e5 binsize). Note that the resolution doesn't dramatically improve past a binsize of 1e6, despite the time and computational effort required to generate it.
![chr11_1e8](chr11_1e8_nbl.jpg)
![chr11_1e7](chr11_1e7_nbl.jpg)
![chr11_1e6](chr11_1e6_nbl.jpg)
![chr11_2.5e5](chr11_2.5e5_nbl.jpg)
![chr11_1e5](chr11_1e5_nbl.jpg)

A note on the resolution-performance/stability trade-off: we recommend not decreasing the bin size below the mean width of the regions for which you hope to gain sharper images. Further, increasing the resolution beyond what the program was designed for may result in instability. We chose the 1MB bin size because, after many iterations, it creates maps that enable visual appreciation of copy number regions while remaining reasonably responsive and stable for both client and server. Higher-dimension matrices were attempted in earlier versions of development, but they caused browser crashes in larger chromosomal maps and limit usage on multi-user Shiny servers. For these reasons, we still recommend the 1MB binsize.
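If you are unsure which binsize your own data supports, one quick check (an editor's sketch, not part of the original vignette; it assumes the segment files carry the begin/end columns and 14 metadata lines used in the calls above) is to summarize the observed segment widths and choose a binsize near or above their median:
```{r segment_widths, eval=F, echo=T}
# Editor's sketch: summarize segment widths to guide the binsize choice.
seg <- data.table::fread(target_files_nbl[1], skip = 14)
summary(seg$end - seg$begin)
```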
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/create_input_matrix.Rmd
## ----setup, include=F----------------------------------------------------------
knitr::opts_chunk$set(echo = F)
if(Sys.info()['sysname']=="Windows"){groupdir<-"W:/"} else {groupdir<-"/data/CCRBioinfo/"}
#knitr::opts_knit$set(root.dir = paste0(groupdir,"dalgleishjl/hicnv/hicnv/vignettes/"))
#
#setwd("~")
#nbl_input_matrix<-readRDS("NBLTCGA_merged_df_aggregated_by_bin_fixed_comparisonv4.rds")
#getwd()
nbl_input_matrix<-readRDS("NBL_sample_matched_input_matrix.rds")
#nbl_result_matrix<-readRDS("nbl_result_matrix_full.rds")

## ----echo=T,warning=F,message=F-------------------------------------------------
library(CNVScope)

## ----echo=T----------------------------------------------------------------------
nbl_input_matrix[1:5,1:5]

## ---- eval=F,echo=T---------------------------------------------------------------
#  library(parallel)
#  nbl_slurm_object_test_zero_removed<-calcVecLMs(bin_data =as.data.frame(t(nbl_input_matrix[which(rowSds(as.matrix(nbl_input_matrix))!=0.0),])),use_slurm = T,n_nodes = 975,memory_per_node = "32g",walltime = "04:00:00",cpus_on_each_node = 2,job_finished = F,slurmjob = NULL)

## ---- eval=F,echo=T---------------------------------------------------------------
#  saveRDS(nbl_slurm_object_test_zero_removed,"nbl_slurm_object_test_zero_removed.rds")

## ---- eval=F,echo=T---------------------------------------------------------------
#  library(matrixStats)
#  nbl_result_matrix<-matrix(as.numeric(unlist( get_slurm_out(nbl_slurm_object_test_zero_removed))),ncol=ncol(as.data.frame(t(nbl_input_matrix[which(rowSds(as.matrix(nbl_input_matrix))!=0.0),])) ) )
#  saveRDS(nbl_result_matrix,"nbl_result_matrix_full.rds")
#  saveRDS(nbl_result_matrix[1:25,1:25],"nbl_result_matrix_small.rds")

## ---- echo=F,include=T,eval=F------------------------------------------------------
#  #source download locations
#  #download.file("https://github.com/jamesdalg/CNScope_public_data/blob/master/nbl_result_matrix_full.rds?raw=true","nbl_result_matrix_full.rds")
#  #download.file("https://github.com/jamesdalg/CNScope_public_data/blob/master/nbl_result_matrix_sign_corrected.rds","nbl_result_matrix_sign_corrected.rds")
#  #download.file("https://www.dropbox.com/s/sevuhos976c6guu/nbl_result_matrix_full.rds?dl=1","nbl_result_matrix_full.rds")
#  #download.file("https://www.dropbox.com/s/85hii5cd5epuuby/nbl_result_matrix_sign_corrected.rds?dl=1","nbl_result_matrix_sign_corrected.rds")
#  nbl_result_matrix<-readRDS("nbl_result_matrix_full.rds")
#  nbl_result_matrix_sign_corrected<-readRDS("nbl_result_matrix_sign_corrected.rds")

## ----eval=T,echo=T------------------------------------------------------------------
nbl_result_matrix_small<-readRDS("nbl_result_matrix_small.rds")
nbl_result_matrix_small[1:5,1:5]
nbl_result_matrix_sign_corrected<-postProcessLinRegMatrix(input_matrix = nbl_input_matrix[1:25,1:25],LM_mat = nbl_result_matrix_small,cor_type = "pearson",inf_replacement_val = 300)
nbl_result_matrix_sign_corrected[1:5,1:5]

## ----echo=T--------------------------------------------------------------------------
nbl_result_matrix_sign_corrected[1:5,1:5]
if (requireNamespace("ComplexHeatmap", quietly = TRUE) & requireNamespace("circlize", quietly = TRUE)) {
ComplexHeatmap::Heatmap(signedRescale(as.matrix(nbl_result_matrix_sign_corrected)),
col = circlize::colorRamp2(c(0,0.5,1),c("blue","white","red")),
cluster_rows = F,cluster_columns = F,
show_heatmap_legend = F,
show_column_names = F,
show_row_names = F)
} else {
print("ComplexHeatmap not installed.\nPlease install ComplexHeatmap in order to create this plot.")
}

## ----eval=F,echo=T--------------------------------------------------------------------
#  if(!dir.exists("nbl_matrix_set")){dir.create("nbl_matrix_set")}
#  #setwd("nbl_matrix_set")
#  doMC::registerDoMC()
#  #use ONLY the whole matrix with chromosomes 1-X, not the small subset provided for documentation purposes.
#  createChromosomalMatrixSet(whole_genome_mat=nbl_result_matrix_sign_corrected,output_dir="nbl_matrix_set",prefix="nbl_")

## ----echo=T,eval=F---------------------------------------------------------------------
#  list.files("nbl_matrix_set")
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/create_output_matrix.R
--- title: "Creating Linear Regression Matrices from Segment Data" author: "James Dalgleish" date: "July 25, 2018" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Linear Regression/Postprocess} %\VignetteEncoding{UTF-8} --- ```{r setup, include=F} knitr::opts_chunk$set(echo = F) if(Sys.info()['sysname']=="Windows"){groupdir<-"W:/"} else {groupdir<-"/data/CCRBioinfo/"} #knitr::opts_knit$set(root.dir = paste0(groupdir,"dalgleishjl/hicnv/hicnv/vignettes/")) # #setwd("~") #nbl_input_matrix<-readRDS("NBLTCGA_merged_df_aggregated_by_bin_fixed_comparisonv4.rds") #getwd() nbl_input_matrix<-readRDS("NBL_sample_matched_input_matrix.rds") #nbl_result_matrix<-readRDS("nbl_result_matrix_full.rds") ``` From our previous work, we created a small input matrix, with segmented 1Mb regions as our row labels and with sample names from TARGET data as our column labels. We can read that in using the following code: ```{r,echo=T,warning=F,message=F} library(CNVScope) ``` ```{r,echo=T} nbl_input_matrix[1:5,1:5] ``` calcVecLMs() comes standard in the CNVScope package. It allows calculation of the matrix with parallel processing using mclapply, but larger matrices will require a bit more power, and thus we use slurm_apply, from the rslurm pacakge to distribute the work over multiple cores. Our particular establishment has a limit approximating 1000 jobs, so it's best not to use more than that unless your cluster will support it. Conversely, you should use less if you can't submit that many individual jobs in a job array in your cluster. In this particular example, I've removed rows where there is no segmentation data, across the board using colSums(). ```{r, eval=F,echo=T} library(parallel) nbl_slurm_object_test_zero_removed<-calcVecLMs(bin_data =as.data.frame(t(nbl_input_matrix[which(rowSds(as.matrix(nbl_input_matrix))!=0.0),])),use_slurm = T,n_nodes = 975,memory_per_node = "32g",walltime = "04:00:00",cpus_on_each_node = 2,job_finished = F,slurmjob = NULL) ``` Saving the slurm object is essential as it will be required when you retrieve your results. ```{r, eval=F,echo=T} saveRDS(nbl_slurm_object_test_zero_removed,"nbl_slurm_object_test_zero_removed.rds") ``` Retrieving the data is as simple as using rslurm::get_slurm_out() on the saved slurm object and coercing it into a matrix with the original number of columns. The slurm object must have been read with readRDS() previously or done in the same session. For the purposes of making this tutorial, we have chosen to work on a small version of the whole matrix to make 5MB CRAN documentation limits. Previous versions of the tutorial included the whole matrix, but we leave that to the user to construct at this point. For reproducibility, one can find the original full data matrix at https://github.com/jamesdalg/CNVScope_public_data. 
```{r, eval=F,echo=T}
library(matrixStats)
nbl_result_matrix<-matrix(as.numeric(unlist( get_slurm_out(nbl_slurm_object_test_zero_removed))),ncol=ncol(as.data.frame(t(nbl_input_matrix[which(rowSds(as.matrix(nbl_input_matrix))!=0.0),])) ) )
saveRDS(nbl_result_matrix,"nbl_result_matrix_full.rds")
saveRDS(nbl_result_matrix[1:25,1:25],"nbl_result_matrix_small.rds")
```

```{r, echo=F,include=T,eval=F}
#source download locations
#download.file("https://github.com/jamesdalg/CNScope_public_data/blob/master/nbl_result_matrix_full.rds?raw=true","nbl_result_matrix_full.rds")
#download.file("https://github.com/jamesdalg/CNScope_public_data/blob/master/nbl_result_matrix_sign_corrected.rds","nbl_result_matrix_sign_corrected.rds")
#download.file("https://www.dropbox.com/s/sevuhos976c6guu/nbl_result_matrix_full.rds?dl=1","nbl_result_matrix_full.rds")
#download.file("https://www.dropbox.com/s/85hii5cd5epuuby/nbl_result_matrix_sign_corrected.rds?dl=1","nbl_result_matrix_sign_corrected.rds")
nbl_result_matrix<-readRDS("nbl_result_matrix_full.rds")
nbl_result_matrix_sign_corrected<-readRDS("nbl_result_matrix_sign_corrected.rds")
```

You'll notice that there are no signs in this matrix (they're just negative log p-values, which are always positive). We'll have to assign signs by the correlation matrix next, then we will chunk the large matrix into smaller, flattened matrices that the shiny app can handle. For lower capacity machines/clusters, an alternative may be using the cor function (a sketch appears after the heatmap below). In order to perform sign correction, fix the "Inf" values to a viewable value, and restore column and row names, postProcessLinRegMatrix() can be applied, yielding a final full matrix of the entire genome (Chr1->ChrX on both axes). 300 has been used, although something a bit smaller will reduce saturation issues depending on the disparity between the lowest values in the matrix and 300. We'll plot the result below, using ComplexHeatmap and a custom designed function, signedRescale(), that takes large asymmetric distributions of values and pushes them into the [0,1] colorspace, with white at 0.5 corresponding to zero, values between 0 and 0.5 corresponding to negative values, and values from 0.5 to 1 corresponding to positive values.

```{r}
```

```{r,eval=T,echo=T}
nbl_result_matrix_small<-readRDS("nbl_result_matrix_small.rds")
nbl_result_matrix_small[1:5,1:5]
nbl_result_matrix_sign_corrected<-postProcessLinRegMatrix(input_matrix = nbl_input_matrix[1:25,1:25],LM_mat = nbl_result_matrix_small,cor_type = "pearson",inf_replacement_val = 300)
nbl_result_matrix_sign_corrected[1:5,1:5]
```

```{r,echo=T}
nbl_result_matrix_sign_corrected[1:5,1:5]
if (requireNamespace("ComplexHeatmap", quietly = TRUE) & requireNamespace("circlize", quietly = TRUE)) {
ComplexHeatmap::Heatmap(signedRescale(as.matrix(nbl_result_matrix_sign_corrected)),
col = circlize::colorRamp2(c(0,0.5,1),c("blue","white","red")),
cluster_rows = F,cluster_columns = F,
show_heatmap_legend = F,
show_column_names = F,
show_row_names = F)
} else {
print("ComplexHeatmap not installed.\n Please install ComplexHeatmap in order to create this plot.")
}
```

Finally, the whole genome matrix is too big to plot interactively without crashing most browsers using the plotly package. We'll need to break things apart a bit. A final function will write chromosomal pair heatmaps to disk, with genes from ensembl (hg19 coordinates) encoded for each square in the matrix. Please only use this function on the WHOLE matrix, not on the small subset we have provided in documentation.
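For machines or clusters that can't run the slurm workflow, here is a minimal sketch of the cor() alternative mentioned above. This is an assumption-laden substitute, not the package's canonical pipeline: correlations are already signed (so no sign correction step is needed), but they lack the significance weighting of the regression matrix.

```{r cor_alternative, eval=F, echo=T}
#a hedged sketch, not the canonical slurm-based workflow: a plain Pearson
#correlation matrix over the same non-invariant bins
library(matrixStats)
nonzero_rows<-which(rowSds(as.matrix(nbl_input_matrix))!=0.0)
nbl_cor_matrix<-cor(t(nbl_input_matrix[nonzero_rows,]),
                    use="pairwise.complete.obs",method="pearson")
#already signed, so it can go straight into signedRescale() for plotting
```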
```{r,eval=F,echo=T}
if(!dir.exists("nbl_matrix_set")){dir.create("nbl_matrix_set")}
#setwd("nbl_matrix_set")
doMC::registerDoMC()
#use ONLY the whole matrix with chromosomes 1-X, not the small subset provided for documentation purposes.
createChromosomalMatrixSet(whole_genome_mat=nbl_result_matrix_sign_corrected,output_dir="nbl_matrix_set",prefix="nbl_")
```

```{r,echo=T,eval=F}
list.files("nbl_matrix_set")
```

There should be 529 of these particular files upon running the code. If there are not, don't hesitate to run the code again; incomplete runs can happen on a cluster. The function detects when a chromosomal matrix has already been written to disk, so completed work is not repeated.
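As a quick sanity check on that count (a hedged illustration, not part of the original workflow): 23 chromosomes (1-22 plus X) paired against each other give 23 x 23 = 529 chromosome-pair matrices.

```{r count_check, eval=F, echo=T}
#hedged check: 23 chromosomes squared should yield 529 files
length(list.files("nbl_matrix_set"))==23^2
```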
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/create_output_matrix.Rmd
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) library(CNVScope) library(magrittr) ## ---- knit='asis'------------------------------------------------------------- if (requireNamespace("pwr", quietly = TRUE)) { library(pwr) large.effect.size<-pwr::cohen.ES(test="f2",size="large")$effect.size large.effect.size f2.res<-pwr::pwr.f2.test(u = 1, f2 = large.effect.size/(1 - large.effect.size), sig.level = 0.05, power = 0.8) f2.res n<-ceiling(f2.res$v+f2.res$u+1) n } else { print("Please install the pwr package in order to build this vignette.") } ## ----------------------------------------------------------------------------- if (requireNamespace("pwr", quietly = TRUE)) { pwr::pwr.r.test(r = 0.3, sig.level = 0.01, power = 0.8, alternative = "greater")$n %>% ceiling() } else { print("Please install the pwr package in order to build this vignette.") }
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/power_analysis.R
<style> .main-container { max-width: 1200px !important; } </style>
<style type="text/css"> .main-container { max-width: 1200px; margin-left: auto; margin-right: auto; } </style>

---
title: "Power Analysis"
author: "James Dalgleish"
date: "7/8/2019"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Power Analysis}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(CNVScope)
library(magrittr)
```

Power Analysis: Because this is a visual tool, signatures are judged by eye, so a natural question is how many samples are needed to make them detectable. One can use standard power and sample size calculators to determine the number of samples if the user knows the effect size they are trying to detect and at what power. As a simple exercise using the *pwr* package, we could find the number of samples needed to detect a linear association at the 0.05 level, with 80% power (standard in most textbooks and in the pwr vignette). Mind you, this is only the linear regression for a single point (one bin against another bin).

```{r, knit='asis'}
if (requireNamespace("pwr", quietly = TRUE)) {
library(pwr)
large.effect.size<-pwr::cohen.ES(test="f2",size="large")$effect.size
large.effect.size
f2.res<-pwr::pwr.f2.test(u = 1, f2 = large.effect.size/(1 - large.effect.size), sig.level = 0.05, power = 0.8)
f2.res
n<-ceiling(f2.res$v+f2.res$u+1)
n
} else {
print("Please install the pwr package in order to build this vignette.")
}
```

We see that we need `r if (requireNamespace("pwr", quietly = TRUE)) {n}` samples to properly obtain a significance level of 0.05 for a given bin pair. In practice, however, we suggest using more than this. Samples over 100 (like the NBL data) achieve the results that most clearly show a good CNV signature. One could also do a power analysis from the correlation test within the *pwr* package as well. Wishing to detect a 0.3 difference (which is visually detectable for the average user) with 80% power requires over 100 samples. This seems to be the best approach.

```{r}
if (requireNamespace("pwr", quietly = TRUE)) {
pwr::pwr.r.test(r = 0.3, sig.level = 0.01, power = 0.8, alternative = "greater")$n %>% ceiling()
} else {
print("Please install the pwr package in order to build this vignette.")
}
```
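To make the sample size intuition concrete, here is a small hedged illustration (not from the original vignette) of how the required n from pwr.r.test() grows as the detectable correlation shrinks, at the same significance level and power used above.

```{r, eval=F}
if (requireNamespace("pwr", quietly = TRUE)) {
  #required n for detectable correlations of 0.5, 0.3, and 0.2
  sapply(c(0.5, 0.3, 0.2), function(r)
    ceiling(pwr::pwr.r.test(r = r, sig.level = 0.01, power = 0.8,
                            alternative = "greater")$n))
}
```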
/scratch/gouwar.j/cran-all/cranData/CNVScope/inst/doc/power_analysis.Rmd
<style> .main-container { max-width: 1200px !important; } </style> <style type="text/css"> .main-container { max-width: 1200px; margin-left: auto; margin-right: auto; } </style> --- title: "Additional Examples" author: "James Dalgleish" date: "7/9/2019" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Additional Visualization Examples} %\VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE) knitr::opts_knit$set(root.dir = '.') library(CNVScope) options(scipen=999) library(magrittr) ``` This vignette was created to showcase additional cancers and also to highlight additional, less-known features of the package. Additional examples: [TCGA-BLCA](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TCGA-BLCA%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D) [TARGET-AML](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TARGET-AML%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D) [TARGET-SKCM](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TCGA-SKCM%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D) [TCGA-PRAD](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TCGA-PRAD%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:0,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D) #Acute 
Myeloid Leukemia (AML)

There are fewer TARGET datasets available than TCGA. We'll do AML first. We've named our downloaded archive gdc_download_aml.tar.gz.

```{r aml_files,eval=F,echo=T}
if(!dir.exists("extracted_aml_data")){dir.create("extracted_aml_data")}
untar("gdc_download_aml.tar.gz",exdir = "./extracted_aml_data")
target_files_aml<-list.files(path = "extracted_aml_data",pattern=glob2rx("*NormalVsPrimary.tsv"),recursive=T,full.names = T)
print(target_files_aml)
```

```{r,eval=F,echo=T}
sample_aggregated_segvals_aml<-formSampleMatrixFromRawGDCData(tcga_files = target_files_aml,format = "TARGET")
saveRDS(sample_aggregated_segvals_aml,"aml_sample_matched_input_matrix.rds")
```

Now that we've created and saved the AML matrix, let's visualize it with a quick correlation map of a single chromosome, chromosome 7, the location of the BRAF gene and nearby EZH2 gene. The BRAF gene (chr7:140419127-140624564) is a locus for recurrent copy number aberrations (Reference: Tarlock et al., Recurrent Copy Number Variants Are Highly Prevalent in Acute Myeloid Leukemia. Blood 2017; 130 (Supplement 1): 3800). Some of the bins are invariant, and correlation requires that the standard deviation be a value other than zero (otherwise correlation cannot be calculated). We will remove them in a couple of steps and transpose the matrix, making the columns the bins and the samples the rows.

```{r aml_plots, eval=T,echo=T}
sample_aggregated_segvals_aml<-readRDS("aml_sample_matched_input_matrix.rds")
invariant_bins<-which((sample_aggregated_segvals_aml[stringr::str_detect(rownames(sample_aggregated_segvals_aml),"chr7"),] %>% t() %>% as.data.frame() %>% sapply(sd))==0)
chr_7_mat<-sample_aggregated_segvals_aml[(stringr::str_detect(rownames(sample_aggregated_segvals_aml),"chr7") & rownames(sample_aggregated_segvals_aml) %in% setdiff(rownames(sample_aggregated_segvals_aml),names(invariant_bins))),] %>% t()
```

Now we'll perform correlation on the plot.

```{r chr7_cor}
chr_7_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
CNVScope::signedRescale(max_cap=1) %>%
reshape2::melt() %>%
ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
fill=value)) + geom_raster() +
theme(axis.text.x = element_blank(),axis.text.y=element_blank(),axis.title = element_blank()) +
ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
```

We could then utilize our domain finding function to find the borders of domains in the matrix. There's an obvious disruption near the left center of the matrix (see the blue streaks of anticorrelation?).
```{r breakpoints}
if((Sys.info()['sysname'] == "Linux" | Sys.info()['sysname'] == "Windows")&requireNamespace("HiCseg",quietly = T)){
colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))]
breakpoints<-colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))] %>% stringr::str_split_fixed(string = .,pattern="_",n=3) %>% as.matrix() %>% .[,2] %>% as.numeric()
breakpoint_labels <- colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"))]
breakpoint_labels} else {
colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col]
breakpoints<-colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col] %>% stringr::str_split_fixed(string = .,pattern="_",n=3) %>% as.matrix() %>% .[,2] %>% as.numeric()
breakpoint_labels <- colnames(chr_7_mat)[CNVScope::getAsymmetricBlockIndices(cor(chr_7_mat,use="pairwise.complete.obs"),algorithm = "jointSeg",nb_change_max = round(min(dim(chr_7_mat))/5))$breakpoints_col]
breakpoint_labels }
```

Now, we'll make another plot, only labeling the breakpoints.

```{r breakpoint_plot}
chr_7_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
CNVScope::signedRescale(max_cap=1) %>%
reshape2::melt() %>%
ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
fill=value)) + geom_raster() +
theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank(),axis.title = element_blank()) +
scale_x_continuous(breaks=breakpoints,labels=breakpoint_labels) +
ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
```

Finally, let's try our probability weighting function for the matrix and see if we can find clearer regions of association. We'll also try another segmentation algorithm with the jointseg package. In most cases, you can achieve a definite speed increase using the parallel=T option. We have disabled it to build the vignette without warnings.

```{r probdist}
if(requireNamespace("smoothie",quietly=T)){
chr_7_probdist <- CNVScope::calcCNVKernelProbDist(cor(chr_7_mat,use="pairwise.complete.obs"),parallel=F)$percentile_matrix
js_breakpoints<-jointseg::jointSeg(chr_7_probdist,K=20)$bestBkp
js_breakpoint_labels<-colnames(chr_7_mat)[js_breakpoints]
} else{
print("Please install smoothie in order to run this example.")
}
```

We'll notice that using this combination of techniques, we've managed to pick up an AML driver between domain endpoints, in a large region where the association is less than expected (as determined by the calcCNVKernelProbDist function). Within the region of 115 and 120Mb lies the MET gene, where an LOH was detected in the paper. It's a pretty obvious signature, perhaps the most obvious in the whole plot. Further, we suggest that there is a minor signal off the diagonal in the region of this and the location of BRAF (chr7:140419127-140624564, another recurrent CNV in the paper), associated with the border of a region near 16-24Mb. Within this area are PMS2 (a tumor suppressor), RAC1 (an oncogene), and ETV1 (an oncogene). PMS2 alteration has been implicated in AML previously (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3734905/).
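Before moving on, here is a hedged sketch (not part of the original analysis) of how one might programmatically check which inter-breakpoint domain holds a locus of interest; the MET start coordinate below is approximate (hg19), and js_breakpoint_labels comes from the chunk above.

```{r met_domain, eval=F, echo=T}
#hedged sketch: which domain (interval between breakpoints) contains MET?
bkp_starts<-sort(as.numeric(
  stringr::str_split_fixed(js_breakpoint_labels,"_",3)[,2]))
findInterval(116312000, bkp_starts) #MET hg19 start, approximate
```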
We don't suggest that this tool is perfect or that it will rapidly make every CNV-associated cancer driver clear, but we do suggest that it can, alongside other tools, add to your investigative toolbox to substantiate known drivers and to elucidate new ones.

```{r plot_probdist}
if(requireNamespace("smoothie",quietly=T)){
chr_7_probdist %>%
# CNVScope::signedRescale(max_cap=1) %>%
reshape2::melt() %>%
ggplot(aes(x=Var1,
y=Var2,
fill=value)) + geom_tile() +
# theme(axis.title = element_blank()) + #axis.text.x = element_blank(),axis.text.y=element_blank(),
theme(axis.text.x = element_text(angle=90, hjust=1), axis.text.y = element_text(angle=0, hjust=1) ,axis.title = element_blank()) +
# scale_x_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
# scale_y_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
scale_x_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
scale_y_continuous(breaks=js_breakpoints,labels=js_breakpoint_labels) +
ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
} else{
print("Please install smoothie in order to run this example.")
}
```

Using the below code, we can find a few more genes to explore that are known to be associated with AML in the COSMIC cancer gene census. This requires the CNVScope public data package to be installed properly.

```{r census_data,eval=F}
census_data <- readRDS(system.file("censushg19.rds",package = "CNVScope"))
census_data[census_data@seqnames %in% "chr7"] %>% sort() %>% tibble::as_tibble() %>% janitor::clean_names() %>% dplyr::select(seqnames,start,end,gene_symbol,tumour_types_somatic,tumour_types_germline) %>% dplyr::filter(start>60e6,stringr::str_detect(string = tumour_types_somatic,pattern="AML") | stringr::str_detect(string = tumour_types_germline,pattern="AML"))
```

#TCGA Bladder Cancer (BLCA)

Now for a TCGA set, let's try a bladder cancer dataset:

```{r blca_files,eval=F,echo=T}
if(!dir.exists("extracted_blca_data")){dir.create("extracted_blca_data")
untar("gdc_download_blca.tar.gz",exdir = "./extracted_blca_data")}
tcga_files_blca<-list.files(path = "extracted_blca_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T)
print(tcga_files_blca)
```

```{r,eval=F,echo=T}
sample_aggregated_segvals_blca<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_blca,format = "TCGA",parallel=T)
saveRDS(sample_aggregated_segvals_blca,"blca_sample_matched_input_matrix.rds")
```

For bladder cancer (BLCA), we'll look at ERBB2, mentioned in [this article](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3962515/) as having amplifications in up to 5% of samples. We may not be able to detect it in our sample, but we can prove that our method did indeed process the TCGA data format.

```{r blca_plots, eval=T,echo=T}
sample_aggregated_segvals_blca<-readRDS("blca_sample_matched_input_matrix.rds")
invariant_bins<-which((sample_aggregated_segvals_blca[stringr::str_detect(rownames(sample_aggregated_segvals_blca),"chr17"),] %>% t() %>% as.data.frame() %>% sapply(sd))==0)
chr_17_mat<-sample_aggregated_segvals_blca[(stringr::str_detect(rownames(sample_aggregated_segvals_blca),"chr17") & rownames(sample_aggregated_segvals_blca) %in% setdiff(rownames(sample_aggregated_segvals_blca),names(invariant_bins))),] %>% t()
```

Now we'll perform correlation on the plot.
```{r chr17_cor}
chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>%
CNVScope::signedRescale(max_cap=1) %>%
reshape2::melt() %>%
ggplot(aes(x=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start,
y=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start,
fill=value)) + geom_raster() +
theme(axis.text.x = element_blank(),axis.text.y=element_blank(),axis.title = element_blank()) +
ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1))
```

We can follow the same procedure with this case, but it may not yield precisely the same dramatic result. We expect this is the case with most investigative tools and do not guarantee a winning result each time. In this case, we are attempting to show that the tool DOES work on TCGA data. We can confidently say that we created a matched sample matrix with the TCGA format as well as the TARGET format. We recommend setting parallel=T on the next line for speed, but for the purposes of compatibility with all systems, we have set the code to run without additional cores in this example.

```{r probdist_chr17}
if(requireNamespace("smoothie",quietly=T)){
chr_17_probdist <- CNVScope::calcCNVKernelProbDist(cor(chr_17_mat,use="pairwise.complete.obs"),parallel=F)$percentile_matrix
colnames(chr_17_probdist)<-colnames(chr_17_mat)
rownames(chr_17_probdist)<-colnames(chr_17_mat)
chr_17_js_breakpoints<-jointseg::jointSeg(chr_17_probdist,K=40)$bestBkp
chr_17_js_breakpoint_labels<-colnames(cor(chr_17_mat))[chr_17_js_breakpoints]
chr_17_js_breakpoint_labels
} else{
print("Please install smoothie in order to run this example.")
}
```

With the coordinates for *ERBB2* at chr17:37844167-37886679, we find a precise match in the second breakpoint, which lies exactly within the gene. All the breakpoints are plotted in the plot below. Beyond that, the centromere point is precisely identifiable. It not only shows pathophysiology, but illustrates clearly the position of the chromosomal landmark as the border between the largest domains. [Click here](https://grch37.ensembl.org/Homo_sapiens/Location/View?r=17:23000001-25000000) to view the ensembl-75 view of this position. It is clear that it precisely matches the location of the centromere. The contours have been added to highlight the domains. We have also plotted the weighted probability of relationships. Finally, we took an approach of investigating correlation differences between Spearman (nonlinear) and Pearson (linear) correlation. This tends to highlight a specific region in the 37Mb area of the chromosome (near ERBB2).
```{r breakpoint_plot_chr17,eval=F} breakpoint_plot_probdist <- chr_17_probdist %>% # cor(use="pairwise.complete.obs",method="pearson") %>% CNVScope::signedRescale(max_cap=1) %>% reshape2::melt() %>% dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start, row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start, rel_prob=value) %>% ggplot(aes(x=col_pos, y=row_pos, fill=rel_prob)) + geom_raster() + theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) + scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) + ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 relationship probability") + geom_contour(binwidth = .395, aes(z = value)) breakpoint_plot <- chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>% CNVScope::signedRescale(max_cap=1) %>% reshape2::melt() %>% dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start, row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start, correlation=value) %>% ggplot(aes(x=col_pos, y=row_pos, fill=correlation)) + geom_raster() + theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) + scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) + ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 linear relationship domains") + geom_contour(binwidth = .395, aes(z = value)) breakpoint_plot_corr_diff <- ((chr_17_mat %>% cor(use="pairwise.complete.obs",method="spearman"))-(chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson"))) %>% CNVScope::signedRescale(max_cap=1) %>% reshape2::melt() %>% dplyr::mutate(col_pos=reshape2::colsplit(Var1,"_",c("chr","start","end"))$start, row_pos=reshape2::colsplit(Var2,"_",c("chr","start","end"))$start, corr_diff=value) %>% ggplot(aes(x=col_pos, y=row_pos, fill=corr_diff)) + geom_raster() + theme(axis.text.x = element_text(angle=90, hjust=1),axis.text.y=element_blank()) + scale_x_continuous(breaks=reshape2::colsplit(chr_17_js_breakpoint_labels,"_",c("chr","start","end"))$start,labels=chr_17_js_breakpoint_labels) + ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + labs(x="col_pos",y="row_pos",value="Pearson Correlation:") + ggtitle("Chromosome 17 nonlinear (red) relationship regions, inferred by nonlinear-linear correlation difference") + geom_contour(binwidth = .395, aes(z = value)) breakpoint_plot breakpoint_plot_probdist breakpoint_plot_corr_diff ``` ![BLCA, chromosome 17, with contours](./chr17_contour_blca.png) ![BLCA, chromosome 17, probdist](./chr17_probdist_blca.png) ![3D BLCA, chromosome 17, nonlinear-linear relationship differences](./chr17_corr_diff_blca.png) For an interactive plot of the bladder cancer interactome (in contour and 3D), try the following. WebGL is required for this exercise. Enable it under the general->advanced RStudio options if you have not already (see https://community.rstudio.com/t/webgl-is-not-supported-by-your-browser-plotly/13962/7). 
```{r plotly_blca,eval=F} library(plotly) breakpoint_plot %>% plotly::ggplotly() ``` ```{r 3D_blca,eval=F} chr_17_long <- chr_17_mat %>% cor(use="pairwise.complete.obs",method="pearson") %>% CNVScope::signedRescale(max_cap=1) %>% reshape2::melt() %>% dplyr::mutate(col_pos=as.numeric(reshape2::colsplit(Var1,"_",c("chr","start","end"))$start), row_pos=as.numeric(reshape2::colsplit(Var2,"_",c("chr","start","end"))$start), correlation=value) %>% dplyr::select(col_pos,row_pos,correlation) plot_ly(data = chr_17_long, x=chr_17_long$col_pos,y=chr_17_long$row_pos,z=chr_17_long$correlation,color=c(0,0.5,1),colors=circlize::colorRamp(c("blue","white","red")),intensity=chr_17_long$correlation,type = "mesh3d") ``` We'll briefly demonstrate that SKCM also works with our relationship modeling toolkit. ```{r skcm_files,eval=F,echo=T} if(!dir.exists("extracted_skcm_data")){dir.create("extracted_skcm_data")} untar("gdc_download_skcm.tar.gz",exdir = "./extracted_skcm_data") tcga_files_skcm<-list.files(path = "extracted_skcm_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T) print(tcga_files_skcm) ``` ```{r,eval=F,echo=T} #ptm <- proc.time() #doMC::registerDoMC() #doParallel::registerDoParallel() sample_aggregated_segvals_skcm<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_skcm,format = "TCGA",parallel = T) #proc.time() - ptm saveRDS(sample_aggregated_segvals_skcm,"skcm_sample_matched_input_matrix.rds") ``` We have timed this as 142 seconds on a dual core i7, even with the recent performance-reducing patches. The parallel flag will automatically register the parallel backend for you using the DoParallel framework and use the maximum amount of cores. We will also show another TCGA example for PRAD: ```{r,eval=F,echo=T} if(!dir.exists("extracted_prad_data")){dir.create("extracted_prad_data") untar("gdc_download_prad.tar.gz",exdir = "extracted_prad_data")} tcga_files_prad<-list.files(path = "extracted_prad_data",pattern=glob2rx("*.tsv"),recursive=T,full.names = T) print(tcga_files_prad) ``` With the full list of input files from the GDC, these can then be simply loaded into a function that will read all of them, sample match them, and aggregate the data into a bin-sample matrix. This matrix can then be saved into the fast, space efficient, RDS filetype. ```{r,eval=F,echo=T} sample_aggregated_segvals_output_full_prad<-formSampleMatrixFromRawGDCData(tcga_files = tcga_files_prad,format = "TCGA",binsize=1e6) saveRDS(sample_aggregated_segvals_output_full_prad,"PRAD_sample_matched_input_matrix.rds") ```
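If you prefer to cap the core count rather than let the parallel flag use every available core, a hedged sketch is below (optional, and not required by formSampleMatrixFromRawGDCData; registerDoParallel is from the doParallel package already referenced above).

```{r, eval=F, echo=T}
#hedged sketch: manually register a smaller parallel backend
doParallel::registerDoParallel(cores = 2)
```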
/scratch/gouwar.j/cran-all/cranData/CNVScope/vignettes/additonal_examples.Rmd
<style> .main-container { max-width: 1200px !important; } </style>
<style type="text/css"> .main-container { max-width: 1200px; margin-left: auto; margin-right: auto; } </style>

---
title: "Creating Neuroblastoma Input Matrices from public GDC (TARGET) data"
author: "James Dalgleish"
date: "August 1, 2018"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Creating the TARGET Input matrix from public data}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_knit$set(root.dir = '.')
library(magrittr)
```

We begin by obtaining TARGET low-pass neuroblastoma data (NBL) from the GDC archive. Please note: TARGET_NBL_WGS_CNVLOH.tsv is a clinical metadata file and therefore not compatible with the ensuing functions to extract segment data. We have also chosen to use only a single comparison type (NormalVsPrimary) to ensure comparability and compatibility with the data. Users can download the tar.gz file and extract the tsv files into a single folder. We have already done that here. The source for these files is located [here](https://portal.gdc.cancer.gov/legacy-archive/search/f?filters=%7B%22op%22:%22and%22,%22content%22:%5B%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.data_category%22,%22value%22:%5B%22Copy%20number%20variation%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.experimental_strategy%22,%22value%22:%5B%22WGS%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22files.access%22,%22value%22:%5B%22open%22%5D%7D%7D,%7B%22op%22:%22in%22,%22content%22:%7B%22field%22:%22cases.project.project_id%22,%22value%22:%5B%22TARGET-NBL%22%5D%7D%7D%5D%7D&pagination=%7B%22files%22:%7B%22from%22:101,%22size%22:100,%22sort%22:%22cases.project.project_id:asc%22%7D%7D)

```{r nbl_data_files_png,echo=F}
knitr::include_graphics("NBL_data_files.png", dpi = 10)
```

The user simply chooses to add all the files to the cart, then clicks the black cart button in the top right hand corner.

```{r download_from_cart,echo=F}
knitr::include_graphics("download_from_cart.png", dpi = 10)
```

On the cart page, click download, then cart. It will be downloaded as a tar.gz archive. You can untar it with R, but the files will be in a complex set of directories. It is best to list the files recursively, with criteria that will obtain the segment files in tsv format, with that single comparison of interest.

```{r nbl_files,eval=F,echo=T}
if(!dir.exists("extracted_nbl_data")){dir.create("extracted_nbl_data")}
untar("gdc_download_20180801_160142.tar.gz",exdir = "extracted_nbl_data")
target_files_nbl<-list.files(path = "extracted_nbl_data",pattern=glob2rx("*NormalVsPrimary.tsv"),recursive=T,full.names = T)
print(target_files_nbl)
```

With the full list of input files from the GDC, these can then be simply loaded into a function that will read all of them, sample match them, and aggregate the data into a bin-sample matrix. This matrix can then be saved into the fast, space efficient, RDS filetype.

```{r,eval=F,echo=T}
sample_aggregated_segvals_output_full<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,format = "TARGET")
saveRDS(sample_aggregated_segvals_output_full,"NBL_sample_matched_input_matrix.rds")
```

#Custom Data

For those who wish to add custom data, we have provided a series of options, provided that the input text file has a single column with copy number values.
We will show that the same result can be obtained with the correct options, specifying the column of interest, using a completely different section of the function. Use the format="custom" option and be sure to specify the names of the following columns in your text file:

* copy number column
* chromosome
* start position
* end position

In this case, we have also selected freadskip=14, as there are 14 lines before the tabular data begins. The sample pattern is a regular expression set to grab the sample name within the filename; setting sample_pat="" will grab the complete filename and let this denote unique samples. We suggest shorter sample names than this, however. A sample column name (sample_col) can also be given to identify samples on each line of the input files instead of using the filename or a portion thereof. For further information, please review the help files by typing: help("formSampleMatrixFromRawGDCData", package = "CNVScope")

```{r, eval=F,echo=T}
nbl_custom_input_matrix<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
format = "custom",binsize = 1e6,freadskip = 14,parallel=F,debug=F,
sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
saveRDS(nbl_custom_input_matrix,"NBL_custom_sample_matched_input_matrix.rds")
```

With this, your input matrix is complete. Next, please look at the [next vignette that details recursive linear regression and postprocessing.](create_output_matrix.html)

Optional example for advanced use: A high definition version is quite simple to create, using the above options, on TARGET, TCGA, or custom format data. Simply decrease the binsize to increase the resolution. In chromosome 2, we have found that the median bin size is 731069 and that smaller chromosomes benefit from binsize reduction more than larger chromosomes (except for chromosome 1). In this case, we decrease our binsize by a factor of 4, making the total number of values in the matrix increase by a factor of 16. It will take longer than the typical time.

```{r, echo=T,eval=F}
nbl_custom_input_matrix_hd<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
format = "custom",binsize = 2.5e5,freadskip = 14,parallel=T,debug=F,
sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
saveRDS(nbl_custom_input_matrix_hd,"NBL_custom_sample_matched_input_matrix_2.5e5binsize_parallel.rds")
```

In the opposite manner, if you wanted to quickly create a low density version, this would be the way to go about it. It will make the input matrix creation process proceed a great deal faster and will require less RAM to display an interactive map.

```{r, echo=T,eval=F}
nbl_custom_input_matrix_ld<-CNVScope::formSampleMatrixFromRawGDCData(tcga_files = target_files_nbl,
format = "custom",binsize = 1e8,freadskip = 14,parallel=F,debug=F,
sample_pat = "(?<=30-)(.*?)(?=_)",sample_col = "sample",chrlabel=">chr",
startlabel = "begin",endlabel = "end",cnlabel = "relativeCvg")
saveRDS(nbl_custom_input_matrix_ld,"NBL_custom_sample_matched_input_matrix_1e8binsize.rds")
```

A demonstration is below of varying resolutions, using correlation based maps (for space reasons). Examples of chr11 are shown below, in increasing order of resolution (1e8, 1e7, 1e6, 2.5e5, 1e5 binsize). Note that the resolution doesn't dramatically improve after 1e6, despite the time and computational effort required to generate it.
![chr11_1e8](chr11_1e8_nbl.jpg)
![chr11_1e7](chr11_1e7_nbl.jpg)
![chr11_1e6](chr11_1e6_nbl.jpg)
![chr11_2.5e5](chr11_2.5e5_nbl.jpg)
![chr11_1e5](chr11_1e5_nbl.jpg)

A note on the resolution-performance/stability trade-off: We would recommend not decreasing the bin size below the mean width of the regions for which you hope to gain sharper images. Further, pushing the resolution beyond what the program was designed for may result in instability. We chose the 1MB bin size because, after many iterations, it creates maps that enable visual appreciation of copy number regions, but is reasonably responsive and stable for both client and server. Higher dimension matrices were attempted in earlier versions of development, but would cause browser crashes in larger chromosomal maps. Further, higher dimension maps limit usage on multi-user shiny servers. So, we still recommend the 1MB binsize for the aforementioned reasons.
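As a closing hedged illustration of the sample_pat option used in the custom-format examples above: the lookaround regular expression extracts the sample name from each filename (the filename below is hypothetical).

```{r sample_pat_demo, eval=F, echo=T}
#hedged illustration with a hypothetical filename
fname<-"CGI-30-PASMGE_NormalVsPrimary.tsv"
stringr::str_extract(fname,"(?<=30-)(.*?)(?=_)")
#returns "PASMGE", the text between "30-" and the following "_"
```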
/scratch/gouwar.j/cran-all/cranData/CNVScope/vignettes/create_input_matrix.Rmd
--- title: "Creating Linear Regression Matrices from Segment Data" author: "James Dalgleish" date: "July 25, 2018" output: rmarkdown::html_vignette vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Linear Regression/Postprocess} %\VignetteEncoding{UTF-8} --- ```{r setup, include=F} knitr::opts_chunk$set(echo = F) if(Sys.info()['sysname']=="Windows"){groupdir<-"W:/"} else {groupdir<-"/data/CCRBioinfo/"} #knitr::opts_knit$set(root.dir = paste0(groupdir,"dalgleishjl/hicnv/hicnv/vignettes/")) # #setwd("~") #nbl_input_matrix<-readRDS("NBLTCGA_merged_df_aggregated_by_bin_fixed_comparisonv4.rds") #getwd() nbl_input_matrix<-readRDS("NBL_sample_matched_input_matrix.rds") #nbl_result_matrix<-readRDS("nbl_result_matrix_full.rds") ``` From our previous work, we created a small input matrix, with segmented 1Mb regions as our row labels and with sample names from TARGET data as our column labels. We can read that in using the following code: ```{r,echo=T,warning=F,message=F} library(CNVScope) ``` ```{r,echo=T} nbl_input_matrix[1:5,1:5] ``` calcVecLMs() comes standard in the CNVScope package. It allows calculation of the matrix with parallel processing using mclapply, but larger matrices will require a bit more power, and thus we use slurm_apply, from the rslurm pacakge to distribute the work over multiple cores. Our particular establishment has a limit approximating 1000 jobs, so it's best not to use more than that unless your cluster will support it. Conversely, you should use less if you can't submit that many individual jobs in a job array in your cluster. In this particular example, I've removed rows where there is no segmentation data, across the board using colSums(). ```{r, eval=F,echo=T} library(parallel) nbl_slurm_object_test_zero_removed<-calcVecLMs(bin_data =as.data.frame(t(nbl_input_matrix[which(rowSds(as.matrix(nbl_input_matrix))!=0.0),])),use_slurm = T,n_nodes = 975,memory_per_node = "32g",walltime = "04:00:00",cpus_on_each_node = 2,job_finished = F,slurmjob = NULL) ``` Saving the slurm object is essential as it will be required when you retrieve your results. ```{r, eval=F,echo=T} saveRDS(nbl_slurm_object_test_zero_removed,"nbl_slurm_object_test_zero_removed.rds") ``` Retrieving the data is as simple as using rslurm::get_slurm_out() on the saved slurm object and coercing it into a matrix with the original number of columns. The slurm object must have been read with readRDS() previously or done in the same session. For the purposes of making this tutorial, we have chosen to work on a small version of the whole matrix to make 5MB CRAN documentation limits. Previous versions of the tutorial included the whole matrix, but we leave that to the user to construct at this point. For reproducibility, one can find the original full data matrix at https://github.com/jamesdalg/CNVScope_public_data. 
```{r, eval=F,echo=T}
library(matrixStats)
nbl_result_matrix<-matrix(as.numeric(unlist( get_slurm_out(nbl_slurm_object_test_zero_removed))),ncol=ncol(as.data.frame(t(nbl_input_matrix[which(rowSds(as.matrix(nbl_input_matrix))!=0.0),])) ) )
saveRDS(nbl_result_matrix,"nbl_result_matrix_full.rds")
saveRDS(nbl_result_matrix[1:25,1:25],"nbl_result_matrix_small.rds")
```

```{r, echo=F,include=T,eval=F}
#source download locations
#download.file("https://github.com/jamesdalg/CNScope_public_data/blob/master/nbl_result_matrix_full.rds?raw=true","nbl_result_matrix_full.rds")
#download.file("https://github.com/jamesdalg/CNScope_public_data/blob/master/nbl_result_matrix_sign_corrected.rds","nbl_result_matrix_sign_corrected.rds")
#download.file("https://www.dropbox.com/s/sevuhos976c6guu/nbl_result_matrix_full.rds?dl=1","nbl_result_matrix_full.rds")
#download.file("https://www.dropbox.com/s/85hii5cd5epuuby/nbl_result_matrix_sign_corrected.rds?dl=1","nbl_result_matrix_sign_corrected.rds")
nbl_result_matrix<-readRDS("nbl_result_matrix_full.rds")
nbl_result_matrix_sign_corrected<-readRDS("nbl_result_matrix_sign_corrected.rds")
```

You'll notice that there are no signs in this matrix (they're just negative log p-values, which are always positive). We'll have to assign signs by the correlation matrix next, then we will chunk the large matrix into smaller, flattened matrices that the shiny app can handle. For lower capacity machines/clusters, an alternative may be using the cor function (a sketch appears after the heatmap below). In order to perform sign correction, fix the "Inf" values to a viewable value, and restore column and row names, postProcessLinRegMatrix() can be applied, yielding a final full matrix of the entire genome (Chr1->ChrX on both axes). 300 has been used, although something a bit smaller will reduce saturation issues depending on the disparity between the lowest values in the matrix and 300. We'll plot the result below, using ComplexHeatmap and a custom designed function, signedRescale(), that takes large asymmetric distributions of values and pushes them into the [0,1] colorspace, with white at 0.5 corresponding to zero, values between 0 and 0.5 corresponding to negative values, and values from 0.5 to 1 corresponding to positive values.

```{r}
```

```{r,eval=T,echo=T}
nbl_result_matrix_small<-readRDS("nbl_result_matrix_small.rds")
nbl_result_matrix_small[1:5,1:5]
nbl_result_matrix_sign_corrected<-postProcessLinRegMatrix(input_matrix = nbl_input_matrix[1:25,1:25],LM_mat = nbl_result_matrix_small,cor_type = "pearson",inf_replacement_val = 300)
nbl_result_matrix_sign_corrected[1:5,1:5]
```

```{r,echo=T}
nbl_result_matrix_sign_corrected[1:5,1:5]
if (requireNamespace("ComplexHeatmap", quietly = TRUE) & requireNamespace("circlize", quietly = TRUE)) {
ComplexHeatmap::Heatmap(signedRescale(as.matrix(nbl_result_matrix_sign_corrected)),
col = circlize::colorRamp2(c(0,0.5,1),c("blue","white","red")),
cluster_rows = F,cluster_columns = F,
show_heatmap_legend = F,
show_column_names = F,
show_row_names = F)
} else {
print("ComplexHeatmap not installed.\n Please install ComplexHeatmap in order to create this plot.")
}
```

Finally, the whole genome matrix is too big to plot interactively without crashing most browsers using the plotly package. We'll need to break things apart a bit. A final function will write chromosomal pair heatmaps to disk, with genes from ensembl (hg19 coordinates) encoded for each square in the matrix. Please only use this function on the WHOLE matrix, not on the small subset we have provided in documentation.
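For machines or clusters that can't run the slurm workflow, here is a minimal sketch of the cor() alternative mentioned above. This is an assumption-laden substitute, not the package's canonical pipeline: correlations are already signed (so no sign correction step is needed), but they lack the significance weighting of the regression matrix.

```{r cor_alternative, eval=F, echo=T}
#a hedged sketch, not the canonical slurm-based workflow: a plain Pearson
#correlation matrix over the same non-invariant bins
library(matrixStats)
nonzero_rows<-which(rowSds(as.matrix(nbl_input_matrix))!=0.0)
nbl_cor_matrix<-cor(t(nbl_input_matrix[nonzero_rows,]),
                    use="pairwise.complete.obs",method="pearson")
#already signed, so it can go straight into signedRescale() for plotting
```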
```{r,eval=F,echo=T}
if(!dir.exists("nbl_matrix_set")){dir.create("nbl_matrix_set")}
#setwd("nbl_matrix_set")
doMC::registerDoMC()
#use ONLY the whole matrix with chromosomes 1-X, not the small subset provided for documentation purposes.
createChromosomalMatrixSet(whole_genome_mat=nbl_result_matrix_sign_corrected,output_dir="nbl_matrix_set",prefix="nbl_")
```

```{r,echo=T,eval=F}
list.files("nbl_matrix_set")
```

There should be 529 of these particular files upon running the code. If there are not, don't hesitate to run the code again; incomplete runs can happen on a cluster. The function detects when a chromosomal matrix has already been written to disk, so completed work is not repeated.
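As a quick sanity check on that count (a hedged illustration, not part of the original workflow): 23 chromosomes (1-22 plus X) paired against each other give 23 x 23 = 529 chromosome-pair matrices.

```{r count_check, eval=F, echo=T}
#hedged check: 23 chromosomes squared should yield 529 files
length(list.files("nbl_matrix_set"))==23^2
```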
/scratch/gouwar.j/cran-all/cranData/CNVScope/vignettes/create_output_matrix.Rmd
<style> .main-container { max-width: 1200px !important; } </style>
<style type="text/css"> .main-container { max-width: 1200px; margin-left: auto; margin-right: auto; } </style>

---
title: "Power Analysis"
author: "James Dalgleish"
date: "7/8/2019"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteIndexEntry{Power Analysis}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
library(CNVScope)
library(magrittr)
```

Power Analysis: Because this is a visual tool, signatures are judged by eye, so a natural question is how many samples are needed to make them detectable. One can use standard power and sample size calculators to determine the number of samples if the user knows the effect size they are trying to detect and at what power. As a simple exercise using the *pwr* package, we could find the number of samples needed to detect a linear association at the 0.05 level, with 80% power (standard in most textbooks and in the pwr vignette). Mind you, this is only the linear regression for a single point (one bin against another bin).

```{r, knit='asis'}
if (requireNamespace("pwr", quietly = TRUE)) {
library(pwr)
large.effect.size<-pwr::cohen.ES(test="f2",size="large")$effect.size
large.effect.size
f2.res<-pwr::pwr.f2.test(u = 1, f2 = large.effect.size/(1 - large.effect.size), sig.level = 0.05, power = 0.8)
f2.res
n<-ceiling(f2.res$v+f2.res$u+1)
n
} else {
print("Please install the pwr package in order to build this vignette.")
}
```

We see that we need `r if (requireNamespace("pwr", quietly = TRUE)) {n}` samples to properly obtain a significance level of 0.05 for a given bin pair. In practice, however, we suggest using more than this. Samples over 100 (like the NBL data) achieve the results that most clearly show a good CNV signature. One could also do a power analysis from the correlation test within the *pwr* package as well. Wishing to detect a 0.3 difference (which is visually detectable for the average user) with 80% power requires over 100 samples. This seems to be the best approach.

```{r}
if (requireNamespace("pwr", quietly = TRUE)) {
pwr::pwr.r.test(r = 0.3, sig.level = 0.01, power = 0.8, alternative = "greater")$n %>% ceiling()
} else {
print("Please install the pwr package in order to build this vignette.")
}
```
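To make the sample size intuition concrete, here is a small hedged illustration (not from the original vignette) of how the required n from pwr.r.test() grows as the detectable correlation shrinks, at the same significance level and power used above.

```{r, eval=F}
if (requireNamespace("pwr", quietly = TRUE)) {
  #required n for detectable correlations of 0.5, 0.3, and 0.2
  sapply(c(0.5, 0.3, 0.2), function(r)
    ceiling(pwr::pwr.r.test(r = r, sig.level = 0.01, power = 0.8,
                            alternative = "greater")$n))
}
```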
/scratch/gouwar.j/cran-all/cranData/CNVScope/vignettes/power_analysis.Rmd
CNclusterNcenter <- function(segrat,blsize,minjoin,ntrial,bestbic, modelNames,cweight,bstimes,chromrange,seedme){ .lec.CreateStream(segrat$stream) .lec.SetSeed(segrat$stream,seedme) for( j in 1:segrat$sub).lec.ResetNextSubstream(segrat$stream) .lec.CurrentStream(segrat$stream) startcol<-"StartProbe" endcol<-"EndProbe" chromcol<-"chrom" medcol<-"segmedian" madcol<-"segmad" segrat$seg<-cbind(segrat$seg,t(apply(segrat$seg[,c(startcol,endcol,chromcol), drop=F],1,smedmad,v=segrat$rat))) dimnames(segrat$seg)[[2]]<-c(startcol,endcol,chromcol,medcol,madcol) seguse<-segrat$seg[segrat$seg[,chromcol]%in%chromrange,,drop=F] aux<-rep(0,length(segrat$rat)) aux[seguse[,startcol]]<-1 aux[seguse[,endcol]]<-(-1) aux<-cumsum(aux) aux[seguse[,endcol]]<-1 ratuse<-segrat$rat[aux==1] for(j in 1:ntrial){ aaa<-segsample(seguse,ratuse,blocksize=blsize) if(all(unique(aaa[,3])==0)){ aaa[,3]<-1e-10 } emfit<-Mclust(aaa[,3],maxG=15,modelNames=modelNames) if(emfit$bic>=bestbic){ bestaaa<-aaa bestem<-emfit bestbic<-emfit$bic } } newem<-consolidate(bestem,minjoin) newem<-get.center(newem,cweight) if(length(bestem$parameters$mean)==1){ profcenter<-median(bestaaa[,3]) }else{ profcenter<-weighted.median(bestaaa[,3],newem$z[,newem$center]) } mediandev<-segrat$seg[,medcol]-profcenter segs<-segsample(segrat$seg,segrat$rat,times=bstimes) if(all(unique(aaa[,3])==1e-10)){ segs[segs[,3]==0,3]<-1e-10 } segzall<-getz(segs[,3],bestem,newem$groups,times=bstimes) centerz<-segzall[,newem$center] maxz<-segzall[nrow(segzall)*(max.col(segzall)-1)+(1:nrow(segzall))] maxzcol<-max.col(segzall) maxzmean<-newem$mu[maxzcol]-newem$mu[newem$center] maxzsigma<-sqrt(newem$sigmasq[maxzcol]) cpb<-centerprob(segs[,3],bestem,newem$groups,times=bstimes,newem$center) w<-t(matrix(nrow=bstimes,data=segs[,3])) segerr<-sqrt(apply(w,1,var,na.rm=T)) .lec.CurrentStreamEnd() .lec.DeleteStream(segrat$stream) return(cbind(segrat$seg[,medcol],segrat$seg[,madcol],mediandev,segerr,centerz, cpb,maxz,maxzmean,maxzsigma)) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/CNclusterNcenter.R
CNpreprocessing <- function(segall,ratall=NULL,idcol=NULL,startcol=NULL, endcol=NULL,medcol=NULL,madcol=NULL,errorcol=NULL,chromcol=NULL, bpstartcol=NULL,bpendcol=NULL,annot=NULL,annotstartcol=NULL,annotendcol=NULL, annotchromcol=NULL,useend=F,blsize=NULL,minjoin=NULL,ntrial=10,bestbic=-1e7, modelNames="E",cweight=NULL,bstimes=NULL, chromrange=NULL,myseed=123,distrib=c("vanilla","Rparallel"),njobs=1, normalength=NULL,normalmedian=NULL,normalmad=NULL,normalerror=NULL){ #try to see what's possible with this input if(is.null(idcol)){ cat("Found a single segmented profile with no ID","\n") if(!is.null(ratall)){ if(sum(apply(ratall,2,data.class)=="numeric")>1) stop("Ambiguity: more than 1 numeric column in raw data table\n") else{ idrat<-which(apply(ratall,2,data.class)=="numeric") segall<-data.frame(rep(as.character(idrat),nrow(segall)),segall) idcol<-"ID" dimnames(segall)[[2]][1]<-idcol } } } if(is.null(ratall))cat("No raw table, proceeding to comparison\n") else{ profnames<-unique(segall[,idcol]) if(!all(profnames%in%dimnames(ratall)[[2]])) stop("Found unmatched segmented profile IDs\n") if(is.null(startcol)|is.null(endcol)){ #will need an annotation table if(is.null(bpstartcol)|is.null(bpendcol)|is.null(chromcol)) stop("Unable to proceed: incomplete segment annotation\n") if(is.null(chromrange))chromrange<-sort(unique(segall[,chromcol])) if(is.null(annot)) stop("No annotation table; unable to determine boundary probes/bins\n") if(is.null(annotstartcol)|is.null(annotchromcol)) stop("No start and chrom column names provided for annotation table\n") if(useend&is.null(annotendcol)) stop("End column name required but not provided in annotation table\n") maxbpstart<-max(c(segall[,bpstartcol],annot[,annotstartcol]))+1 maxbpend<-ifelse(useend,max(c(segall[,bpendcol],annot[,annotendcol])), max(c(segall[,bpendcol],annot[,annotstartcol])))+1 startprobe<-match((segall[,chromcol]-1)*maxbpstart+segall[,bpstartcol], ceiling((annot[,annotchromcol]-1)*maxbpstart+annot[,annotstartcol])) endprobe<-ifelse(rep(useend,length(startprobe)), match((segall[,chromcol]-1)*maxbpend+segall[,bpendcol], ceiling((annot[,annotchromcol]-1)*maxbpend+annot[,annotendcol])), match((segall[,chromcol]-1)*maxbpend+segall[,bpendcol], ceiling((annot[,annotchromcol]-1)*maxbpend+annot[,annotstartcol]))) if(!all(!is.na(startprobe)&!is.na(endprobe))) stop("Incomplete start and end annotation of segments\n") segall<-data.frame(segall,startprobe,endprobe) dimnames(segall)[[2]][(ncol(segall)-1):ncol(segall)]<-c("StartProbe","EndProbe") startcol<-"StartProbe" endcol<-"EndProbe" } profpack<-vector(mode="list",length=length(profnames)) names(profpack)<-profnames for(pn in profnames){ profpack[[pn]]<-vector(mode="list",length=4) names(profpack[[pn]])<-c("seg","rat","stream","sub") profpack[[pn]]$seg<- segall[segall[,idcol]==pn,c(startcol,endcol,chromcol),drop=F] dimnames(profpack[[pn]]$seg)[[2]]<-c("StartProbe","EndProbe","chrom") profpack[[pn]]$rat<-ratall[,pn] profpack[[pn]]$stream<-pn profpack[[pn]]$sub<-match(pn,profnames) } rm(ratall) gc() distrib<-match.arg(distrib) if(distrib=="Rparallel"){ ncores<-min(njobs,length(profnames),detectCores()) cl<-parallel::makeCluster(getOption("cl.cores",ncores)) parallel::clusterEvalQ(cl=cl,expr=requireNamespace("rlecuyer")) parallel::clusterEvalQ(cl=cl,expr=requireNamespace("mclust")) parallel::clusterEvalQ(cl=cl,expr=requireNamespace("CNprep")) } processed<-switch(distrib, vanilla=lapply(X=profpack,FUN=CNclusterNcenter,blsize=blsize, minjoin=minjoin,ntrial=ntrial,bestbic=bestbic,modelNames=modelNames, 
cweight=cweight,bstimes=bstimes,chromrange=chromrange,seedme=myseed), Rparallel=parLapply(cl,X=profpack,fun=CNclusterNcenter,blsize=blsize, minjoin=minjoin,ntrial=ntrial,bestbic=bestbic,modelNames=modelNames, cweight=cweight,bstimes=bstimes,chromrange=chromrange,seedme=myseed)) if(distrib=="Rparallel")stopCluster(cl) segall<-cbind(segall,do.call(rbind,processed)) dimnames(segall)[[2]][(ncol(segall)-8):ncol(segall)]<- c("segmedian","segmad","mediandev","segerr","centerz","marginalprob", "maxz","maxzmean","maxzsigma") medcol<-"mediandev" madcol<-"segmad" errorcol<-"segerr" } if(!(is.null(normalength)|is.null(normalmedian)|is.null(medcol))){ if(is.null(bpstartcol)|is.null(bpendcol)){ #try to annotate if(is.null(startcol)|is.null(endcol)|is.null(annot)|is.null(annotstartcol) |is.null(annotendcol))stop("Insufficient annotation for comparison") tumorlength<-annot[segall[,endcol],annotendcol]- annot[segall[,startcol],annotstartcol]+1 } else tumorlength<-segall[,bpendcol]-segall[,bpstartcol]+1 tumormedian<-segall[,medcol] if(!is.null(madcol))tumormad<-segall[,madcol] if(!is.null(errorcol))tumorerror<-segall[,errorcol] segall<-cbind(segall,normalComparison(normalmedian,normalength, tumormedian,tumorlength,normalmad,normalerror,tumormad,tumorerror)) } return(segall) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/CNpreprocessing.R
applyCNPmask<-function(segtable,chrom,startPos,endPos,startProbe,endProbe, eventIndex,masktable,maskchrom,maskstart,maskend,maskindex,mincover=1, indexvals=c(-1,1)){ breakCNPs<-by(segtable,INDICES=as.factor(segtable[,chrom]), FUN=breakIntoCNPs.chrom,chrom=chrom,startPos=startPos,endPos=endPos, startProbe=startProbe,endProbe=endProbe,eventIndex=eventIndex, cnptable=masktable,cnpchrom=maskchrom,cnpstart=maskstart, cnpend=maskend,cnpindex=maskindex,mincover=mincover,indexvals=indexvals, simplify=T) myCNPs<-matrix(ncol=3,byrow=T,data=unlist(lapply(breakCNPs,t))) dimnames(myCNPs)[[2]]<-c("StartProbe","EndProbe","toremove") return(as.matrix(myCNPs)) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/applyCNPmask.R
breakIntoCNPs.chrom<-function(segtable,chrom,startPos,endPos,startProbe, endProbe,eventIndex,cnptable,cnpchrom,cnpstart,cnpend,cnpindex,mincover, indexvals){ toremove<-rep(0,nrow(segtable)) segtable<-cbind(segtable,toremove) chr<-segtable[1,chrom] if(sum(segtable[,eventIndex]!=0)==0|sum(cnptable[,cnpchrom]==chr)==0) return(as.matrix(segtable[,c(startProbe,endProbe,"toremove")])) cnpsinchr<-cnptable[cnptable[,cnpchrom]==chr,,drop=F] for(i in indexvals) if(sum(segtable[,eventIndex]==i)>0&sum(cnpsinchr[,cnpindex]==i)>0){ acnpinchr<-cnpsinchr[cnpsinchr[,cnpindex]==i,,drop=F] amps<-which(segtable[,eventIndex]==i) segstartmat<-matrix(ncol=nrow(acnpinchr), data=rep(segtable[amps,startPos],nrow(acnpinchr))) segendmat<-matrix(ncol=nrow(acnpinchr), data=rep(segtable[amps,endPos],nrow(acnpinchr))) cnpstartmat<-t(matrix(ncol=length(amps), data=rep(acnpinchr[,cnpstart],length(amps)))) cnpendmat<-t(matrix(ncol=length(amps), data=rep(acnpinchr[,cnpend],length(amps)))) cnpcover<-rowSums(pmax(matrix(nrow=nrow(cnpendmat), ncol=ncol(cnpendmat),data=0),(pmin(segendmat,cnpendmat)- pmax(segstartmat,cnpstartmat)+1)))/ (segtable[amps,endPos]-segtable[amps,startPos]+1) toremove[amps[cnpcover>mincover]]<-1 } segtable[,"toremove"]<-toremove if(sum(toremove)>0)segtable[,c(startProbe,endProbe)]<- breakIntoGaps(segtable,"toremove",startProbe,endProbe) return(as.matrix(segtable[,c(startProbe,endProbe,"toremove")])) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/breakIntoCNPs.chrom.R
# Given a segment table and an indicator column (gapind) marking segments to
# remove, reassign probe boundaries so that the flanking kept segments absorb
# each removed run, split at a randomly chosen point within the run.
breakIntoGaps<-function(segtable,gapind,StartProbe,EndProbe){
    if(sum(segtable[,gapind])==0)
        return(as.matrix(segtable[,c(StartProbe,EndProbe)]))
    # locate the first and last rows of each run of segments to remove
    gapstep<-segtable[,gapind]-c(0,segtable[-nrow(segtable),gapind])
    gapstart<-which(gapstep==1)
    gapend<-which(gapstep==-1)-1
    if(length(gapend)<length(gapstart))gapend<-c(gapend,nrow(segtable))
    # pick a random split point inside each run; runs touching the table ends
    # are absorbed entirely by the single neighboring segment
    ranfrac<-runif(n=length(gapend))
    ranfrac[gapstart==1]<-1
    ranfrac[gapend==nrow(segtable)]<-0
    midpoint<-round(ranfrac*segtable[gapstart,StartProbe]+
        (1-ranfrac)*segtable[gapend,EndProbe])
    # extend the following segment's start and the preceding segment's end
    segtable[(gapend+1)[gapend!=nrow(segtable)],StartProbe]<-
        midpoint[gapend!=nrow(segtable)]
    segtable[(gapstart-1)[gapstart!=1],EndProbe]<-
        ifelse(gapstart[gapstart!=1]!=nrow(segtable),
        midpoint[gapstart!=1]-1,midpoint[gapstart!=1])
    return(as.matrix(segtable[,c(StartProbe,EndProbe)]))
}
/scratch/gouwar.j/cran-all/cranData/CNprep/R/breakIntoGaps.R
centerprob <- function(logr,emfit,zgroup,times,center){ gz<-matrix(nrow=length(logr),ncol=length(emfit$parameters$mean),data=logr) #gz<-t(emfit$pro*exp(-0.5*(t(gz)-emfit$mu)^2/emfit$sigma)/ #sqrt(2*pi*emfit$sigma)) if(length(emfit$parameters$mean)==1)epro<-as.vector(1) if(length(emfit$parameters$mean)>1)epro<-emfit$parameters$pro gz<-t(epro*pnorm(-abs(t(gz)-emfit$parameters$mean)/ sqrt(emfit$parameters$variance$sigmasq))) gz<-gz%*%t(zgroup) # combine columns of z table using indicator matrix zgroup gz<-matrix(ncol=ncol(gz), data=apply(gz,2,cumsum)[seq(from=times,to=nrow(gz),by=times),]/times) #mean value within each segment gz<-(gz[,center]-c(0,gz[-nrow(gz),center]))/sum(epro[zgroup[center,]==1]) return(ifelse(gz<0.5,gz,1-gz)) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/centerprob.R
consolidate <- function(emfit,minover){ newem<-list(mu=emfit$parameters$mean,pro=emfit$parameters$pro,z=emfit$z, groups=matrix(nrow=length(emfit$parameters$mean), ncol=length(emfit$parameters$mean),data=0), ngroups=length(emfit$parameters$mean), sigmasq=emfit$parameters$variance$sigmasq) if(is.null(emfit$z))newem$z<-matrix(ncol=1,data=rep(1,emfit$n)) if(length(newem$sigmasq)==1) newem$sigmasq<-rep(newem$sigmasq,length(emfit$parameters$mean)) diag(newem$groups)<-1 while(newem$ngroups>1){ #note the asymmetry; a fraction of me in you != a fraction of you in me avz<-(t(newem$z)/colSums(newem$z))%*%newem$z diag(avz)<-0 if(max(avz)>minover){ g1<-col(avz)[which.max(avz)] g2<-row(avz)[which.max(avz)] gl<-min(g1,g2) gr<-max(g1,g2) newem$z[,gl]<-newem$z[,gl]+newem$z[,gr] newem$z<-matrix(ncol=ncol(newem$z)-1,nrow=nrow(newem$z), data=newem$z[,-gr]) numu<-(newem$mu[gl]*newem$pro[gl]+newem$mu[gr]*newem$pro[gr])/ (newem$pro[gl]+newem$pro[gr]) newem$sigmasq[gl]<-(newem$pro[gl]*(newem$sigmasq[gl]+newem$mu[gl]^2)+ newem$pro[gr]*(newem$sigmasq[gr]+newem$mu[gr]^2))/ (newem$pro[gl]+newem$pro[gr])-numu^2 newem$mu[gl]<-numu newem$mu<-newem$mu[-gr] newem$sigmasq<-newem$sigmasq[-gr] newem$pro[gl]<-newem$pro[gl]+newem$pro[gr] newem$pro<-newem$pro[-gr] newem$groups[gl,]<-newem$groups[gl,]+newem$groups[gr,] newem$groups<-matrix(ncol=ncol(newem$groups),nrow=newem$ngroups-1, data=newem$groups[-gr,]) newem$ngroups<-newem$ngroups-1 } else break } return(newem) }
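# Note on the merging rule: avz[j,k] is the average posterior weight of
# component k among observations weighted by component j (hence the asymmetry
# remarked above). The pair with the largest such overlap is merged whenever
# it exceeds 'minover', pooling the means, variances and mixing proportions
# moment-wise; 'groups' tracks which original components each merged group
# contains.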
/scratch/gouwar.j/cran-all/cranData/CNprep/R/consolidate.R
containment.indicator <- function(vstart,vend,wstart,wend){ lw<-length(wstart) lv<-length(vstart) z<-cbind(c(vend,wend),c(1:lv,rep(0,lw)),c(rep(0,lv),1:lw)) z<-z[order(z[,1]),] endbeforeend<-cummax(z[,2])[order(z[,3])][sort(z[,3])!=0] z<-cbind(c(wstart,vstart),c(rep((lv+1),lw),1:lv),c(1:lw,rep(0,lv))) z<-z[order(z[,1]),] startafterstart<-rev(cummin(rev(z[,2])))[order(z[,3])][sort(z[,3])!=0] return(cbind(startafterstart,endbeforeend)) }
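# For sorted interval sets, row i of the result describes w-interval i:
# 'startafterstart' is the index of the first v-interval starting at or after
# wstart[i], and 'endbeforeend' the index of the last v-interval ending at or
# before wend[i], so endbeforeend >= startafterstart exactly when some
# v-interval is contained in w-interval i. For example,
#   containment.indicator(vstart = c(5, 25), vend = c(10, 30),
#                         wstart = c(1, 40), wend = c(35, 50))
# returns rows (1, 2) and (3, 2): only the first w-interval contains a
# v-interval.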
/scratch/gouwar.j/cran-all/cranData/CNprep/R/containment.indicator.R
get.center <- function(emfit,mincenter){ #emfit must have come out of consolidate! newem<-list(mu=emfit$mu,pro=emfit$pro,z=emfit$z,groups=emfit$groups, ngroups=emfit$ngroups,sigmasq=emfit$sigmasq, center=which.min(abs(emfit$mu))) while(newem$ngroups>1){ omu<-order(abs(newem$mu)) if(newem$pro[omu[1]]<mincenter){ gl<-min(omu[1:2]) gr<-max(omu[1:2]) newem$z[,gl]<-newem$z[,gl]+newem$z[,gr] newem$z<-newem$z[,-gr,drop=F] numu<-(newem$mu[gl]*newem$pro[gl]+newem$mu[gr]*newem$pro[gr])/ (newem$pro[gl]+newem$pro[gr]) newem$sigmasq[gl]<-(newem$pro[gl]*(newem$sigmasq[gl]+newem$mu[gl]^2)+ newem$pro[gr]*(newem$sigmasq[gr]+newem$mu[gr]^2))/ (newem$pro[gl]+newem$pro[gr])-numu^2 newem$mu[gl]<-numu newem$mu<-newem$mu[-gr] newem$sigmasq<-newem$sigmasq[-gr] newem$pro[gl]<-newem$pro[gl]+newem$pro[gr] newem$pro<-newem$pro[-gr] newem$groups[gl,]<-newem$groups[gl,]+newem$groups[gr,] newem$groups<-newem$groups[-gr,,drop=F] newem$ngroups<-newem$ngroups-1 newem$center<-gl } else break } return(newem) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/get.center.R
getz <- function(logr,emfit,zgroup,times){ if(is.null(emfit$z))gz<-matrix(ncol=1,data=rep(1,length(logr))) else gz<-predict(emfit,newdata=logr)$z isfin<-matrix(ncol=ncol(gz),nrow=nrow(gz),data=is.finite(gz)) gz[!isfin]<-0 #just being honest: we don't know how to assign these gz<-matrix(ncol=ncol(gz), data=apply(gz,2,cumsum)[seq(from=times,to=nrow(gz),by=times),]) cisfin<-matrix(ncol=ncol(isfin), data=apply(isfin,2,cumsum)[seq(from=times,to=nrow(isfin),by=times),]) cisfin<-cisfin- rbind(matrix(nrow=1,data=rep(0,ncol(cisfin))),cisfin[-nrow(gz),,drop=F]) gz<- (gz-rbind(matrix(nrow=1,data=rep(0,ncol(gz))),gz[-nrow(gz),,drop=F]))/times gz[cisfin<times]<-NA return(gz%*%t(zgroup)) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/getz.R
makeCNPmask<-function(imat,chromcol=1,startcol=2,endcol=3,nprof=1,uthresh,dthresh){ CNPmask<-by(imat,INDICES=as.factor(imat[,chromcol]),FUN=makeCNPmask.chrom, startcol=startcol,endcol=endcol,nprof=nprof,uthresh=uthresh, dthresh=dthresh,simplify=T) myCNPmask<-matrix(ncol=2,byrow=T,data=unlist(lapply(CNPmask,t))) myCNPmask<-cbind(unlist(lapply(1:length(unique(imat[,chromcol])), FUN=function(x) rep(as.numeric(names(CNPmask)[x]),nrow(CNPmask[[x]])))),myCNPmask) dimnames(myCNPmask)[[2]]<-c("chrom","start","end") return(myCNPmask) }
/scratch/gouwar.j/cran-all/cranData/CNprep/R/makeCNPmask.R
makeCNPmask.chrom<-function(imat,startcol=1,endcol=2,nprof=1,uthresh,dthresh){ astart<-imat[,startcol] aend<-imat[,endcol] z<-cbind(c(astart,aend,aend+1), c(rep(1,length(astart)),rep(0,length(aend)),rep(-1,length(aend)))) z<-z[order(z[,1]),] z[,2]<-cumsum(z[,2]) z<-z[nrow(z)-rev(match(rev(unique(z[,1])),rev(z[,1])))+1,] #z[,1] gives unique start and end positions; z[,2] gives event counts there z<-cbind(z,z[,2]>=(uthresh*nprof)) #mark positions w/counts above upper thresh zsteps<-z[,3]-c(0,z[-nrow(z),3]) ustart<-z[zsteps==1,1] zsteps<-z[,3]-c(z[-1,3],0) uend<-z[zsteps==1,1] #starts and ends of intervals w/count above upper thresh z[,3]<-z[,2]>=(dthresh*nprof) zsteps<-z[,3]-c(0,z[-nrow(z),3]) dstart<-z[zsteps==1,1] zsteps<-z[,3]-c(z[-1,3],0) dend<-z[zsteps==1,1] #likewise for the lower thresh if(length(ustart)>0){ ci<-containment.indicator(ustart,uend,dstart,dend) return(matrix(ncol=2,data=c( dstart[ci[,2]>=ci[,1]],dend[ci[,2]>=ci[,1]]), dimnames=list(NULL,c("start","end")))) } #ie intervals above lower thresh with counts above upper thresh inside else{ return(matrix(ncol=3,nrow=0,dimnames=list(NULL,c("chrom","start","end")))) } }
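# Worked reading of the thresholds (illustrative numbers): with nprof = 10,
# uthresh = 0.5 and dthresh = 0.2, a mask is seeded wherever at least 5 of
# the 10 profiles carry an event, and is reported as the surrounding interval
# over which coverage stays at or above 2 profiles; intervals that reach the
# lower threshold but never the upper one are discarded.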
/scratch/gouwar.j/cran-all/cranData/CNprep/R/makeCNPmask.chrom.R
normalComparison <- function(normalmedian,normalength, tumormedian,tumorlength,normalmad=NULL,normalerror=NULL,tumormad=NULL, tumorerror=NULL){ nisnull<-c(!(is.null(normalmad)|is.null(tumormad)), !(is.null(normalerror)|is.null(tumorerror))) nsred<-matrix(ncol=2+sum(nisnull),length(normalmedian), data=c(normalength,normalmedian, if(nisnull[1])normalmad,if(nisnull[2])normalerror),dimnames=list(NULL, c("length","mediandev",if(nisnull[1])"segmad",if(nisnull[2])"segerr"))) nsred<-nsred[order(nsred[,"mediandev"]),,drop=F] lnorm<-sum(nsred[,"length"]) z<-cbind(c(nsred[,"mediandev"],tumormedian),c(nsred[,"length"], rep(0,length(tumormedian))),c(rep(0,nrow(nsred)),1:length(tumormedian))) z<-z[order(z[,1]),,drop=F] z[,2]<-cumsum(z[,2])/lnorm z<-z[z[,3]!=0,,drop=F] negtail<-z[order(z[,3]),2] if(nisnull[1]){ z<-cbind(c(nsred[,"mediandev"]/nsred[,"segmad"], tumormedian/tumormad),c(nsred[,"length"], rep(0,length(tumormedian))), c(rep(0,nrow(nsred)),1:length(tumormedian))) z<-z[order(z[,1]),,drop=F] z[,2]<-cumsum(z[,2])/lnorm z<-z[z[,3]!=0,,drop=F] negtailnormad<-z[order(z[,3]),2] } if(nisnull[2]){ z<-cbind(c(nsred[,"mediandev"]/nsred[,"segerr"],tumormedian/tumorerror), c(nsred[,"length"],rep(0,length(tumormedian))), c(rep(0,nrow(nsred)),1:length(tumormedian))) z<-z[order(z[,1]),,drop=F] z[,2]<-cumsum(z[,2])/lnorm z<-z[z[,3]!=0,,drop=F] negtailnormerror<-z[order(z[,3]),2] } return(matrix(ncol=2+sum(nisnull),data=c(lnorm%/%tumorlength, negtail,if(nisnull[1])negtailnormad,if(nisnull[2])negtailnormerror), dimnames=list(NULL,c("samplesize","negtail",if(nisnull[1])"negtailnormad", if(nisnull[2])"negtailnormerror")))) }
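# Note: 'negtail' gives the length-weighted empirical lower-tail probability
# of each tumour segment median within the distribution of normal segment
# medians (with MAD- and error-standardised variants when those columns are
# supplied); 'samplesize' is the total normal length divided (integer
# division) by each tumour segment length.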
/scratch/gouwar.j/cran-all/cranData/CNprep/R/normalComparison.R
segsample <- function(mysegs,ratcol,startcol="StartProbe",endcol="EndProbe", blocksize=0,times=0){ if(blocksize==0&times==0)stop("One of blocksize or times must be set") if(blocksize!=0&times!=0)stop("Only one of blocksize or times can be set") segtable<-mysegs[,c(startcol,endcol),drop=F] if(blocksize!=0)segtable<- segtable[rep(1:nrow(segtable), times=(segtable[,endcol]-segtable[,startcol]+1)%/%blocksize),] if(times!=0)segtable<-segtable[rep(1:nrow(segtable),each=times),] return(cbind(segtable, apply(segtable, 1, smedian.sample, v = ratcol))) }
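# Usage sketch (illustrative arguments; 'logratios' stands for the per-probe
# log-ratio vector): draw 3 bootstrap medians within every segment of
# 'mysegs',
#   segsample(mysegs, ratcol = logratios, times = 3)
# or one bootstrap median per whole block of 100 probes,
#   segsample(mysegs, ratcol = logratios, blocksize = 100)
# Each output row repeats the segment's probe boundaries followed by the
# resampled median (computed by smedian.sample).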
/scratch/gouwar.j/cran-all/cranData/CNprep/R/segsample.R
smad <- function(pos,v)mad(v[pos[1]:pos[2]],na.rm=T)
/scratch/gouwar.j/cran-all/cranData/CNprep/R/smad.R
smedian <- function(pos,v)median(v[pos[1]:pos[2]],na.rm=T)
/scratch/gouwar.j/cran-all/cranData/CNprep/R/smedian.R
smedian.sample <- function(pos, v) { w<-v[pos[1]:pos[2]][!is.na(v[pos[1]:pos[2]])] return(median(sample(w,length(w),replace=T),na.rm=T)) }
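# Note: this is a bootstrap median -- the non-missing values in positions
# pos[1]:pos[2] of v are resampled with replacement before taking the median,
# so repeated calls on the same segment vary by design.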
/scratch/gouwar.j/cran-all/cranData/CNprep/R/smedian.sample.R
smedmad <- function(pos,v) c(median(v[pos[1]:pos[2]],na.rm=T),mad(v[pos[1]:pos[2]],na.rm=T))
/scratch/gouwar.j/cran-all/cranData/CNprep/R/smedmad.R
weighted.median <- function(v,weights){ weights<-weights[order(v)] v<-sort(v) sw<-sum(weights) return(v[which.min(abs(cumsum(weights)-0.5*sw))]) }
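# Note: this picks the sorted value whose cumulative weight is closest to
# half of the total weight (ties resolve to the smaller value), e.g.
#   weighted.median(c(1, 2, 3, 4), weights = c(5, 1, 1, 1))  # 1
#   weighted.median(c(10, 20, 30), weights = c(1, 2, 4))     # 20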
/scratch/gouwar.j/cran-all/cranData/CNprep/R/weighted.median.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @keywords internal #' @noRd #' NULL Reduced_Rank_COAP <- function(X_count, a, Z, rank_use, Mu_y_int, S_y_int, invLambda_int, B_int, bbeta_int, H_int, epsELBO, maxIter, verbose, sep_opt_beta, fast_svd = TRUE) { .Call(`_COAP_Reduced_Rank_COAP`, X_count, a, Z, rank_use, Mu_y_int, S_y_int, invLambda_int, B_int, bbeta_int, H_int, epsELBO, maxIter, verbose, sep_opt_beta, fast_svd) }
/scratch/gouwar.j/cran-all/cranData/COAP/R/RcppExports.R
# generate man files
# devtools::document()
# R CMD check --as-cran COAP_1.1.tar.gz
## usethis::use_data(dat_r2_mac)
# pkgdown::build_site()
# pkgdown::build_home()
# pkgdown::build_reference()
# pkgdown::build_article("COAPsimu")
# pkgdown::build_article("ProFASTdlpfc2")
# rmarkdown::render('./vignettes_PDF/COAPsimu.Rmd', output_format=c('html_document'))
# rmarkdown::render('./vignettes_PDF/COAPsimu.Rmd', output_format=c('pdf_document'), clean = F)


#' Generate simulated data
#' @description Generate simulated data from covariate-augmented Poisson factor models.
#' @param seed a positive integer, the random seed for reproducibility of the data generation process.
#' @param n a positive integer, specify the sample size.
#' @param p a positive integer, specify the dimension of count variables.
#' @param d a positive integer, specify the dimension of the covariate matrix.
#' @param q a positive integer, specify the number of factors.
#' @param rank0 a positive integer, specify the rank of the coefficient matrix.
#' @param rho a numeric vector with length 2 and positive elements, specify the signal strength of the loading matrix and the regression coefficient, respectively.
#' @param sigma2_eps a positive real, the variance of the overdispersion error.
#' @param seed.beta a positive integer, the random seed for reproducibility of the data generation process by fixing the regression coefficient matrix beta.
#' @return return a list including the following components: (1) X, the high-dimensional count matrix; (2) Z, the high-dimensional covariate matrix; (3) bbeta0, the low-rank large coefficient matrix; (4) B0, the loading matrix; (5) H0, the factor matrix; (6) rank: the true rank of bbeta0; (7) q: the true number of factors.
#' @details None
#' @seealso \code{\link{RR_COAP}}
#' @references None
#' @export
#' @importFrom MASS mvrnorm
#' @importFrom stats cov lm residuals rnorm rpois
#'
#' @examples
#' n <- 300; p <- 100
#' d <- 20; q <- 6; r <- 3
#' datlist <- gendata_simu(n=n, p=p, d=20, q=q, rank0=r)
#' str(datlist)
gendata_simu <-function (seed = 1, n = 300, p = 50, d=20,
                         q = 6, rank0=3, rho = c(1.5, 1), sigma2_eps=0.1, seed.beta=1){
  #require(MASS)
  if(rank0<=1) stop("rank0 must be greater than 1!")
  cor.mat<-function (p, rho, type = "toeplitz") {
    if (p == 1)
      return(matrix(1, 1, 1))
    mat <- diag(p)
    if (type == "toeplitz") {
      for (i in 2:p) {
        for (j in 1:i) {
          mat[i, j] <- mat[j, i] <- rho^(abs(i - j))
        }
      }
    }
    if (type == "identity") {
      mat[mat == 0] <- rho
    }
    return(mat)
  }
  Diag<-function (vec){
    q <- length(vec)
    if (q > 1) {
      y <- diag(vec)
    }
    else {
      y <- matrix(vec, 1, 1)
    }
    return(y)
  }

  if(length(rho)<2) stop("rho must be a numeric vector of length 2!")

  factor_term <- rho[1]
  factor_term_z <- rho[2]
  set.seed(seed.beta) # Fixed bbeta0
  #bbeta0 <- matrix(rnorm(p*d), p, d)
  rank_true <- rank0 - 1
  bbeta0 <- t(matrix(rnorm(d*rank_true), d, rank_true) %*% matrix(rnorm(rank_true* p), rank_true, p)) / p *4 * factor_term_z
  Ztmp <- matrix(rnorm(p * q), p, q)
  B <- qr(Ztmp)
  eigvs <- sqrt(sort(eigen(t(Ztmp) %*% Ztmp)$values, decreasing = T))
  B1 <- qr.Q(B) %*% Diag(sqrt(eigvs))
  B0 <- B1 %*% Diag(sign(B1[1, ])) ## Fixed B0 and mu0 for each repeat.
  # mu0 <- rnorm(p) * factor_term
  # Bm0 <- cbind(mu0, B0)
  set.seed(seed)

  if(d<2) stop("d must be greater than 1!")
  Z <- MASS::mvrnorm(n, mu=rep(0, d-1), Sigma = cor.mat(d-1, rho=0.5))
  Z <- cbind(1, Z)
  epsi <- MASS::mvrnorm(n, mu=rep(0, p), Sigma = diag(p) * sigma2_eps) # isotropic overdispersion errors
  H <- mvrnorm(n, mu = rep(0, q), cor.mat(q, 0.5))
  H <- residuals(lm(H~Z))
  svdH <- svd(cov(H))
  H0 <- scale(H, scale = F) %*% svdH$u %*% Diag(1/sqrt(svdH$d)) %*% svdH$v

  g1 <- 1:p
  B0[g1, ] <- B0[g1, ]/max(B0[g1, ]) * factor_term ## scale

  mu <- exp(Z %*% t(bbeta0) + H0 %*% t(B0) + epsi) # + matrix(mu0, n, p, byrow = T)

  X <- matrix(rpois(n * p, lambda = mu), n, p)

  return(list(X = X, Z=Z, bbeta0=bbeta0, B0 = B0, H0 = H0, rank=rank0, q=q))
}

# gendata_simu <-function (seed = 1, n = 300, p = 50, d=20,
#                          q = 6, rank0=3, rho = c(1.5, 1), sigma2_eps=0.1){
#
#   require(MASS)
#   if(rank0<=1) stop("rank0 must be greater than 1!")
#   cor.mat<-function (p, rho, type = "toeplitz") {
#     if (p == 1)
#       return(matrix(1, 1, 1))
#     mat <- diag(p)
#     if (type == "toeplitz") {
#       for (i in 2:p) {
#         for (j in 1:i) {
#           mat[i, j] <- mat[j, i] <- rho^(abs(i - j))
#         }
#       }
#     }
#     if (type == "identity") {
#       mat[mat == 0] <- rho
#     }
#     return(mat)
#   }
#   Diag<-function (vec){
#     q <- length(vec)
#     if (q > 1) {
#       y <- diag(vec)
#     }
#     else {
#       y <- matrix(vec, 1, 1)
#     }
#     return(y)
#   }
#
#   if(length(rho)<2) stop("rho must be a numeric vector of length 2!")
#
#   factor_term <- rho[1]
#   factor_term_z <- rho[2]
#   set.seed(1) # Fixed bbeta0
#   #bbeta0 <- matrix(rnorm(p*d), p, d)
#   rank_true <- rank0 - 1
#   bbeta0 <- t(matrix(rnorm(d*rank_true), d, rank_true) %*% matrix(rnorm(rank_true* p), rank_true, p)) / p *4 * factor_term_z
#   Ztmp <- matrix(rnorm(p * q), p, q)
#   B <- qr(Ztmp)
#   eigvs <- sqrt(sort(eigen(t(Ztmp) %*% Ztmp)$values, decreasing = T))
#   B1 <- qr.Q(B) %*% Diag(sqrt(eigvs))
#   B0 <- B1 %*% Diag(sign(B1[1, ])) ##
#   set.seed(seed)
#
#   if(d<2) stop("d must be greater than 1!")
#   cov_S <- cor.mat(d-1, rho=0.5)
#   zeroMat <- matrix(0, d-1, q)
#   cov_all <- rbind(cbind(cov_S, zeroMat), cbind(t(zeroMat), Diag(rep(1, q)) ) )
#   Z_all <- MASS::mvrnorm(n, mu=rep(0, q+d-1), Sigma = cov_all)
#   Z <- cbind(1, Z_all[,1:(d-1)])
#   epsi <- MASS::mvrnorm(n, mu=rep(0, p), Sigma = diag(rep(p,1))* sigma2_eps)
#   # H <- mvrnorm(n, mu = rep(0, q), cor.mat(q, 0.5))
#   # H <- residuals(lm(H~Z))
#   # svdH <- svd(cov(H))
#   # H0 <- scale(H, scale = F) %*% svdH$u %*% Diag(1/sqrt(svdH$d)) %*%svdH$v
#   H0 <- Z_all[, d: (q+d-1)]
#   # H0 <- scale(H0, scale=F)
#   g1 <- 1:p
#   B0[g1, ] <- B0[g1, ]/max(B0[g1, ]) * factor_term ## scale
#
#
#   mu <- exp(Z %*% t(bbeta0) + H0 %*% t(B0) + epsi) # + matrix(mu0, n, p, byrow = T)
#
#   X <- matrix(rpois(n * p, lambda = mu), n, p)
#
#   return(list(X = X, Z=Z, bbeta0=bbeta0, B0 = B0, H0 = H0, rank=rank0, q=q))
# }
#

Diag <- function (vec) {
  q <- length(vec)
  if (q > 1) {
    y <- diag(vec)
  }
  else {
    y <- matrix(vec, 1, 1)
  }
  return(y)
}

add_identifiability <- function(H, B){
  q <- ncol(H); n <- nrow(H)
  svdHB <- svd(H %*% t(B), nu=q, nv = q)
  signB1 <- sign(svdHB$v[1,])
  H <- sqrt(n) * svdHB$u %*% Diag(signB1)
  B <- svdHB$v %*% Diag(svdHB$d[1:q]*signB1) / sqrt(n)
  return(list(H=H, B=B))
}

#' Fit the COAP model
#' @description Fit the covariate-augmented overdispersed Poisson factor model.
#' @param X_count a count matrix, the observed count matrix.
#' @param multiFac an optional vector, the normalization factor for each unit; default as full-one vector.
#' @param Z an optional matrix, the covariate matrix; default as a full-one column vector if there are no additional covariates.
#' @param rank_use an optional integer, specify the rank of the regression coefficient matrix; default as 5.
#' @param q an optional integer, specify the number of factors; default as 15.
#' @param epsELBO an optional positive value, the tolerance of the relative variation rate of the evidence lower bound value, default as '1e-5'.
#' @param maxIter the maximum iteration of the VEM algorithm. The default is 30.
#' @param verbose a logical value, whether to output information during iterations.
#' @param joint_opt_beta a logical value, whether to use the joint optimization method to update bbeta. The default is \code{FALSE}, which means using the separate optimization method.
#' @param fast_svd a logical value, whether to use the fast SVD algorithm in the update of bbeta; default is \code{TRUE}.
#' @return return a list including the following components: (1) H, the predicted factor matrix; (2) B, the estimated loading matrix; (3) bbeta, the estimated low-rank large coefficient matrix; (4) invLambda, the inverse of the estimated variances of error; (5) H0, the factor matrix; (6) ELBO: the ELBO value when the algorithm stops; (7) ELBO_seq: the sequence of ELBO values.
#' @details None
#' @seealso None
#' @references Liu, W. and Q. Zhong (2024). High-dimensional covariate-augmented overdispersed poisson factor model. arXiv preprint arXiv:2402.15071.
#' @export
#' @useDynLib COAP, .registration = TRUE
#' @importFrom irlba irlba
#' @importFrom Rcpp evalCpp
#'
#'
#' @examples
#' n <- 300; p <- 100
#' d <- 20; q <- 6; r <- 3
#' datlist <- gendata_simu(n=n, p=p, d=20, q=q, rank0=r)
#' str(datlist)
#' fitlist <- RR_COAP(X_count=datlist$X, Z = datlist$Z, q=6, rank_use=3)
#' str(fitlist)
RR_COAP <- function(X_count, multiFac=rep(1, nrow(X_count)), Z=matrix(1, nrow(X_count),1), rank_use=5,
                    q=15, epsELBO=1e-5, maxIter=30, verbose=TRUE, joint_opt_beta=FALSE, fast_svd=TRUE){
  # Z=NULL; q=15; epsELBO=1e-6; maxIter=10; verbose=TRUE

  get_initials <- function(X, q){
    #require(irlba)
    n <- nrow(X); p <- ncol(X)
    mu <- colMeans(X)
    X <- X - matrix(mu, nrow=n, ncol=p, byrow=TRUE)
    svdX <- irlba(A =X, nv = q)
    PCs <- sqrt(n) * svdX$u
    loadings <- svdX$v %*% Diag(svdX$d[1:q]) / sqrt(n)
    dX <- PCs %*% t(loadings) - X
    Lam_vec <- colSums(dX^2)/n
    return(list(hH = PCs, hB = loadings, hmu=mu,sigma2vec = Lam_vec))
  }

  message("Calculate initial values...")
  n <- nrow(X_count); p <- ncol(X_count);
  if(any(Z[,1]!=1)) warning("The first column of covariates Z is not a full-one column vector, so it will fit a model without intercept!")
  Mu_y_int = log(1+ X_count)#matrix(1, n, p);
  S_y_int = matrix(1, n, p);
  a <- multiFac
  fit_approxPCA <- get_initials(Mu_y_int, q=q)
  B_int <- fit_approxPCA$hB
  Mu_h_int <- fit_approxPCA$hH
  invLambda_int = rep(1, p);
  d <- ncol(Z)
  rank_use <- min(rank_use, d)
  bbeta_int <- matrix(0, p, d)
  bbeta_int[,1] <- colMeans(Mu_y_int)
  reslist <- Reduced_Rank_COAP(X_count, a, Z, rank_use, Mu_y_int, S_y_int, invLambda_int, B_int,
                               bbeta_int, Mu_h_int, epsELBO=epsELBO, maxIter=maxIter, verbose=verbose,
                               sep_opt_beta=!joint_opt_beta, fast_svd=fast_svd)
  reslist$ELBO_seq <- reslist$ELBO_seq[-1]
  return(reslist)
}


#' Select the parameters in COAP models
#' @description Select the number of factors and the rank of the coefficient matrix in the covariate-augmented overdispersed Poisson factor model.
#' @param X_count a count matrix, the observed count matrix.
#' @param multiFac an optional vector, the normalization factor for each unit; default as full-one vector.
#' @param Z an optional matrix, the covariate matrix; default as a full-one column vector if there are no additional covariates.
#' @param q_max an optional integer, specify the upper bound for the number of factors; default as 15.
#' @param r_max an optional integer, specify the upper bound for the rank of the regression coefficient matrix; default as 24.
#' @param threshold an optional 2-dimensional positive vector, specify the thresholds that filter the singular values of beta and B, respectively.
#' @param verbose a logical value, whether to output information during iterations.
#' @param ... other arguments passed to the function \code{\link{RR_COAP}}.
#' @return return a named vector with names `hr` and `hq`, the estimated rank and number of factors.
#' @details The threshold filters out the singular values with low signal, to assist the identification of the underlying model structure.
#' @seealso \code{\link{RR_COAP}}
#' @references None
#' @export
#'
#'
#'
#' @examples
#' n <- 300; p <- 200
#' d <- 20; q <- 6; r <- 3
#' datlist <- gendata_simu(n=n, p=p, d=20, q=q, rank0=r, rho=c(1,4))
#' str(datlist)
#' set.seed(1)
#' para_vec <- selectParams(X_count=datlist$X, Z = datlist$Z)
#' print(para_vec)
selectParams <- function(X_count, Z, multiFac=rep(1, nrow(X_count)), q_max=15, r_max=24,
                         threshold=c(1e-1, 1e-2), verbose=TRUE, ...){

  reslist <- RR_COAP(X_count, Z = Z, multiFac=multiFac, rank_use = r_max, q= q_max, verbose=verbose, ...)

  thre1 <- threshold[1]
  beta_svalues <- svd(reslist$bbeta)$d
  beta_svalues <- beta_svalues[beta_svalues>thre1]
  ratio1 <- beta_svalues[-length(beta_svalues)] / beta_svalues[-1]
  hr <- which.max(ratio1[-length(ratio1)])

  thre2 <- threshold[2]
  B_svalues <- svd(reslist$B)$d
  B_svalues <- B_svalues[B_svalues>thre2]
  ratio_fac <- B_svalues[-length(B_svalues)] / B_svalues[-1]
  hq <- which.max(ratio_fac)

  return(c(hr=hr, hq=hq))
}
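# The singular value ratio (SVR) criterion used in selectParams(), in
# isolation: given thresholded singular values d_1 >= d_2 >= ..., the chosen
# dimension is the position of the largest consecutive ratio. A quick
# illustration with hypothetical values:
#   d <- c(9, 6, 4, 0.2, 0.05)
#   ratio <- d[-length(d)] / d[-1]   # 1.5, 1.5, 20, 4
#   which.max(ratio)                 # 3, the inferred dimension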
/scratch/gouwar.j/cran-all/cranData/COAP/R/main.R
# # ## Check Cpp function ------------------------------------------------------ # # # # datList <- gendata(seed=20, n=100, p=100) # # colSums(datList$X) # # X_count <- datList$X; Z <- datList$Z; H <- datList$H0; B <- datList$B0 # # n <- nrow(X_count); p <- ncol(X_count); d <- ncol(Z); q <- ncol(H) # # # # # # # # Mu_y_int = log(1+ X_count)#matrix(1, n, p); # # S_y_int = matrix(1, n, p); # # a <- rep(1, n) # # set.seed(1) # # fit_lfm <- GFM::Factorm(Mu_y_int, q=q) # # #B_int = matrix(rnorm(p*q), p, q)*0.1; # # # Mu_h_int <- matrix(rnorm(n*q), n, q)#matrix(0, n, q) # # B_int <- fit_lfm$hB # # Mu_h_int <- fit_lfm$hH # # bbeta_int <- matrix(0, p, d) # # S_h_int <- array(dim=c(q,q, n)) ## fast computation version that does not update Mu_y and S_y. # # for(i in 1:n) S_h_int[,,i] <- diag(rep(1, q)) # # Sigma_h_int <- diag(rep(1, q)) # # invLambda_int = rep(1, p); ## the default p is 100, the default q is 15 # # tau_int = matrix(colMeans(Mu_y_int), p, 1); # # # # # # # # # reslist <- APoisFactor(X_count, a, Z, Mu_y_int, S_y_int, invLambda_int, B_int, # # # bbeta_int, Mu_h_int, S_h_int, Sigma_h_int, epsELBO=1e-9, # # # maxIter=30, verbose=T, fast_version=1) # # # # # # reslist <- APoisFactor(X_count, a, Z=cbind(1, Z), Mu_y_int, S_y_int, invLambda_int, B_int, # # # cbind(tau_int,bbeta_int), Mu_h_int, S_h_int, Sigma_h_int, epsELBO=1e-9, # # # maxIter=30, verbose=T, fast_version=2) # # # # # Compare with GFM -------------------------------------------------------- # rank0 <- 6; q = 5; d= 50 # datList <- gendata_simu(seed = 1, n=300, p=300, d= d, rank0 = rank0, q= q, rho=c(1, 4), # sigma2_eps = 3) # X_count <- datList$X; Z <- datList$Z # H <- datList$H0; B <- datList$B0 # hq <- 5; hr <- 6 # system.time( # reslist <- RR_COAP(X_count, Z= Z, q=hq, rank_use= hr) # ) # system.time( # reslist <- RR_COAP(X_count, Z= Z, q=hq, fast_svd = F) # ) # # # reslist <- RR_COAP(X_count, Z=Z, q=hq, fast_version = "Rough_Approx") # # plot(reslist$ELBO_seq[-1], type='o') # # reslist$B[1:4,1:5] # bbeta0 <- cbind( datList$mu0, datList$bbeta0) # # reslist$bbeta[1:5,1:4]; bbeta0[1:5,1:4] # GFM::measurefun(reslist$H, H) # GFM::measurefun(reslist$B, B) # # str(reslist) # norm_vec <- function(x) sqrt(sum(x^2/ length(x))) # norm_vec(reslist$bbeta-bbeta0) # # # fit_gfm <- GFM::gfm(list(X_count), type='poisson', q= q) # GFM::measurefun(fit_gfm$hH, H) # GFM::measurefun(fit_gfm$hB, B) # norm_vec(fit_gfm$hmu- bbeta0[,1]) # fit_lfm <- GFM::Factorm(X_count, q=q) # GFM::measurefun(fit_lfm$hH, H) # GFM::measurefun(fit_lfm$hB, B) # # # # # # ### There is no covariates # reslist3 <- RR_COAP(X_count, q=hq) # cancor(datList$H0, reslist3$H)$cor # # # # # # # gendata_simu_lowrank_select <- function (seed = 1, n = 300, p = 50, d=20, q = 6, rank0=3, rho = c(1.5, 1), sigma2_eps=0.1){ # require(MASS) # if(rank0<=1) stop("rank0 must be greater than 1!") # cor.mat<-function (p, rho, type = "toeplitz") { # if (p == 1) # return(matrix(1, 1, 1)) # mat <- diag(p) # if (type == "toeplitz") { # for (i in 2:p) { # for (j in 1:i) { # mat[i, j] <- mat[j, i] <- rho^(abs(i - j)) # } # } # } # if (type == "identity") { # mat[mat == 0] <- rho # } # return(mat) # } # Diag<-function (vec){ # q <- length(vec) # if (q > 1) { # y <- diag(vec) # } # else { # y <- matrix(vec, 1, 1) # } # return(y) # } # # if(length(rho)<2) stop("rho must be a numeric vector of length 2!") # # # factor_term <- rho[1] # factor_term_z <- rho[2] # set.seed(1) # Fixed bbeta0 # #bbeta0 <- matrix(rnorm(p*d), p, d) # rank_true <- rank0 - 1 # bbeta0 <- t(matrix(rnorm(d*rank_true), d, rank_true) 
%*% matrix(rnorm(rank_true* p), rank_true, p)) / p *4 * factor_term_z # Ztmp <- matrix(rnorm(p * q), p, q) # B <- qr(Ztmp) # eigvs <- sqrt(sort(eigen(t(Ztmp) %*% Ztmp)$values, decreasing = T)) # B1 <- qr.Q(B) %*% Diag(sqrt(eigvs)) # B0 <- B1 %*% Diag(sign(B1[1, ])) ## Fixed B0 and mu0 for each repeat. # # mu0 <- rnorm(p) * factor_term # # Bm0 <- cbind(mu0, B0) # set.seed(seed) # # if(d<2) stop("d must be greater than 1!") # Z <- MASS::mvrnorm(n, mu=rep(0, d-1), Sigma = cor.mat(d-1, rho=0.5)) # Z <- cbind(1, Z) # epsi <- MASS::mvrnorm(n, mu=rep(0, p), Sigma = diag(rep(p,1))* sigma2_eps) # H <- mvrnorm(n, mu = rep(0, q), cor.mat(q, 0.5)) # H <- residuals(lm(H~Z)) # svdH <- svd(cov(H)) # H0 <- scale(H, scale = F) %*% svdH$u %*% Diag(1/sqrt(svdH$d)) %*% # svdH$v # # g1 <- 1:p # B0[g1, ] <- B0[g1, ]/max(B0[g1, ]) * factor_term ## scale # # # mu <- exp(Z %*% t(bbeta0) + H0 %*% t(B0) + epsi) # + matrix(mu0, n, p, byrow = T) # # X <- matrix(rpois(n * p, lambda = mu), n, p) # # return(list(X = X, Z=Z, bbeta0=bbeta0, B0 = B0, H0 = H0, rank=rank0, q=q)) # } # # rank0 <- 6; q = 5; d= 50 # datList <- gendata_simu_lowrank_select(seed = 1, n=300, p=300, d= d, rank0 = rank0, q= q, rho=c(3, 6)/2, # sigma2_eps = 3) # X_count <- datList$X; Z <- datList$Z # H <- datList$H0; B <- datList$B0 # hq <- 5; hr <- 6 # system.time( # reslist <- RR_COAP(X_count, Z= Z, q=hq, rank_use= hr) # ) # # GFM::measurefun(reslist$H, H) # GFM::measurefun(reslist$B, B) # # str(reslist) # norm_vec <- function(x) sqrt(sum(x^2/ length(x))) # norm_vec(reslist$bbeta-bbeta0) # # # fit_gfm <- GFM::gfm(list(X_count), type='poisson', q= q) # GFM::measurefun(fit_gfm$hH, H) # GFM::measurefun(fit_gfm$hB, B) # fit_lfm <- GFM::Factorm(X_count, q=q) # GFM::measurefun(fit_lfm$hH, H) # GFM::measurefun(fit_lfm$hB, B) # # # ## Tune the signals to make the rank and number of factors are i -------- # # # ## rho=c(3, 9) is good. 
# datList <- gendata_simu(seed = 1,n=150, p=200, d=50, rank0 = 6, q=5, rho=c(3,5), sigma2_eps = 1) # q_max <- 15 # d <- ncol(datList$Z) # # reslist <- RR_COAP(datList$X, Z = datList$Z, rank_use = floor(d/2), q= q_max, verbose = T,joint_opt_beta=F) # # threshold <- 0.1 # svalues <- svd(reslist$bbeta)$d # # cumsum(svalues)/ sum(svalues) # # par(mfrow=c(2,1)) # svalues <- svalues[svalues>threshold] # ratio1 <- svalues[-length(svalues)] / svalues[-1] # # dat1 <- data.frame(Ratio=ratio1, r=1:length(ratio1)) # library(ggplot2) # p1 <- ggplot(data=dat1, aes(x=r, y=Ratio)) + geom_line(linewidth=0.8) +geom_point(size=1.8)+ # scale_x_continuous(breaks=seq(3, length(ratio1), by=3)) +theme_bw(base_size = 20) # plot(ratio1, type='o', xlab='r', ylab=paste0('ratio of eigenvalue of ',expression(beta))) # abline(v=6, col='red') # which.max(ratio1[-length(ratio1)]) # svalues <- svd(reslist$B)$d # svalues <- svalues[svalues>1e-2] # ratio_fac <- svalues[-length(svalues)] / svalues[-1] # dat2 <- data.frame(Ratio=ratio_fac, q=1:length(ratio_fac)) # p2 <- ggplot(data=dat2, aes(x=q, y=Ratio)) + geom_line(linewidth=0.8) +geom_point(size=1.5) + # theme_bw(base_size=20) # p12 <- SRTpipeline::drawFigs(pList=list(p1,p2), layout.dim = c(1,2), legend.position='none') # p12 # # ### Use the functions # res1 <- selectParams(X_count=datList$X, Z=datList$Z) # # # Test running time for each version -------------------------------------- # datList <- gendata(seed=2, n=100, p=100) # colSums(datList$X) # X_count <- datList$X; Z <- datList$Z; H <- datList$H0; B <- datList$B0 # n <- nrow(X_count); p <- ncol(X_count); d <- ncol(Z); q <- ncol(H) # version_vec <- c("Laplace_Taylor", "Rough_Approx") # time_vec <- rep(NA, length(version_vec)) # for(i in seq_along(version_vec)){ # # i <- 1 # cat(" version = ", version_vec[i], '\n') # tic <- proc.time() # reslist <- RR_COAP(X_count, Z=Z, q=q, fast_version = version_vec[i], maxIter = 20, epsELBO = 1e-20) # cat(cancor(reslist$H, H)$cor, '\n') # cat(cancor(reslist$B, B)$cor, '\n') # toc <- proc.time() # time_vec[i] <- toc[3] - tic[3] # # } # # reslist <- RR_COAP(X_count, q=q, fast_version = version_vec[i], maxIter = 20, epsELBO = 1e-20) # # version_vec <- c('Factorm', "approxPCA") # time_vec <- rep(NA, length(version_vec)) # for(i in seq_along(version_vec)){ # # i <- 3 # cat(" version = ", version_vec[i], '\n') # tic <- proc.time() # reslist <- APoissonFactor(X_count, Z=Z, q=q, initial = version_vec[i], maxIter = 20, epsELBO = 1e-20) # cat(cancor(reslist$H, H)$cor, '\n') # cat(cancor(reslist$B, B)$cor, '\n') # toc <- proc.time() # time_vec[i] <- toc[3] - tic[3] # # } # # # # # High-dimensional covariates --------------------------------------------- # # rank0 <- 6; q = 5; d= 50; # datList <- gendata_simu(seed = 3,n=1000, p=1000, d= d, rank0 = rank0, q= q, rho=c(3, 6)/5) # X_count <- datList$X # Z <- datList$Z # # reslist <- RR_COAP(datList$X, Z = datList$Z, rank_use = rank0, q= q, verbose = T) # # # library(microbenchmark) # mbm <- microbenchmark("fast" = { reslist <- RR_COAP(X_count, Z = Z, rank_use = rank0, fast_svd= T) # }, # "slow" = { # reslist <- RR_COAP(X_count, Z = Z, rank_use = 3, fast_svd= F) # },times = 2); mbm # # # ## Compare with GFM # reslist <- RR_COAP(X_count, rank_use = 3, epsELBO = 1e-4, fast_svd= T) # reslist <- RR_COAP(X_count, rank_use = 3, q=1, epsELBO = 1e-4, fast_svd= T) # # mbm <- microbenchmark("fast" = { # reslist <- RR_COAP(X_count, rank_use = rank0, epsELBO = 1e-20, maxIter=30, fast_svd= T) # }, # "slow" = { # reslist <-GFM::gfm(list(X_count), types="poisson", 
q=15, dc_eps = 1e-20, maxIter = 30) # },times = 2); mbm # # # ## Compare with MRRR # mrrr_run <- function(Y, X, rank0, family=list(poisson()), # familygroup=rep(1,ncol(Y)), epsilon = 1e-4, sv.tol = 1e-2, maxIter = 30, trace=TRUE){ # require(rrpack) # # n <- nrow(Y); p <- ncol(Y) # # # svdX0d1 <- svd(X)$d[1] # init1 = list(kappaC0 = svdX0d1 * 5) # offset = NULL # control = list(epsilon = epsilon, sv.tol = sv.tol, maxit = maxIter, # trace = trace, gammaC0 = 1.1, plot.cv = TRUE, # conv.obj = TRUE) # fit.mrrr <- mrrr(Y=Y, X=X[,-1], family = family, familygroup = familygroup, # penstr = list(penaltySVD = "rankCon", lambdaSVD = 0.1), # control = control, init = init1, maxrank = rank0) # # return(fit.mrrr) # } # # mbm <- microbenchmark("fast" = { # reslist <- RR_COAP(X_count, rank_use = rank0, q=q, epsELBO = 1e-4, fast_svd= T) # }, # "slow" = { # res_mrrr <- mrrr_run(Y=X_count, X = Z, rank0 = rank0) # },times = 2); mbm # # # mbm <- microbenchmark("fast" = { # reslist <- RR_COAP(X_count, rank_use = rank0, q=q, epsELBO = 1e-20, maxIter = 30 ,fast_svd= T) # }, # "slow" = { # res_mrrr <- mrrr_run(Y=X_count, X = Z, rank0=10, epsilon = 1e-20, sv.tol=1e-20, maxIter = 30) # , rank0 = rank0 # },times = 2); mbm # #
/scratch/gouwar.j/cran-all/cranData/COAP/R/test.R
## ---- include = FALSE--------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----eval = FALSE------------------------------------------------------------- # library(COAP) # library(GFM) ## ----eval = FALSE------------------------------------------------------------- # n <- 200; p <- 200; # d= 50 # rank0 <- 6; # q = 5; # datList <- gendata_simu(seed = 1, n=n, p=p, d= d, rank0 = rank0, q= q, rho=c(2, 2), # sigma2_eps = 1) # X_count <- datList$X; Z <- datList$Z # H0 <- datList$H0; B0 <- datList$B0 # bbeta0 <- cbind( datList$mu0, datList$bbeta0) # ## ----eval = FALSE------------------------------------------------------------- # hq <- 5; hr <- 6 # system.time({ # tic <- proc.time() # reslist <- RR_COAP(X_count, Z= Z, q=hq, rank_use= hr, epsELBO = 1e-6) # toc <- proc.time() # time_coap <- toc[3] - tic[3] # }) ## ----eval = FALSE------------------------------------------------------------- # library(ggplot2) # dat_iter <- data.frame(iter=1:length(reslist$ELBO_seq), ELBO=reslist$ELBO_seq) # ggplot(data=dat_iter, aes(x=iter, y=ELBO)) + geom_line() + geom_point() + theme_bw(base_size = 20) # ## ----eval = FALSE------------------------------------------------------------- # library(GFM) # metricList <- list() # metricList$COAP <- list() # metricList$COAP$Tr_H <- measurefun(reslist$H, H0) # metricList$COAP$Tr_B <- measurefun(reslist$B, B0) # # norm_vec <- function(x) sqrt(sum(x^2/ length(x))) # metricList$COAP$err_bb <- norm_vec(reslist$bbeta-bbeta0) # metricList$COAP$err_bb1 <- norm_vec(reslist$bbeta[,1]-bbeta0[,1]) # metricList$COAP$Time <- time_coap ## ----eval = FALSE------------------------------------------------------------- # metricList$LFM <- list() # tic <- proc.time() # fit_lfm <- Factorm(X_count, q=q) # toc <- proc.time() # time_lfm <- toc[3] - tic[3] # # hbb1 <- colMeans(X_count) # metricList$LFM$Tr_H <- measurefun(fit_lfm$hH, H0) # metricList$LFM$Tr_B <- measurefun(fit_lfm$hB, B0) # metricList$LFM$err_bb1 <- norm_vec(hbb1- bbeta0[,1]) # metricList$LFM$err_bb <- NA # metricList$LFM$Time <- time_lfm ## ----eval = FALSE------------------------------------------------------------- # metricList$PoissonPCA <- list() # library(PoissonPCA) # tic <- proc.time() # fit_poispca <- Poisson_Corrected_PCA(X_count, k= hq) # toc <- proc.time() # time_ppca <- toc[3] - tic[3] # # hbb1 <- colMeans(X_count) # metricList$PoissonPCA$Tr_H <- measurefun(fit_poispca$scores, H0) # metricList$PoissonPCA$Tr_B <- measurefun(fit_poispca$loadings, B0) # metricList$PoissonPCA$err_bb1 <- norm_vec(log(1+fit_poispca$center)- bbeta0[,1]) # metricList$PoissonPCA$err_bb <- NA # metricList$PoissonPCA$Time <- time_ppca ## ----eval =FALSE-------------------------------------------------------------- # ## ZIPFA runs very slowly, so we do not run it here. 
# library(ZIPFA) # metricList$ZIPFA <- list() # system.time( # tic <- proc.time() # fit_zipfa <- ZIPFA(X_count, k=hq, display = FALSE) # toc <- proc.time() # time_zipfa <- toc[3] - tic[3] # ) # # # # idx_max_like <- which.max(fit_zipfa$Likelihood) # hbb1 <- colMeans(X_count) # metricList$ZIPFA$Tr_H <- measurefun(fit_zipfa$Ufit[[idx_max_like]], H0) # metricList$ZIPFA$Tr_B <- measurefun(fit_zipfa$Vfit[[idx_max_like]], B0) # metricList$PoissonPCA$Time <- time_zipfa # ## ----eval = FALSE------------------------------------------------------------- # metricList$GFM <- list() # tic <- proc.time() # fit_gfm <- gfm(list(X_count), type='poisson', q= q, verbose = F) # toc <- proc.time() # time_gfm <- toc[3] - tic[3] # metricList$GFM$Tr_H <- measurefun(fit_gfm$hH, H0) # metricList$GFM$Tr_B <- measurefun(fit_gfm$hB, B0) # metricList$GFM$err_bb1 <- norm_vec(fit_gfm$hmu- bbeta0[,1]) # metricList$GFM$err_bb <- NA # metricList$GFM$Time <- time_gfm # ## ----eval = FALSE------------------------------------------------------------- # PLNPCA_run <- function(X_count, covariates, q, Offset=rep(1, nrow(X_count))){ # require(PLNmodels) # # if(!is.character(Offset)){ # dat_plnpca <- prepare_data(X_count, covariates) # dat_plnpca$Offset <- Offset # }else{ # dat_plnpca <- prepare_data(X_count, covariates, offset = Offset) # } # # d <- ncol(covariates) # # offset(log(Offset))+ # formu <- paste0("Abundance ~ 1 + offset(log(Offset))+",paste(paste0("V",1:d), collapse = '+')) # # # myPCA <- PLNPCA(as.formula(formu), data = dat_plnpca, ranks = q) # # myPCA1 <- getBestModel(myPCA) # myPCA1$scores # # res_plnpca <- list(PCs= myPCA1$scores, bbeta= myPCA1$model_par$B, # loadings=myPCA1$model_par$C) # # return(res_plnpca) # } # # # # tic <- proc.time() # fit_plnpca <- PLNPCA_run(X_count, covariates = Z[,-1], q= q) # toc <- proc.time() # time_plnpca <- toc[3] - tic[3] # message(time_plnpca, " seconds") # # metricList$PLNPCA$Tr_H <- measurefun(fit_plnpca$PCs, H0) # metricList$PLNPCA$Tr_B <- measurefun(fit_plnpca$loadings, B0) # metricList$PLNPCA$err_bb1 <- norm_vec(fit_plnpca$bbeta[,1]- bbeta0[,1]) # metricList$PLNPCA$err_bb <- norm_vec(as.vector(fit_plnpca$bbeta) - as.vector(bbeta0)) # metricList$PLNPCA$Time <- time_plnpca ## ----eval =FALSE-------------------------------------------------------------- # ## GLLVM runs very slowly, so we do not run it here. 
# # library(gllvm) # colnames(Z) <- c(paste0("V",1: ncol(Z))) # tic <- proc.time() # fit <- gllvm(y=X_count, X=Z, family=poisson(), num.lv= q, control = list(trace=T)) # toc <- proc.time() # time_gllvm <- toc[3] - tic[3] # # metricList$GLLVM <- list() # metricList$GLLVM$Tr_H <- measurefun(fit$lvs, H0) # metricList$GLLVM$Tr_B <- measurefun(fit$params$theta, B0) # metricList$GLLVM$err_bb1 <- norm_vec(fit$params$beta0- bbeta0[,1]) # metricList$GLLVM$err_bb <- norm_vec(as.vector(cbind(fit$params$beta0,fit$params$Xcoef)) - as.vector(bbeta0)) # metricList$GLLVM$Time <- time_gllvm # } # ## ----eval = FALSE------------------------------------------------------------- # PoisReg <- function(X_count, covariates){ # library(stats) # hbbeta <- apply(X_count, 2, function(x){ # glm1 <- glm(x~covariates+0, family = "poisson") # coef(glm1) # } ) # return(t(hbbeta)) # } # tic <- proc.time() # hbbeta_poisreg <- PoisReg(X_count, Z) # toc <- proc.time() # time_poisreg <- toc[3] - tic[3] # metricList$GLM <- list() # metricList$GLM$Tr_H <- NA # metricList$GLM$Tr_B <- NA # metricList$GLM$err_bb1 <- norm_vec(hbbeta_poisreg[,1]- bbeta0[,1]) # metricList$GLM$err_bb <- norm_vec(as.vector(hbbeta_poisreg) - as.vector(bbeta0)) # metricList$GLM$Time <- time_poisreg # ## ----eval = FALSE------------------------------------------------------------- # mrrr_run <- function(Y, X, rank0, q=NULL, family=list(poisson()), familygroup=rep(1,ncol(Y))){ # # # require(rrpack) # # n <- nrow(Y); p <- ncol(Y) # # if(!is.null(q)){ # rank0 <- rank0+q # X <- cbind(X, diag(n)) # } # # svdX0d1 <- svd(X)$d[1] # init1 = list(kappaC0 = svdX0d1 * 5) ## this setting follows the example that authors provided. # # fit.mrrr <- mrrr(Y=Y, X=X[,-1], family = family, familygroup = familygroup, # penstr = list(penaltySVD = "rankCon", lambdaSVD = 0.1), # init = init1, maxrank = rank0) # hbbeta_mrrr <-t(fit.mrrr$coef[1:ncol(Z), ]) # if(!is.null(q)){ # Theta_hb <- (fit.mrrr$coef[(ncol(Z)+1): (nrow(Z)+ncol(Z)), ]) # svdTheta <- svd(Theta_hb, nu=q, nv=q) # return(list(hbbeta=hbbeta_mrrr, factor=svdTheta$u, loading=svdTheta$v)) # }else{ # return(list(hbbeta=hbbeta_mrrr)) # } # # # } # tic <- proc.time() # # res_mrrrz <- mrrr_run(X_count, Z, rank0) # toc <- proc.time() # time_mrrrz <- toc[3] - tic[3] # # metricList$MRRR_Z <- list() # metricList$MRRR_Z$Tr_H <- NA # metricList$MRRR_Z$Tr_B <-NA # metricList$MRRR_Z$err_bb1 <- norm_vec(res_mrrrz$hbbeta[,1]- bbeta0[,1]) # metricList$MRRR_Z$err_bb <- norm_vec(as.vector(res_mrrrz$hbbeta) - as.vector(bbeta0)) # metricList$MRRR_Z$Time <- time_mrrrz # ## ----eval = FALSE------------------------------------------------------------- # tic <- proc.time() # res_mrrrf <- mrrr_run(X_count, Z, rank0, q=q) # toc <- proc.time() # time_mrrrf <- toc[3] - tic[3] # metricList$MRRR_F <- list() # metricList$MRRR_F$Tr_H <- measurefun(res_mrrrf$factor, H0) # metricList$MRRR_F$Tr_B <- measurefun(res_mrrrf$loading, B0) # metricList$MRRR_F$err_bb1 <- norm_vec(res_mrrrf$hbbeta[,1]- bbeta0[,1]) # metricList$MRRR_F$err_bb <- norm_vec(as.vector(res_mrrrf$hbbeta) - as.vector(bbeta0)) # metricList$MRRR_F$Time <- time_mrrrf # ## ----eval = FALSE------------------------------------------------------------- # list2vec <- function(xlist){ # nn <- length(xlist) # me <- rep(NA, nn) # idx_noNA <- which(sapply(xlist, function(x) !is.null(x))) # for(r in idx_noNA) me[r] <- xlist[[r]] # return(me) # } # # dat_metric <- data.frame(Tr_H = sapply(metricList, function(x) x$Tr_H), # Tr_B = sapply(metricList, function(x) x$Tr_B), # err_bb1 =sapply(metricList, 
function(x) x$err_bb1), # err_bb = list2vec(lapply(metricList, function(x) x[['err_bb']])), # Method = names(metricList)) # dat_metric ## ----eval = FALSE, fig.width=9, fig.height=6---------------------------------- # library(cowplot) # p1 <- ggplot(data=subset(dat_metric, !is.na(Tr_B)), aes(x= Method, y=Tr_B, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL) + theme_bw(base_size = 16) # p2 <- ggplot(data=subset(dat_metric, !is.na(Tr_H)), aes(x= Method, y=Tr_H, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL)+ theme_bw(base_size = 16) # p3 <- ggplot(data=subset(dat_metric, !is.na(err_bb1)), aes(x= Method, y=err_bb1, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL)+ theme_bw(base_size = 16) # p4 <- ggplot(data=subset(dat_metric, !is.na(err_bb)), aes(x= Method, y=err_bb, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL)+ theme_bw(base_size = 16) # plot_grid(p1,p2,p3, p4, nrow=2, ncol=2) ## ----eval = FALSE------------------------------------------------------------- # datList <- gendata_simu(seed = 1, n=n, p=p, d= d, rank0 = rank0, q= q, rho=c(3, 6), # sigma2_eps = 1) # X_count <- datList$X; Z <- datList$Z # res1 <- selectParams(X_count=datList$X, Z=datList$Z, verbose=F) # # print(c(q_true=q, q_est=res1['hq'])) # print(c(r_true=rank0, r_est=res1['hr'])) ## ----------------------------------------------------------------------------- sessionInfo()
/scratch/gouwar.j/cran-all/cranData/COAP/inst/doc/COAPsimu.R
---
title: 'COAP: simulation'
author: "Wei Liu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{COAP: simulation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

This vignette introduces the usage of COAP for the analysis of high-dimensional count data with additional high-dimensional covariates, by comparison with other methods. 

The package can be loaded with the command:

```{r eval = FALSE}
library(COAP)
library(GFM)
```

## Generate the simulated data

First, we generate the simulated data.

```{r eval = FALSE}
n <- 200; p <- 200; 
d= 50
rank0 <- 6;
q = 5;
datList <- gendata_simu(seed = 1, n=n, p=p, d= d, rank0 = rank0, q= q, rho=c(2, 2),
                        sigma2_eps = 1)
X_count <- datList$X; Z <- datList$Z
H0 <- datList$H0; B0 <- datList$B0
bbeta0 <- datList$bbeta0 # gendata_simu returns no separate mu0; the first column of bbeta0 plays the role of the intercept
```

Fit the COAP model using the function `RR_COAP()` in the R package `COAP`. Users can use `?RR_COAP` to see the details about this function.

```{r eval = FALSE}
hq <- 5; hr <- 6
system.time({
  tic <- proc.time()
  reslist <- RR_COAP(X_count, Z= Z, q=hq, rank_use= hr, epsELBO = 1e-6)
  toc <- proc.time()
  time_coap <- toc[3] - tic[3]
})
```

Check the increasing property of the evidence lower bound.

```{r eval = FALSE}
library(ggplot2)
dat_iter <- data.frame(iter=1:length(reslist$ELBO_seq), ELBO=reslist$ELBO_seq)
ggplot(data=dat_iter, aes(x=iter, y=ELBO)) + geom_line() + geom_point() + theme_bw(base_size = 20)
```

We calculate the metrics that measure the estimation accuracy: the trace statistic, evaluated by the function `measurefun()` in the R package `GFM`, is used to measure the estimation accuracy of the loading matrix and the prediction accuracy of the factor matrix, and the root mean square error is adopted to measure the estimation error of bbeta.

```{r eval = FALSE}
library(GFM)
metricList <- list()
metricList$COAP <- list()
metricList$COAP$Tr_H <- measurefun(reslist$H, H0)
metricList$COAP$Tr_B <- measurefun(reslist$B, B0)

norm_vec <- function(x) sqrt(sum(x^2/ length(x)))
metricList$COAP$err_bb <- norm_vec(reslist$bbeta-bbeta0)
metricList$COAP$err_bb1 <- norm_vec(reslist$bbeta[,1]-bbeta0[,1])
metricList$COAP$Time <- time_coap
```

## Compare with other methods

We compare COAP with various prominent methods in the literature. They are (1) high-dimensional LFM (Bai and Ng 2002) implemented in the R package GFM; (2) PoissonPCA (Kenney et al. 2021) implemented in the R package PoissonPCA; (3) the zero-inflated Poisson factor model (ZIPFA, Xu et al. 2021) implemented in the R package ZIPFA; (4) the generalized factor model (Liu et al. 2023) implemented in the R package GFM; (5) PLNPCA (Chiquet et al. 2018) implemented in the R package PLNmodels; (6) generalized linear latent variable models (GLLVM, Hui et al. 2017) implemented in the R package gllvm; (7) a Poisson regression model for each $x_{ij}, (j = 1,··· ,p)$, implemented in the stats R package; (8) the multi-response reduced-rank Poisson regression model (MRRR, Luo et al. 2018) implemented in the rrpack R package.

(1). First, we implemented the linear factor model (LFM) and recorded the metrics that measure the estimation accuracy and computational cost.
```{r eval = FALSE}
metricList$LFM <- list()
tic <- proc.time()
fit_lfm <- Factorm(X_count, q=q)
toc <- proc.time()
time_lfm <- toc[3] - tic[3]

hbb1 <- colMeans(X_count)
metricList$LFM$Tr_H <- measurefun(fit_lfm$hH, H0)
metricList$LFM$Tr_B <- measurefun(fit_lfm$hB, B0)
metricList$LFM$err_bb1 <- norm_vec(hbb1- bbeta0[,1])
metricList$LFM$err_bb <- NA
metricList$LFM$Time <- time_lfm
```

(2). Then, we implemented PoissonPCA and recorded the metrics.

```{r eval = FALSE}
metricList$PoissonPCA <- list()
library(PoissonPCA)
tic <- proc.time()
fit_poispca <- Poisson_Corrected_PCA(X_count, k= hq)
toc <- proc.time()
time_ppca <- toc[3] - tic[3]

metricList$PoissonPCA$Tr_H <- measurefun(fit_poispca$scores, H0)
metricList$PoissonPCA$Tr_B <- measurefun(fit_poispca$loadings, B0)
metricList$PoissonPCA$err_bb1 <- norm_vec(log(1+fit_poispca$center)- bbeta0[,1])
metricList$PoissonPCA$err_bb <- NA
metricList$PoissonPCA$Time <- time_ppca
```

(3) Thirdly, we implemented the zero-inflated Poisson factor model:

```{r eval =FALSE}
## ZIPFA runs very slowly, so we do not run it here.
library(ZIPFA)
metricList$ZIPFA <- list()
tic <- proc.time()
fit_zipfa <- ZIPFA(X_count, k=hq, display = FALSE)
toc <- proc.time()
time_zipfa <- toc[3] - tic[3]

idx_max_like <- which.max(fit_zipfa$Likelihood)
metricList$ZIPFA$Tr_H <- measurefun(fit_zipfa$Ufit[[idx_max_like]], H0)
metricList$ZIPFA$Tr_B <- measurefun(fit_zipfa$Vfit[[idx_max_like]], B0)
metricList$ZIPFA$err_bb1 <- NA # keep the metric list rectangular for the summary table below
metricList$ZIPFA$err_bb <- NA
metricList$ZIPFA$Time <- time_zipfa
```

(4) Fourthly, we also applied the generalized factor model to estimate the loading matrix and factor matrix.

```{r eval = FALSE}
metricList$GFM <- list()
tic <- proc.time()
fit_gfm <- gfm(list(X_count), type='poisson', q= q, verbose = F)
toc <- proc.time()
time_gfm <- toc[3] - tic[3]
metricList$GFM$Tr_H <- measurefun(fit_gfm$hH, H0)
metricList$GFM$Tr_B <- measurefun(fit_gfm$hB, B0)
metricList$GFM$err_bb1 <- norm_vec(fit_gfm$hmu- bbeta0[,1])
metricList$GFM$err_bb <- NA
metricList$GFM$Time <- time_gfm
```

(5) Fifthly, we implemented PLNPCA:

```{r eval = FALSE}
PLNPCA_run <- function(X_count, covariates, q, Offset=rep(1, nrow(X_count))){
  require(PLNmodels)

  if(!is.character(Offset)){
    dat_plnpca <- prepare_data(X_count, covariates)
    dat_plnpca$Offset <- Offset
  }else{
    dat_plnpca <- prepare_data(X_count, covariates, offset = Offset)
  }

  d <- ncol(covariates)
  formu <- paste0("Abundance ~ 1 + offset(log(Offset))+",paste(paste0("V",1:d), collapse = '+'))

  myPCA <- PLNPCA(as.formula(formu), data = dat_plnpca, ranks = q)
  myPCA1 <- getBestModel(myPCA)

  res_plnpca <- list(PCs= myPCA1$scores, bbeta= myPCA1$model_par$B,
                     loadings=myPCA1$model_par$C)

  return(res_plnpca)
}

tic <- proc.time()
fit_plnpca <- PLNPCA_run(X_count, covariates = Z[,-1], q= q)
toc <- proc.time()
time_plnpca <- toc[3] - tic[3]
message(time_plnpca, " seconds")

metricList$PLNPCA$Tr_H <- measurefun(fit_plnpca$PCs, H0)
metricList$PLNPCA$Tr_B <- measurefun(fit_plnpca$loadings, B0)
metricList$PLNPCA$err_bb1 <- norm_vec(fit_plnpca$bbeta[,1]- bbeta0[,1])
metricList$PLNPCA$err_bb <- norm_vec(as.vector(fit_plnpca$bbeta) - as.vector(bbeta0))
metricList$PLNPCA$Time <- time_plnpca
```

(6) Sixthly, we implemented the generalized linear latent variable models (GLLVM, Hui et al. 2017).

```{r eval =FALSE}
## GLLVM runs very slowly, so we do not run it here.
library(gllvm)
colnames(Z) <- c(paste0("V",1: ncol(Z)))
tic <- proc.time()
fit <- gllvm(y=X_count, X=Z, family=poisson(), num.lv= q, control = list(trace=T))
toc <- proc.time()
time_gllvm <- toc[3] - tic[3]

metricList$GLLVM <- list()
metricList$GLLVM$Tr_H <- measurefun(fit$lvs, H0)
metricList$GLLVM$Tr_B <- measurefun(fit$params$theta, B0)
metricList$GLLVM$err_bb1 <- norm_vec(fit$params$beta0- bbeta0[,1])
metricList$GLLVM$err_bb <- norm_vec(as.vector(cbind(fit$params$beta0,fit$params$Xcoef)) - as.vector(bbeta0))
metricList$GLLVM$Time <- time_gllvm
```

(7) Seventhly, a Poisson regression model for each variable was implemented.

```{r eval = FALSE}
PoisReg <- function(X_count, covariates){
  library(stats)
  hbbeta <- apply(X_count, 2, function(x){
    glm1 <- glm(x~covariates+0, family = "poisson")
    coef(glm1)
  } )
  return(t(hbbeta))
}
tic <- proc.time()
hbbeta_poisreg <- PoisReg(X_count, Z)
toc <- proc.time()
time_poisreg <- toc[3] - tic[3]
metricList$GLM <- list()
metricList$GLM$Tr_H <- NA
metricList$GLM$Tr_B <- NA
metricList$GLM$err_bb1 <- norm_vec(hbbeta_poisreg[,1]- bbeta0[,1])
metricList$GLM$err_bb <- norm_vec(as.vector(hbbeta_poisreg) - as.vector(bbeta0))
metricList$GLM$Time <- time_poisreg
```

(8) Eighthly, we implemented the first version of the multi-response reduced-rank Poisson regression model (MRRR, Luo et al. 2018) from the rrpack R package (MRRR-Z), which uses only the covariates and does not consider the latent factor structure.

```{r eval = FALSE}
mrrr_run <- function(Y, X, rank0, q=NULL, family=list(poisson()), familygroup=rep(1,ncol(Y))){

  require(rrpack)

  n <- nrow(Y); p <- ncol(Y)

  if(!is.null(q)){
    rank0 <- rank0+q
    X <- cbind(X, diag(n))
  }

  svdX0d1 <- svd(X)$d[1]
  init1 = list(kappaC0 = svdX0d1 * 5) ## this setting follows the example that the authors provided.

  fit.mrrr <- mrrr(Y=Y, X=X[,-1], family = family, familygroup = familygroup,
                   penstr = list(penaltySVD = "rankCon", lambdaSVD = 0.1),
                   init = init1, maxrank = rank0)
  hbbeta_mrrr <-t(fit.mrrr$coef[1:ncol(Z), ]) # note: Z is taken from the calling environment
  if(!is.null(q)){
    Theta_hb <- (fit.mrrr$coef[(ncol(Z)+1): (nrow(Z)+ncol(Z)), ])
    svdTheta <- svd(Theta_hb, nu=q, nv=q)
    return(list(hbbeta=hbbeta_mrrr, factor=svdTheta$u, loading=svdTheta$v))
  }else{
    return(list(hbbeta=hbbeta_mrrr))
  }
}
tic <- proc.time()
res_mrrrz <- mrrr_run(X_count, Z, rank0)
toc <- proc.time()
time_mrrrz <- toc[3] - tic[3]

metricList$MRRR_Z <- list()
metricList$MRRR_Z$Tr_H <- NA
metricList$MRRR_Z$Tr_B <- NA
metricList$MRRR_Z$err_bb1 <- norm_vec(res_mrrrz$hbbeta[,1]- bbeta0[,1])
metricList$MRRR_Z$err_bb <- norm_vec(as.vector(res_mrrrz$hbbeta) - as.vector(bbeta0))
metricList$MRRR_Z$Time <- time_mrrrz
```

(9) Lastly, we implemented the second version of MRRR (MRRR-F), which considers both the covariates and the latent factor structure.

```{r eval = FALSE}
tic <- proc.time()
res_mrrrf <- mrrr_run(X_count, Z, rank0, q=q)
toc <- proc.time()
time_mrrrf <- toc[3] - tic[3]
metricList$MRRR_F <- list()
metricList$MRRR_F$Tr_H <- measurefun(res_mrrrf$factor, H0)
metricList$MRRR_F$Tr_B <- measurefun(res_mrrrf$loading, B0)
metricList$MRRR_F$err_bb1 <- norm_vec(res_mrrrf$hbbeta[,1]- bbeta0[,1])
metricList$MRRR_F$err_bb <- norm_vec(as.vector(res_mrrrf$hbbeta) - as.vector(bbeta0))
metricList$MRRR_F$Time <- time_mrrrf
```

## Visualize the comparison of performance

Next, we summarized the metrics for COAP and the other compared methods in a data frame object.
```{r eval = FALSE}
list2vec <- function(xlist){
  nn <- length(xlist)
  me <- rep(NA, nn)
  idx_noNA <- which(sapply(xlist, function(x) !is.null(x)))
  for(r in idx_noNA) me[r] <- xlist[[r]]
  return(me)
}

dat_metric <- data.frame(Tr_H = sapply(metricList, function(x) x$Tr_H),
                         Tr_B = sapply(metricList, function(x) x$Tr_B),
                         err_bb1 = sapply(metricList, function(x) x$err_bb1),
                         err_bb = list2vec(lapply(metricList, function(x) x[['err_bb']])),
                         Method = names(metricList))
dat_metric
```

Plot the results for COAP and the other methods, which suggests that COAP achieves better estimation accuracy for the quantities of interest.

```{r eval = FALSE, fig.width=9, fig.height=6}
library(cowplot)
p1 <- ggplot(data=subset(dat_metric, !is.na(Tr_B)), aes(x= Method, y=Tr_B, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL) + theme_bw(base_size = 16)
p2 <- ggplot(data=subset(dat_metric, !is.na(Tr_H)), aes(x= Method, y=Tr_H, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL)+ theme_bw(base_size = 16)
p3 <- ggplot(data=subset(dat_metric, !is.na(err_bb1)), aes(x= Method, y=err_bb1, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL)+ theme_bw(base_size = 16)
p4 <- ggplot(data=subset(dat_metric, !is.na(err_bb)), aes(x= Method, y=err_bb, fill=Method)) + geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL)+ theme_bw(base_size = 16)
plot_grid(p1,p2,p3, p4, nrow=2, ncol=2)
```

## Select the parameters

We applied the singular value ratio based method to select the number of factors and the rank of the coefficient matrix. The results show that the SVR method has the potential to identify the true values.

```{r eval = FALSE}
datList <- gendata_simu(seed = 1, n=n, p=p, d= d, rank0 = rank0, q= q, rho=c(3, 6),
                        sigma2_eps = 1)
X_count <- datList$X; Z <- datList$Z
res1 <- selectParams(X_count=datList$X, Z=datList$Z, verbose=F)

print(c(q_true=q, q_est=res1['hq']))
print(c(r_true=rank0, r_est=res1['hr']))
```

<details>
<summary>**Session Info**</summary>

```{r}
sessionInfo()
```

</details>
/scratch/gouwar.j/cran-all/cranData/COAP/inst/doc/COAPsimu.Rmd
---
title: 'COAP: simulation'
author: "Wei Liu"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{COAP: simulation}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

This vignette introduces the usage of COAP for the analysis of high-dimensional count data with additional high-dimensional covariates, by comparison with other methods. The package can be loaded with the command:

```{r eval = FALSE}
library(COAP)
library(GFM)
```

## Generate the simulated data

First, we generate the simulated data.

```{r eval = FALSE}
n <- 200; p <- 200; d <- 50
rank0 <- 6; q <- 5
datList <- gendata_simu(seed = 1, n=n, p=p, d= d, rank0 = rank0, q= q, rho=c(2, 2), sigma2_eps = 1)
X_count <- datList$X; Z <- datList$Z
H0 <- datList$H0; B0 <- datList$B0
bbeta0 <- cbind(datList$mu0, datList$bbeta0)
```

Fit the COAP model using the function `RR_COAP()` in the R package `COAP`. Users can run `?RR_COAP` to see the details of this function.

```{r eval = FALSE}
hq <- 5; hr <- 6
system.time({
  tic <- proc.time()
  reslist <- RR_COAP(X_count, Z= Z, q=hq, rank_use= hr, epsELBO = 1e-6)
  toc <- proc.time()
  time_coap <- toc[3] - tic[3]
})
```

Check the increasing property of the evidence lower bound (ELBO) function.

```{r eval = FALSE}
library(ggplot2)
dat_iter <- data.frame(iter=1:length(reslist$ELBO_seq), ELBO=reslist$ELBO_seq)
ggplot(data=dat_iter, aes(x=iter, y=ELBO)) + geom_line() + geom_point() +
  theme_bw(base_size = 20)
```

We calculate the metrics to measure the estimation accuracy. The trace statistic is used to measure the estimation accuracy of the loading matrix and the prediction accuracy of the factor matrix, and is evaluated by the function `measurefun()` in the R package `GFM`; the root mean square error is adopted to measure the estimation error of `bbeta`.

```{r eval = FALSE}
library(GFM)
metricList <- list()
metricList$COAP <- list()
metricList$COAP$Tr_H <- measurefun(reslist$H, H0)
metricList$COAP$Tr_B <- measurefun(reslist$B, B0)
norm_vec <- function(x) sqrt(sum(x^2/ length(x)))
metricList$COAP$err_bb <- norm_vec(reslist$bbeta-bbeta0)
metricList$COAP$err_bb1 <- norm_vec(reslist$bbeta[,1]-bbeta0[,1])
metricList$COAP$Time <- time_coap
```

## Compare with other methods

We compare COAP with various prominent methods in the literature. They are:
(1) high-dimensional LFM (Bai and Ng 2002) implemented in the R package GFM;
(2) PoissonPCA (Kenney et al. 2021) implemented in the R package PoissonPCA;
(3) the zero-inflated Poisson factor model (ZIPFA, Xu et al. 2021) implemented in the R package ZIPFA;
(4) the generalized factor model (Liu et al. 2023) implemented in the R package GFM;
(5) PLNPCA (Chiquet et al. 2018) implemented in the R package PLNmodels;
(6) generalized linear latent variable models (GLLVM, Hui et al. 2017) implemented in the R package gllvm;
(7) a Poisson regression model for each $x_{ij}, (j = 1,\cdots,p)$, implemented in the stats R package;
(8) the multi-response reduced-rank Poisson regression model (MRRR, Luo et al. 2018) implemented in the rrpack R package.

(1) First, we implemented the linear factor model (LFM) and recorded the metrics that measure the estimation accuracy and computational cost.

```{r eval = FALSE}
metricList$LFM <- list()
tic <- proc.time()
fit_lfm <- Factorm(X_count, q=q)
toc <- proc.time()
time_lfm <- toc[3] - tic[3]
hbb1 <- colMeans(X_count)
metricList$LFM$Tr_H <- measurefun(fit_lfm$hH, H0)
metricList$LFM$Tr_B <- measurefun(fit_lfm$hB, B0)
metricList$LFM$err_bb1 <- norm_vec(hbb1- bbeta0[,1])
metricList$LFM$err_bb <- NA
metricList$LFM$Time <- time_lfm
```

(2) Then, we implemented PoissonPCA and recorded the metrics.

```{r eval = FALSE}
metricList$PoissonPCA <- list()
library(PoissonPCA)
tic <- proc.time()
fit_poispca <- Poisson_Corrected_PCA(X_count, k= hq)
toc <- proc.time()
time_ppca <- toc[3] - tic[3]

hbb1 <- colMeans(X_count)
metricList$PoissonPCA$Tr_H <- measurefun(fit_poispca$scores, H0)
metricList$PoissonPCA$Tr_B <- measurefun(fit_poispca$loadings, B0)
metricList$PoissonPCA$err_bb1 <- norm_vec(log(1+fit_poispca$center)- bbeta0[,1])
metricList$PoissonPCA$err_bb <- NA
metricList$PoissonPCA$Time <- time_ppca
```

(3) Thirdly, we implemented the zero-inflated Poisson factor model:

```{r eval = FALSE}
## ZIPFA runs very slowly, so we do not run it here.
library(ZIPFA)
metricList$ZIPFA <- list()
tic <- proc.time()
fit_zipfa <- ZIPFA(X_count, k=hq, display = FALSE)
toc <- proc.time()
time_zipfa <- toc[3] - tic[3]

idx_max_like <- which.max(fit_zipfa$Likelihood)
hbb1 <- colMeans(X_count)
metricList$ZIPFA$Tr_H <- measurefun(fit_zipfa$Ufit[[idx_max_like]], H0)
metricList$ZIPFA$Tr_B <- measurefun(fit_zipfa$Vfit[[idx_max_like]], B0)
metricList$ZIPFA$err_bb1 <- norm_vec(hbb1- bbeta0[,1]) # as for LFM, use the column means of counts
metricList$ZIPFA$err_bb <- NA
metricList$ZIPFA$Time <- time_zipfa
```

(4) Fourthly, we also applied the generalized factor model to estimate the loading matrix and factor matrix.

```{r eval = FALSE}
metricList$GFM <- list()
tic <- proc.time()
fit_gfm <- gfm(list(X_count), type='poisson', q= q, verbose = F)
toc <- proc.time()
time_gfm <- toc[3] - tic[3]
metricList$GFM$Tr_H <- measurefun(fit_gfm$hH, H0)
metricList$GFM$Tr_B <- measurefun(fit_gfm$hB, B0)
metricList$GFM$err_bb1 <- norm_vec(fit_gfm$hmu- bbeta0[,1])
metricList$GFM$err_bb <- NA
metricList$GFM$Time <- time_gfm
```

(5) Fifthly, we implemented PLNPCA:

```{r eval = FALSE}
PLNPCA_run <- function(X_count, covariates, q, Offset=rep(1, nrow(X_count))){
  require(PLNmodels)
  if(!is.character(Offset)){
    dat_plnpca <- prepare_data(X_count, covariates)
    dat_plnpca$Offset <- Offset
  }else{
    dat_plnpca <- prepare_data(X_count, covariates, offset = Offset)
  }
  d <- ncol(covariates)
  formu <- paste0("Abundance ~ 1 + offset(log(Offset))+", paste(paste0("V",1:d), collapse = '+'))
  myPCA <- PLNPCA(as.formula(formu), data = dat_plnpca, ranks = q)
  myPCA1 <- getBestModel(myPCA)
  res_plnpca <- list(PCs= myPCA1$scores, bbeta= myPCA1$model_par$B,
                     loadings=myPCA1$model_par$C)
  return(res_plnpca)
}
tic <- proc.time()
fit_plnpca <- PLNPCA_run(X_count, covariates = Z[,-1], q= q)
toc <- proc.time()
time_plnpca <- toc[3] - tic[3]
message(time_plnpca, " seconds")

metricList$PLNPCA <- list()
metricList$PLNPCA$Tr_H <- measurefun(fit_plnpca$PCs, H0)
metricList$PLNPCA$Tr_B <- measurefun(fit_plnpca$loadings, B0)
metricList$PLNPCA$err_bb1 <- norm_vec(fit_plnpca$bbeta[,1]- bbeta0[,1])
metricList$PLNPCA$err_bb <- norm_vec(as.vector(fit_plnpca$bbeta) - as.vector(bbeta0))
metricList$PLNPCA$Time <- time_plnpca
```

(6) Sixthly, we implemented the generalized linear latent variable models (GLLVM, Hui et al. 2017).

```{r eval = FALSE}
## GLLVM runs very slowly, so we do not run it here.
library(gllvm)
colnames(Z) <- paste0("V", 1:ncol(Z))
tic <- proc.time()
fit <- gllvm(y=X_count, X=Z, family=poisson(), num.lv= q, control = list(trace=T))
toc <- proc.time()
time_gllvm <- toc[3] - tic[3]

metricList$GLLVM <- list()
metricList$GLLVM$Tr_H <- measurefun(fit$lvs, H0)
metricList$GLLVM$Tr_B <- measurefun(fit$params$theta, B0)
metricList$GLLVM$err_bb1 <- norm_vec(fit$params$beta0- bbeta0[,1])
metricList$GLLVM$err_bb <- norm_vec(as.vector(cbind(fit$params$beta0, fit$params$Xcoef)) - as.vector(bbeta0))
metricList$GLLVM$Time <- time_gllvm
```

(7) Seventhly, a Poisson regression model for each variable was implemented.

```{r eval = FALSE}
PoisReg <- function(X_count, covariates){
  library(stats)
  hbbeta <- apply(X_count, 2, function(x){
    glm1 <- glm(x~covariates+0, family = "poisson")
    coef(glm1)
  })
  return(t(hbbeta))
}
tic <- proc.time()
hbbeta_poisreg <- PoisReg(X_count, Z)
toc <- proc.time()
time_poisreg <- toc[3] - tic[3]
metricList$GLM <- list()
metricList$GLM$Tr_H <- NA
metricList$GLM$Tr_B <- NA
metricList$GLM$err_bb1 <- norm_vec(hbbeta_poisreg[,1]- bbeta0[,1])
metricList$GLM$err_bb <- norm_vec(as.vector(hbbeta_poisreg) - as.vector(bbeta0))
metricList$GLM$Time <- time_poisreg
```

(8) Eighthly, we implemented the first version of the multi-response reduced-rank Poisson regression model (MRRR, Luo et al. 2018) from the rrpack R package (MRRR-Z), which does not consider the latent factor structure but only the covariates.

```{r eval = FALSE}
mrrr_run <- function(Y, X, rank0, q=NULL, family=list(poisson()),
                     familygroup=rep(1,ncol(Y))){
  require(rrpack)
  n <- nrow(Y); p <- ncol(Y)
  d <- ncol(X) # number of covariate columns, including the intercept column
  if(!is.null(q)){
    rank0 <- rank0 + q
    X <- cbind(X, diag(n))
  }
  svdX0d1 <- svd(X)$d[1]
  init1 = list(kappaC0 = svdX0d1 * 5) ## this setting follows the example that the authors provided.
  fit.mrrr <- mrrr(Y=Y, X=X[,-1], family = family, familygroup = familygroup,
                   penstr = list(penaltySVD = "rankCon", lambdaSVD = 0.1),
                   init = init1, maxrank = rank0)
  hbbeta_mrrr <- t(fit.mrrr$coef[1:d, ])
  if(!is.null(q)){
    Theta_hb <- fit.mrrr$coef[(d+1):(n+d), ]
    svdTheta <- svd(Theta_hb, nu=q, nv=q)
    return(list(hbbeta=hbbeta_mrrr, factor=svdTheta$u, loading=svdTheta$v))
  }else{
    return(list(hbbeta=hbbeta_mrrr))
  }
}
tic <- proc.time()
res_mrrrz <- mrrr_run(X_count, Z, rank0)
toc <- proc.time()
time_mrrrz <- toc[3] - tic[3]

metricList$MRRR_Z <- list()
metricList$MRRR_Z$Tr_H <- NA
metricList$MRRR_Z$Tr_B <- NA
metricList$MRRR_Z$err_bb1 <- norm_vec(res_mrrrz$hbbeta[,1]- bbeta0[,1])
metricList$MRRR_Z$err_bb <- norm_vec(as.vector(res_mrrrz$hbbeta) - as.vector(bbeta0))
metricList$MRRR_Z$Time <- time_mrrrz
```

(9) Lastly, we implemented the second version of MRRR (MRRR-F), which considers both the covariates and the latent factor structure.

```{r eval = FALSE}
tic <- proc.time()
res_mrrrf <- mrrr_run(X_count, Z, rank0, q=q)
toc <- proc.time()
time_mrrrf <- toc[3] - tic[3]
metricList$MRRR_F <- list()
metricList$MRRR_F$Tr_H <- measurefun(res_mrrrf$factor, H0)
metricList$MRRR_F$Tr_B <- measurefun(res_mrrrf$loading, B0)
metricList$MRRR_F$err_bb1 <- norm_vec(res_mrrrf$hbbeta[,1]- bbeta0[,1])
metricList$MRRR_F$err_bb <- norm_vec(as.vector(res_mrrrf$hbbeta) - as.vector(bbeta0))
metricList$MRRR_F$Time <- time_mrrrf
```

## Visualize the comparison of performance

Next, we summarize the metrics for COAP and the compared methods in a data frame object.

```{r eval = FALSE}
list2vec <- function(xlist){
  nn <- length(xlist)
  me <- rep(NA, nn)
  idx_noNA <- which(sapply(xlist, function(x) !is.null(x)))
  for(r in idx_noNA) me[r] <- xlist[[r]]
  return(me)
}

dat_metric <- data.frame(Tr_H = sapply(metricList, function(x) x$Tr_H),
                         Tr_B = sapply(metricList, function(x) x$Tr_B),
                         err_bb1 = sapply(metricList, function(x) x$err_bb1),
                         err_bb = list2vec(lapply(metricList, function(x) x[['err_bb']])),
                         Method = names(metricList))
dat_metric
```

Plot the results for COAP and the other methods, which suggests that COAP achieves better estimation accuracy for the quantities of interest.

```{r eval = FALSE, fig.width=9, fig.height=6}
library(cowplot)
p1 <- ggplot(data=subset(dat_metric, !is.na(Tr_B)), aes(x= Method, y=Tr_B, fill=Method)) +
  geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL) +
  theme_bw(base_size = 16)
p2 <- ggplot(data=subset(dat_metric, !is.na(Tr_H)), aes(x= Method, y=Tr_H, fill=Method)) +
  geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL) +
  theme_bw(base_size = 16)
p3 <- ggplot(data=subset(dat_metric, !is.na(err_bb1)), aes(x= Method, y=err_bb1, fill=Method)) +
  geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL) +
  theme_bw(base_size = 16)
p4 <- ggplot(data=subset(dat_metric, !is.na(err_bb)), aes(x= Method, y=err_bb, fill=Method)) +
  geom_bar(stat="identity") + xlab(NULL) + scale_x_discrete(breaks=NULL) +
  theme_bw(base_size = 16)
plot_grid(p1, p2, p3, p4, nrow=2, ncol=2)
```

## Select the parameters

We applied the singular value ratio (SVR) based method to select the number of factors and the rank of the coefficient matrix. The results showed that the SVR method has the potential to identify the true values.

```{r eval = FALSE}
datList <- gendata_simu(seed = 1, n=n, p=p, d= d, rank0 = rank0, q= q, rho=c(3, 6), sigma2_eps = 1)
X_count <- datList$X; Z <- datList$Z
res1 <- selectParams(X_count=datList$X, Z=datList$Z, verbose=F)

print(c(q_true=q, q_est=res1['hq']))
print(c(r_true=rank0, r_est=res1['hr']))
```

<details>
<summary>**Session Info**</summary>
```{r}
sessionInfo()
```
</details>
/scratch/gouwar.j/cran-all/cranData/COAP/vignettes/COAPsimu.Rmd
### COCONUT
### Sweeney, TE
### 2016

## send only controls to ComBat to get gamma-star and delta-star
## correct diseased samples and match back

COCONUT <- function (GSEs, control.0.col, disease.col=NULL,
                     byPlatform=FALSE, platformCol,
                     par.prior=TRUE, itConv=1e-04,
                     parallel=FALSE, mc.cores=1){

    ## subset to common genes
    common <- Reduce(intersect, lapply(GSEs, function(X) rownames(X$genes)))
    common <- common[!(common %in% c(NA, ""))]

    ## list of only controls (everything else removed)
    GSEs.control <- lapply(GSEs, function(x) {
        x$pheno <- x$pheno[(x$pheno[ , control.0.col] %in% 0), ]
        x$genes <- x$genes[, rownames(x$pheno)]
        x$class <- rep(0, ncol(x$genes))
        x
    })

    ## check for presence of appropriate controls
    if(byPlatform){
        ## make sure that there are no platforms present without controls
        ## note takes the FIRST item in the column
        checkPlatforms <- function(GSElist) {
            platforms <- lapply(GSElist, function(x) {
                x$pheno[, grep(platformCol, colnames(x$pheno))][1]
            })
            sort(unique(unlist(platforms)))
        }
        if(!identical(checkPlatforms(GSEs), checkPlatforms(GSEs.control))) {
            stop("byPlatform = T but not all platforms have associated controls")
        }
    } else {
        check <- unlist(lapply(GSEs.control, function(x) length(x$class)>1 ))
        if(!all(check)){
            stop(paste("Datasets with fewer than 2 controls:", names(GSEs.control[!check])))
        }
    }

    ## get ComBat parameters from controls
    ComBatcontrol <- .CombatCustom(GSEs.control, common, params = "get",
                                   byPlatform = byPlatform, platformCol = platformCol,
                                   par.prior=par.prior, itConv=itConv,
                                   parallel=parallel, mc.cores=mc.cores)

    ## list of only disease samples (everything else removed)
    ## can make it a subset by specifying 'disease.col'
    if(is.null(disease.col)){
        GSEs.disease <- lapply(GSEs, function(x) {
            x$pheno <- x$pheno[!(x$pheno[ , control.0.col] %in% 0), ]
            x$genes <- x$genes[, rownames(x$pheno)]
            x$class <- x$pheno[ , control.0.col]
            x
        })
    } else {
        GSEs.disease <- lapply(GSEs, function(x) {
            x$pheno <- x$pheno[!is.na(x$pheno[ , disease.col]), ]
            x$genes <- x$genes[, rownames(x$pheno)]
            x$class <- x$pheno[ , disease.col]
            x
        })
    }

    ## apply ComBat parameters from controls to disease samples
    GSEs.disease.ComBat <- .CombatCustom(GSEs.disease, common, params = "have",
                                         byPlatform = byPlatform, platformCol = platformCol,
                                         bayesParams = ComBatcontrol$bayesParams,
                                         getPlatforms = ComBatcontrol$getPlatforms)

    return(list(COCONUTList = GSEs.disease.ComBat,
                rawDiseaseList = GSEs.disease,
                controlList = ComBatcontrol))
}

## merge COCONUT-normalized data from multiple sources into a single object
## Note, the 'pheno' output will only contain columns with common names
combineCOCOoutput <- function (COCONUT.out) {
    COCONUTList <- lapply(1:length(COCONUT.out$COCONUTList), function(i){
        GEM.cntl <- COCONUT.out$controlList$GSEs[[i]]
        GEM.dis <- COCONUT.out$COCONUTList[[i]]
        list(genes=cbind(GEM.cntl$genes, GEM.dis$genes),
             pheno=data.frame(rbind(GEM.cntl$pheno, GEM.dis$pheno)),
             class=c(rep(0, ncol(GEM.cntl$genes)), rep(1, ncol(GEM.dis$genes))))
    })

    genesMat <- Reduce(cbind, lapply(COCONUTList, function(gse) gse$genes))
    common <- Reduce(intersect, lapply(COCONUTList, function(gse) colnames(gse$pheno)))
    phenoMat <- Reduce(rbind, lapply(COCONUTList, function(gse) gse$pheno[, common]))
    class <- Reduce(c, lapply(COCONUTList, function(gse) gse$class))

    list(genes=genesMat, pheno=phenoMat, class.cntl0.dis1=class)
}

.CombatCustom <- function(GSE.list.genes, common, params,
                          byPlatform, platformCol, bayesParams,
                          getPlatforms=NULL,
                          par.prior=TRUE, itConv=1e-04,
                          parallel=FALSE, mc.cores=1) {

    ## must be one or the other
    stopifnot(params %in% c("get",
"have")) ## make single matrix from all data on common genes commonGenes <- Reduce(cbind, lapply(GSE.list.genes, function(X) X$genes[common, ])) ## can either normalize by platform or by dataset if(byPlatform){ ## split by platform cat("\nTreating platforms as batches...\nPlatforms found: ") platforms <- factor(unlist(lapply(GSE.list.genes, function(X){ ## note takes the FIRST match for grep platform <- grep(platformCol, colnames(X$pheno), ignore.case=T)[1] GSEplatform <- as.character(X$pheno[ , platform]) ## list chips found cat(GSEplatform[1], ", ") ## (!) could get confounded if multiple platforms in same col GSEplatform }))) ## will assure correct application of batches index <- order(platforms) platforms <- platforms[index] commonGenes <- commonGenes[index] ## quantile normalize datasets from the same platform prior to ComBat requireNamespace("limma") cat("\nCo-quantile-normalizing datasets from the same platform ") invisible(lapply(levels(platforms), function(platform){ index <- colnames(commonGenes)[platforms==platform] samePlatformData <- commonGenes[, index] samePlatformData <- limma::normalizeQuantiles(samePlatformData) commonGenes[, index] <<- samePlatformData cat(" .") })) } else { cat("\nTreating datasets as batches: ") platforms <- unlist(lapply(1:length(GSE.list.genes), function(i) { rep(i, ncol(GSE.list.genes[[i]]$genes)) })) } ## either combat normalize on controls ("get") ## or use parameters derived from controls ("have") if(params == "get"){ ComBatWithParams <- .ComBatGetParamsNoCov(commonGenes, batch=platforms, par.prior=par.prior, itConv=itConv, parallel=parallel, mc.cores=mc.cores) ## split common matrix (bayesdata) back out into list ComBatWithParams$GSEs <- lapply(GSE.list.genes, function(GSE) { GSE$genes <- data.frame(ComBatWithParams$bayesdata[, colnames(GSE$genes)]) return(GSE) }) ComBatWithParams$bayesdata <- NULL ## store platforms to ensure same order in application ("have") ## NOTE: 'platforms' are just datasets if byPlatform=F ComBatWithParams$getPlatforms <- unique(platforms) return(ComBatWithParams) } else if (params == "have"){ ## check order of platforms if(!(identical(unique(platforms), getPlatforms))) { print(unique(platforms)) print(getPlatforms) stop("Batches not in identical order between controls and cases.") } else { cat("\nBatches identical between have-params and get-params...\n") } ## use parameters derived from prior ## commonGenesCombats is analogous to 'bayesdata' above commonGenesCombat <- .ComBatApplyParamsNoCov(commonGenes, batch = platforms, bayesParams = bayesParams) ## split common matrix back out into list GSE.list.genes <- lapply(GSE.list.genes, function(GSE) { GSE$genes <- data.frame(commonGenesCombat[, colnames(GSE$genes)]) return(GSE) }) return(GSE.list.genes) } } .ComBatGetParamsNoCov <- function (dat, batch, par.prior = TRUE, itConv=1e-04, parallel=FALSE, mc.cores=1) { ## slightly modified from sva::ComBat ## no covariates allowed batch <- as.factor(batch) design <- stats::model.matrix(~-1 + batch) cat("Found", nlevels(batch), "batches\n") n.batch <- nlevels(batch) batches <- lapply(1:n.batch, function(i) which(batch == levels(batch)[i])) n.batches <- sapply(batches, length) n.array <- sum(n.batches) NAs = any(is.na(dat)) stopifnot(!NAs) cat("Standardizing Data across genes\n") B.hat <- solve(t(design) %*% design) %*% t(design) %*% t(as.matrix(dat)) grand.mean <- t(n.batches/n.array) %*% B.hat[1:n.batch,] dat <- as.matrix(dat) var.pooled <- ((dat - t(design %*% B.hat))^2) %*% rep(1/n.array, n.array) stand.mean <- t(grand.mean) 
%*% t(rep(1, n.array)) if (!is.null(design)) { tmp <- design tmp[, c(1:n.batch)] <- 0 stand.mean <- stand.mean + t(tmp %*% B.hat) } s.data <- (dat - stand.mean)/(sqrt(var.pooled) %*% t(rep(1, n.array))) cat("Fitting L/S model and finding priors\n") batch.design <- design[, 1:n.batch] gamma.hat <- solve(t(batch.design) %*% batch.design) %*% t(batch.design) %*% t(as.matrix(s.data)) delta.hat <- NULL for (i in batches) { delta.hat <- rbind(delta.hat, apply(s.data[, i], 1, stats::var, na.rm = T)) } ## switch to rowMeans gamma.bar <- rowMeans(gamma.hat) t2 <- apply(gamma.hat, 1, stats::var) ### same as sva:::aprior a.prior <- apply(delta.hat, 1, function(d.hat) { m = mean(d.hat) s2 = stats::var(d.hat) (2 * s2 + m^2)/s2 }) ### same as sva:::bprior b.prior <- apply(delta.hat, 1, function(d.hat){ m = mean(d.hat) s2 = stats::var(d.hat) (m * s2 + m^3)/s2 }) ############# added parallel functionality here ################## if(parallel) { cat("parallelized, running on ", mc.cores, "cores\n") if (par.prior) { cat("Finding parametric adjustments\n") gd.star <- parallel::mclapply(1:n.batch, mc.cores=mc.cores, function(i) { temp <- .sva.it.sol(s.data[, batches[[i]]], gamma.hat[i,], delta.hat[i, ], gamma.bar[i], t2[i], a.prior[i], b.prior[i], conv=itConv) list(g.star = temp[1, ], d.star = temp[2, ]) }) gamma.star <- Reduce(rbind, lapply(gd.star, function(i) i$g.star)) delta.star <- Reduce(rbind, lapply(gd.star, function(i) i$d.star)) } else { cat("Finding nonparametric adjustments\n") gd.star <- parallel::mclapply(1:n.batch, mc.cores=mc.cores, mc.allow.recursive=FALSE, function(i) { temp <- .sva.int.eprior(as.matrix(s.data[, batches[[i]]]), gamma.hat[i, ], delta.hat[i, ], parallel=parallel, mc.cores=mc.cores) cat("batch ", i, "done...\n") list(g.star = temp[1, ], d.star = temp[2, ]) }) gamma.star <- Reduce(rbind, lapply(gd.star, function(i) i$g.star)) delta.star <- Reduce(rbind, lapply(gd.star, function(i) i$d.star)) } } else { ###### original non-parallel; converted to lapply from for loops ###### if (par.prior) { cat("Finding parametric adjustments\n") gd.star <- lapply(1:n.batch, function(i) { temp <- .sva.it.sol(s.data[, batches[[i]]], gamma.hat[i,], delta.hat[i, ], gamma.bar[i], t2[i], a.prior[i], b.prior[i], conv=itConv) list(g.star = temp[1, ], d.star = temp[2, ]) }) gamma.star <- Reduce(rbind, lapply(gd.star, function(i) i$g.star)) delta.star <- Reduce(rbind, lapply(gd.star, function(i) i$d.star)) } else { cat("Finding nonparametric adjustments for each batch\n") gd.star <- lapply(1:n.batch, function(i) { temp <- .sva.int.eprior(as.matrix(s.data[, batches[[i]]]), gamma.hat[i, ], delta.hat[i, ]) cat("batch ", i, "done...\n") list(g.star = temp[1, ], d.star = temp[2, ]) }) gamma.star <- Reduce(rbind, lapply(gd.star, function(i) i$g.star)) delta.star <- Reduce(rbind, lapply(gd.star, function(i) i$d.star)) } } cat("Adjusting the Data\n") bayesdata <- s.data j <- 1 for (i in batches) { bayesdata[, i] <- (bayesdata[, i] - t(batch.design[i,] %*% gamma.star)) / (sqrt(delta.star[j, ]) %*% t(rep(1, n.batches[j]))) j <- j + 1 } bayesdata <- (bayesdata * (sqrt(var.pooled) %*% t(rep(1, n.array)))) + stand.mean bayesParams <- list(B.hat=B.hat, stand.mean=stand.mean, var.pooled=var.pooled, gamma.star=gamma.star, delta.star=delta.star) return(list(bayesdata=bayesdata, bayesParams=bayesParams)) } .ComBatApplyParamsNoCov <- function (dat, batch, bayesParams) { ## slightly modified from sva::ComBat ## no covariates allowed batch <- as.factor(batch) design <- stats::model.matrix(~-1 + batch) cat("Found", 
nlevels(batch), "batches\n")
    n.batch <- nlevels(batch)
    batches <- lapply(1:n.batch, function(i) which(batch == levels(batch)[i]))
    n.batches <- sapply(batches, length)
    n.array <- sum(n.batches)

    NAs = any(is.na(dat))
    stopifnot(!NAs)

    ## use params from control patients
    gamma.star <- bayesParams$gamma.star
    delta.star <- bayesParams$delta.star
    ## B.hat <- bayesParams$B.hat
    stand.mean <- bayesParams$stand.mean
    var.pooled <- bayesParams$var.pooled

    cat("Standardizing Data across genes using prior parameters\n")
    # B.hat <- solve(t(design) %*% design) %*% t(design) %*% t(as.matrix(dat))
    # grand.mean <- t(n.batches/n.array) %*% B.hat[1:n.batch,]
    #
    # var.pooled <- ((dat - t(design %*% B.hat))^2) %*% rep(1/n.array, n.array)
    #
    # stand.mean <- t(grand.mean) %*% t(rep(1, n.array))
    #
    # if (!is.null(design)) {
    #     tmp <- design
    #     tmp[, c(1:n.batch)] <- 0
    #     stand.mean <- stand.mean + t(tmp %*% B.hat)
    # }

    ## stand.mean cols are identical;
    ## if supplied from 'bayesParams' add enough columns to match dat
    # stand.mean <- Reduce(cbind, lapply(1:ncol(dat), function(x) stand.mean[,1]))
    geneNames <- rownames(stand.mean)
    stand.mean <- matrix(data=stand.mean[,1], nrow=nrow(stand.mean), ncol=ncol(dat))
    rownames(stand.mean) <- geneNames

    ## s.data is Z_ijg
    s.data <- (dat - stand.mean)/(sqrt(var.pooled) %*% t(rep(1, n.array)))

    batch.design <- design[, 1:n.batch]

    cat("Adjusting the Data Based on input gamma.star and delta.star\n")
    bayesdata <- s.data
    j <- 1
    for (i in batches) {
        bayesdata[, i] <- (bayesdata[, i] - t(batch.design[i,] %*% gamma.star)) /
            (sqrt(delta.star[j, ]) %*% t(rep(1, n.batches[j])))
        j <- j + 1
    }
    bayesdata <- (bayesdata * (sqrt(var.pooled) %*% t(rep(1, n.array)))) + stand.mean

    return(bayesdata)
}

.sva.it.sol <- function (sdat, g.hat, d.hat, g.bar, t2, a, b, conv = 1e-04) {
    ## taken from sva:::it.sol
    ## switch to rowSums
    n <- rowSums(!is.na(sdat))
    g.old <- g.hat
    d.old <- d.hat
    change <- 1
    count <- 0

    .sva.postmean <- function (g.hat, g.bar, n, d.star, t2) {
        (t2 * n * g.hat + d.star * g.bar)/(t2 * n + d.star)
    }
    .sva.postvar <- function (sum2, n, a, b) {
        (0.5 * sum2 + b)/(n/2 + a - 1)
    }

    while (change > conv) {
        g.new <- .sva.postmean(g.hat, g.bar, n, d.old, t2)
        sum2 <- rowSums((sdat - g.new %*% t(rep(1, ncol(sdat))))^2, na.rm = T)
        d.new <- .sva.postvar(sum2, n, a, b)
        change <- max(abs(g.new - g.old)/g.old, abs(d.new - d.old)/d.old)
        g.old <- g.new
        d.old <- d.new
        count <- count + 1
    }
    adjust <- rbind(g.new, d.new)
    rownames(adjust) <- c("g.star", "d.star")
    adjust
}

.sva.int.eprior <- function (sdat, g.hat, d.hat, parallel=FALSE, mc.cores=1) {
    ## modified from sva:::int.eprior
    ## added parallel option
    g.star <- d.star <- NULL
    r <- nrow(sdat)
    if(parallel) {
        tmp <- parallel::mclapply(1:r, mc.cores=mc.cores, function(i) {
            g <- g.hat[-i]
            d <- d.hat[-i]
            x <- sdat[i, !is.na(sdat[i, ])]
            n <- length(x)
            j <- numeric(n) + 1
            dat <- matrix(as.numeric(x), length(g), n, byrow = T)
            resid2 <- (dat - g)^2
            sum2 <- resid2 %*% j
            LH <- 1/(2 * pi * d)^(n/2) * exp(-sum2/(2 * d))
            LH[is.nan(LH)] <- 0
            c(g.star = sum(g * LH)/sum(LH),
              d.star = sum(d * LH)/sum(LH))
        })
    } else {
        tmp <- lapply(1:r, function(i) {
            g <- g.hat[-i]
            d <- d.hat[-i]
            x <- sdat[i, !is.na(sdat[i, ])]
            n <- length(x)
            j <- numeric(n) + 1
            dat <- matrix(as.numeric(x), length(g), n, byrow = T)
            resid2 <- (dat - g)^2
            sum2 <- resid2 %*% j
            LH <- 1/(2 * pi * d)^(n/2) * exp(-sum2/(2 * d))
            LH[is.nan(LH)] <- 0
            c(g.star = sum(g * LH)/sum(LH),
              d.star = sum(d * LH)/sum(LH))
        })
    }
    adjust <- data.matrix(data.frame(tmp))
    colnames(adjust) <- NULL
    adjust
}
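## ---------------------------------------------------------------------------
## Illustrative usage sketch (editor's addition, not part of the package).
## Assumes a named list of cohort objects, each with $genes (genes x samples,
## with sample names as column names) and $pheno (samples x covariates, with
## rownames matching the gene matrix columns), plus a numeric column in which
## 0 marks healthy controls. The object names and the column name
## "diseaseStatus" here are hypothetical.
##
## GSEs <- list(cohortA = list(genes = genesA, pheno = phenoA),
##              cohortB = list(genes = genesB, pheno = phenoB))
## out <- COCONUT(GSEs, control.0.col = "diseaseStatus")
##
## ## merge controls and COCONUT-corrected disease samples into one object:
## merged <- combineCOCOoutput(out)
## dim(merged$genes); table(merged$class.cntl0.dis1)
## ---------------------------------------------------------------------------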
/scratch/gouwar.j/cran-all/cranData/COCONUT/R/COCONUT.R
#' Convert a data frame to ranks #' #' Replaces all numerical columns of a data frame with their ranks. Uses sport ranking, i.e. ties #' share the highest rank place. Ignores non-numerical columns. See [rank()]. Optionally, returns in-group ranks #' using a specified grouping column. #' #' This function replaces the now-defunct `rankDF()` from COINr < v1.0. #' #' @param df A data frame #' @param use_group An optional column of df (specified as a string) to use as a grouping variable. If specified, returns ranks #' inside each group present in this column. #' #' @examples #' # some random data, with a column of characters #' df <- data.frame(RName = c("A", "B", "C"), #' Score1 = runif(3), Score2 = runif(3)) #' # convert to ranks #' rank_df(df) #' # grouped ranking - use some example data #' df1 <- ASEM_iData[c("uCode", "GDP_group", "Goods", "LPI")] #' rank_df(df1, use_group = "GDP_group") #' #' @return A data frame equal to the data frame that was input, but with any numerical columns replaced with ranks. #' #' @export rank_df <- function(df, use_group = NULL){ # store copy of original dfo <- df if(is.null(use_group)){ df <- data.frame( lapply(df, function(y) if(is.numeric(y)) rank(-1*y, na.last = "keep", ties.method = "min") else y) ) } else { stopifnot(use_group %in% colnames(df)) # get groups grps <- unique(unlist(df[[use_group]])) # I have to work over groups. To me the clearest way of doing this is with a for loop (sorry) dfold <- df for(grp in grps){ # get current group rows grprows <- df[[use_group]] == grp # exclude any NAs grprows[is.na(grprows)] <- FALSE # now work over all columns, but just for the current group rows df[grprows,] <- data.frame( lapply(dfold[grprows,], function(y) if(is.numeric(y)) rank(-1*y, na.last = "keep", ties.method = "min") else y) ) } # now I have to fill in rows that have NA group values, with NAs if(any(is.na(df[[use_group]]))){ df[is.na(df[[use_group]]),] <- data.frame(lapply(df[is.na(df[[use_group]]),], function(y) if(is.numeric(y)) NA else y) ) } } rownames(df) <- NULL # reset col names because sometimes gets altered... names(df) <- names(dfo) df } #' Compare two data frames #' #' A custom function for comparing two data frames of indicator data, to see whether they match up, at a specified number of #' significant figures. Specifically, this is intended to compare two data frames, without regard to row or column ordering. #' Rows are matched by the required `matchcol` argument. Hence, it is different from e.g. [all.equal()] which requires rows #' to be ordered. In COINr, typically `matchcol` is the `uCode` column, for example. #' #' This function compares numerical and non-numerical columns to see if they match. Rows and columns can be in any order. The function #' performs the following checks: #' #' * Checks that the two data frames are the same size #' * Checks that column names are the same, and that the matching column has the same entries #' * Checks column by column that the elements are the same, after sorting according to the matching column #' #' It then summarises for each column whether there are any differences, and also what the differences are, if any. #' #' This is intended to cross-check results. For example, if you run something in COINr and want to check indicator results against #' external calculations. #' #' This function replaces the now-defunct `compareDF()` from COINr < v1.0. #' #' @param df1 A data frame #' @param df2 Another data frame #' @param matchcol A common column name that is used to match row order. E.g. 
this might be `uCode`. #' @param sigfigs The number of significant figures to use for matching numerical columns #' #' @examples #' # take a sample of indicator data (including the uCode column) #' data1 <- ASEM_iData[c(2,12:15)] #' # copy the data #' data2 <- data1 #' # make a change: replace one value in data2 by NA #' data2[1,2] <- NA #' # compare data frames #' compare_df(data1, data2, matchcol = "uCode") #' #' @return A list with comparison results. List contains: #' * `.$Same`: overall summary: if `TRUE` the data frames are the same according to the rules specified, otherwise `FALSE`. #' * `.$Details`: details of each column as a data frame. Each row summarises a column of the data frame, saying whether #' the column is the same as its equivalent, and the number of differences, if any. In case the two data frames have differing #' numbers of columns and rows, or have differing column names or entries in `matchcol`, `.$Details` will simply contain a #' message to this effect. #' * `.$Differences`: a list with one entry for every column which contains different entries. Differences are summarised as #' a data frame with one row for each difference, reporting the value from `df1` and its equivalent from `df2`. #' #' @export compare_df <- function(df1, df2, matchcol, sigfigs = 5){ # general checks stopifnot(is.data.frame(df1), is.data.frame(df2), matchcol %in% colnames(df1), matchcol %in% colnames(df2)) # check for duplicates in matchcol if( (anyDuplicated(df1[[matchcol]]) > 0) | (anyDuplicated(df2[[matchcol]]) > 0) ){ stop("Duplicates found in matchcol. This function requires unique entries in matchcol to make a comparison.") } # this is default but will change if anything is found to be different sameanswer <- TRUE # check sizes if(nrow(df1)!=nrow(df2)){ sameanswer <- FALSE details <- "Different number of rows." } else if(ncol(df1)!=ncol(df2)){ sameanswer <- FALSE details <- "Different number of columns." } else if(!setequal(colnames(df1), colnames(df2))){ sameanswer <- FALSE details <- "Column names not the same." } else if(!setequal(df1[[matchcol]], df2[[matchcol]])){ sameanswer <- FALSE details <- "Elements in matchcol are not the same." } if(!sameanswer){ # exiting because dfs have different sizes or column/row names return(list(Same = sameanswer, Details = details)) } else { # From this point we should be fairly sure that the two dfs are the same size and contain the same cols and rows # match col order df2 <- df2[colnames(df1)] # match row order df2 <- df2[match(df1[[matchcol]], df2[[matchcol]]),] # Now the dfs should be also in the same order of rows and cols. Remains to check the values. 
    details <- data.frame(Column = colnames(df1),
                          TheSame = NA,
                          Comment = NA,
                          NDifferent = NA)
    diffs <- vector(mode = "list", length = 0)

    # now loop over columns
    for(ii in 1:length(colnames(df1))){

      # get cols
      x <- df1[[ii]]
      y <- df2[[ii]]

      # class check (identical() is used since class() may return a vector)
      if(!identical(class(x), class(y))){
        details$TheSame[[ii]] <- FALSE
        details$Comment[[ii]] <- "Class difference"
        next
      }

      # now check depending on type
      if(is.numeric(x)){

        if(identical(signif(x, sigfigs), signif(y, sigfigs))){
          details$TheSame[[ii]] <- TRUE
          details$Comment[[ii]] <- paste0("Numerical and identical to ", sigfigs, " sf.")
          details$NDifferent[[ii]] <- 0
        } else {
          details$TheSame[[ii]] <- FALSE
          details$Comment[[ii]] <- paste0("Numerical and different at ", sigfigs, " sf.")
          dfdiffs <- data.frame(MatchCol = df1[[matchcol]], df1 = x, df2 = y)
          colnames(dfdiffs)[1] <- matchcol
          diffrows <- signif(x, sigfigs) != signif(y, sigfigs)
          diffrows[is.na(diffrows)] <- TRUE
          dfdiffs <- dfdiffs[diffrows, ]
          diffs[[colnames(df1)[ii]]] <- dfdiffs
          details$NDifferent[[ii]] <- nrow(dfdiffs)
        }

      } else {

        if(identical(x, y)){
          details$TheSame[[ii]] <- TRUE
          details$Comment[[ii]] <- paste0("Non-numerical and identical")
          details$NDifferent[[ii]] <- 0
        } else {
          details$TheSame[[ii]] <- FALSE
          details$Comment[[ii]] <- paste0("Non-numerical and different")
          dfdiffs <- data.frame(MatchCol = df1[[matchcol]], df1 = x, df2 = y)
          colnames(dfdiffs)[1] <- matchcol
          # treat any NA comparison as a difference, as in the numeric case
          diffrows <- x != y
          diffrows[is.na(diffrows)] <- TRUE
          dfdiffs <- dfdiffs[diffrows, ]
          diffs[[colnames(df1)[ii]]] <- dfdiffs
          details$NDifferent[[ii]] <- nrow(dfdiffs)
        }
      }
    }

    list(Same = all(details$TheSame),
         Details = details,
         Differences = diffs)
  }
}

#' Replace multiple values in a data frame
#'
#' Given a data frame (or vector), this function replaces values according to a look-up table or dictionary. In COINr this may
#' be useful for exchanging categorical data with numeric scores, prior to assembly, or for changing codes.
#'
#' The lookup data frame must not have any duplicated values in the `old` column. This function looks for exact matches of
#' elements of the `old` column and replaces them with the corresponding value in the `new` column. For each row of `lookup`,
#' the class of the old value must match the class of the new value. This is to keep classes of data frame columns consistent.
#' If you wish to replace with a different class, you should convert classes in your data frame before using this function.
#'
#' This function replaces the now-defunct `replaceDF()` from COINr < v1.0.
#'
#' @param df A data frame or a vector
#' @param lookup A data frame with columns `old` (the values to be replaced) and `new` (the values to replace with). See details.
#'
#' @examples
#' # replace sub-pillar codes in ASEM indicator metadata
#' codeswap <- data.frame(old = c("Conn", "Sust"), new = c("SI1", "SI2"))
#' # swap codes in iMeta
#' replace_df(ASEM_iMeta, codeswap)
#'
#' @return A data frame with replaced values
#'
#' @export
replace_df <- function(df, lookup){

  # if a vector is input, convert to data frame
  vecflag <- FALSE
  if(is.vector(df)){
    vecflag <- TRUE
    df <- data.frame(v1 = df)
  }

  # checks
  stopifnot(is.data.frame(df),
            is.data.frame(lookup),
            !(is.null(lookup$old)),
            !(is.null(lookup$new)),
            anyDuplicated(lookup$old) == 0)

  # replace each item one at a time
  for(ii in 1:nrow(lookup)){

    # check that the class of the old/new pair is the same
    if(!identical(class(lookup$old[ii]), class(lookup$new[ii]))){
      stop(paste0("Class difference detected in row ", ii, " of lookup. Old class is ",
                  class(lookup$old[ii]), " but new class is ", class(lookup$new[ii]), "."))
    }

    # replace value
    df[df == lookup$old[ii]] <- lookup$new[ii]
  }

  # if it was a vector, convert back
  if(vecflag){
    df <- unlist(df, use.names = FALSE)
  }

  df
}

#' Round down a data frame
#'
#' Tiny function just to round down a data frame for display in a table, ignoring non-numeric columns.
#'
#' This function replaces the now-defunct `roundDF()` from COINr < v1.0.
#'
#' @param df A data frame to input
#' @param decimals The number of decimal places to round to (default 2)
#'
#' @examples
#' round_df( as.data.frame(matrix(runif(20),10,2)), decimals = 3)
#'
#' @return A data frame, with any numeric columns rounded to the specified amount.
#'
#' @export
round_df <- function(df, decimals = 2){

  df <- data.frame(
    lapply(df, function(y) if(is.numeric(y)) round(y, decimals) else y)
  )
  rownames(df) <- NULL

  df
}

#' Round a data frame to specified significant figures
#'
#' Tiny function just to round a data frame to a given number of significant figures for display in a table, ignoring non-numeric columns.
#'
#' @param df A data frame to input
#' @param digits The number of significant figures to round to (default 3)
#'
#' @examples
#' signif_df( as.data.frame(matrix(runif(20),10,2)), digits = 3)
#'
#' @return A data frame, with any numeric columns rounded to the specified amount.
#'
#' @export
signif_df <- function(df, digits = 3){

  df <- data.frame(
    lapply(df, function(y) if(is.numeric(y)) signif(y, digits) else y)
  )
  rownames(df) <- NULL

  df
}
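## ---------------------------------------------------------------------------
## Illustrative sketch (editor's addition): inspecting compare_df() output,
## following the example in the roxygen block above.
##
## data1 <- ASEM_iData[c(2, 12:15)]
## data2 <- data1
## data2[1, 2] <- NA
## cmp <- compare_df(data1, data2, matchcol = "uCode")
## cmp$Same          # FALSE: one column differs
## cmp$Details       # one summary row per column
## cmp$Differences   # list of data frames, one per differing column
## ---------------------------------------------------------------------------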
/scratch/gouwar.j/cran-all/cranData/COINr/R/DFtools.R
#' Perform PCA on a coin
#'
#' Performs Principal Component Analysis (PCA) on a specified data set and subset of indicators or aggregation groups.
#' This function has two main outputs: the output(s) of [stats::prcomp()], and optionally the weights resulting from
#' the PCA. Therefore it can be used as an analysis tool and/or a weighting tool. For the weighting aspect, please
#' see the details below.
#'
#' PCA must be approached with care and an understanding of what is going on. First, let's consider the PCA excluding
#' the weighting component. PCA takes a set of data consisting of variables (indicators) and observations. It then
#' rotates the coordinate system such that in the new coordinate system, the first axis (called the first principal
#' component (PC)) aligns with the direction of maximum variance of the data set. The amount of variance explained by the
#' first PC, and by the next several PCs, can help to understand whether the data can be explained by a simpler set of
#' variables. PCA is often used for dimensionality reduction in modelling, for example.
#'
#' In the context of composite indicators, PCA can be used first as an analysis tool. We can check, for example, whether
#' within an aggregation group the indicators can mostly be explained by one PC. If so, this gives a little extra justification
#' to aggregating the indicators because the information lost in aggregation will be less. We can also check this over
#' the entire set of indicators.
#'
#' The complication is that, in a composite indicator, the indicators are grouped and arranged into a hierarchy. This means
#' that when performing a PCA, we have to decide which level to perform it at, and which groupings to use, if any. The [get_PCA()]
#' function, using the `by_groups` argument, allows PCA to be applied automatically by group if this is required.
#'
#' The output of [get_PCA()] is a PCA object for each of the groups specified, which can then be examined using existing
#' tools in R, see `vignette("analysis")`.
#'
#' The other output of [get_PCA()] is a set of "PCA weights" if the `weights_to` argument is specified. Here we also need
#' to say some words of caution. First, what constitutes "PCA weights" in composite indicators is not very well-defined.
#' In COINr, a simple option is adopted. That is, the loadings of the first principal component are taken as the weights.
#' The logic here is that these loadings should maximise the explained variance - the implication being that if we use
#' these as weights in an aggregation, we should maximise the explained variance and hence the information passed from
#' the indicators to the aggregate value. This is a nice property in a composite indicator, where one of the aims is to
#' represent many indicators by a single composite. See \doi{10.1016/j.envsoft.2021.105208} for a
#' discussion on this.
#'
#' But. The weights that result from PCA have a number of downsides. First, they can often include negative weights
#' which can be hard to justify. Also PCA may arbitrarily flip the axes (since from a variance point of view the
#' direction is not important). In the quest for maximum variance, PCA will also weight the strongest-correlating
#' indicators the highest, which means that other indicators may be neglected. In short, it often results in a very
#' unbalanced set of weights. Moreover, PCA can only be performed on one level at a time.
#'
#' All these considerations point to the fact: while PCA as an analysis tool is well-established, please use PCA weights
#' with care and understanding of what is going on.
#'
#' This function replaces the now-defunct `getPCA()` from COINr < v1.0.
#'
#' @param coin A coin
#' @param dset The name of the data set in `.$Data` to use.
#' @param iCodes An optional character vector of indicator codes to subset the indicator data, passed to [get_data()]
#' @param Level The aggregation level to take indicator data from. Integer from 1 (indicator level)
#' to N (top aggregation level, typically the index).
#' @param by_groups If `TRUE` (default), performs PCA inside each aggregation group inside the specified level. If `FALSE`,
#' performs a single PCA over all indicators/aggregates in the specified level.
#' @param nowarnings If `FALSE` (default), will give warnings where missing data are found. Set to `TRUE` to suppress these warnings.
#' @param out2 If the input is a coin object, this controls where to send the output. If `"coin"`, it
#' sends the results to the coin object, otherwise if `"list"`, outputs to a separate list (default).
#' @param weights_to A string to name the resulting set of weights. If this is specified, and `out2 = "coin"`,
#' will write a new set of "PCA weights" to the `.$Meta$Weights` list. This is experimental - see details. If
#' `NULL`, does not write any weights (default).
#'
#' @importFrom stats prcomp na.omit
#'
#' @examples
#' # build example coin
#' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
#'
#' # PCA on "Sust" group of indicators
#' l_pca <- get_PCA(coin, dset = "Raw", iCodes = "Sust",
#'                  out2 = "list", nowarnings = TRUE)
#'
#' # Summary of results for one of the sub-groups
#' summary(l_pca$PCAresults$Social$PCAres)
#'
#' @return
#' If `out2 = "coin"`, results are appended to the coin object. Specifically:
#' * A list is added to `.$Analysis` containing PCA weights (loadings) of the first principal component, and the output of [stats::prcomp], for each
#' aggregation group found in the targeted level.
#' * If `weights_to` is specified, a new set of PCA weights is added to `.$Meta$Weights`
#' If `out2 = "list"` the same outputs are contained in a list.
#'
#' @seealso
#' * [stats::prcomp] Principal component analysis
#'
#' @export
get_PCA <- function(coin, dset = "Raw", iCodes = NULL, Level = NULL, by_groups = TRUE,
                    nowarnings = FALSE, weights_to = NULL, out2 = "list"){

  if(is.null(Level)){
    Level <- 1
  }

  # There is a catch here because we might want to do PCA weights across one level, but that level
  # may have multiple groups. This means we have to call PCA separately for each group.

  # first we define a function which returns weights for a given set of indicator data
  # this function implicitly calls other variables from the environment inside get_PCA() so we don't need
  # to explicitly pass everything to it.
  PCAwts <- function(icodes1){

    # get ind data
    iData_ <- get_data(coin, dset = dset, iCodes = icodes1, Level = Level, also_get = "none")

    # check for missing vals
    nNA <- sum(is.na(iData_))

    # remove any rows with missing data
    if (nNA > 0){
      dat4PCA <- stats::na.omit(iData_)
      if(!nowarnings){
        warning(paste0(nNA, " missing values found. Removing ", nrow(iData_)-nrow(dat4PCA), " rows with missing values in order to perform PCA. You can also try imputing data first to avoid this."))
      }
    } else {
      dat4PCA <- iData_
    }

    # perform PCA
    PCAres <- stats::prcomp(dat4PCA, center = TRUE, scale = TRUE)

    # weights from the first PC should be the max variance weights
    wts <- as.numeric(PCAres$rotation[,1])

    list(wts = wts, PCAres = PCAres, iCodes = names(iData_))
  }

  # We need to know the codes of the inds/aggs to get weights from
  iData_full <- get_data(coin, dset = dset, iCodes = iCodes, Level = Level, also_get = "none")
  IndCodes <- names(iData_full)

  if(by_groups){
    # OK, first thing is to find what groups we have
    # Get index structure
    lin <- coin$Meta$Lineage
    # Get cols of interest: the present one plus the parents
    lin <- lin[c(Level, Level + 1)]
    # Get parents of these codes
    parents <- unlist(unique(lin[(lin[[1]] %in% IndCodes) ,2]))
  } else {
    parents = "All"
  }

  # Right, now we need to cycle through these groups and do PCA on each group.

  # List for general PCA results
  PCAlist <- vector(mode = "list", length = length(parents))
  # copy of weights to modify
  wlist <- coin$Meta$Weights$Original

  for (ii in 1:length(parents)){
    if(by_groups){
      # get PCA results for group
      outPCA <- PCAwts(parents[ii])
    } else {
      # get PCA results for all indicators together
      outPCA <- PCAwts(NULL)
    }
    # attach weights to list
    # wts should be in the same order as out$iCodes. We have to make sure they match exactly here as
    # sometimes things get reordered. This is done with match() rather than %in% for this reason.
    wlist$Weight[match(outPCA$iCodes, wlist$iCode)] <- outPCA$wts
    # add general results to list
    PCAlist[[ii]] <- outPCA
  }
  # rename list
  names(PCAlist) <- parents

  # write results
  if(out2 == "coin"){

    if(!is.null(weights_to)){
      # write weights
      coin$Meta$Weights[[weights_to]] <- wlist
      message("Weights written to .$Meta$Weights$", weights_to)
    }
    # write other info
    coin$Analysis[[dset]][["PCA"]][[paste0("L", Level)]] <- PCAlist

    coin

  } else {
    list("Weights" = wlist,
         "PCAresults" = PCAlist)
  }
}
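## ---------------------------------------------------------------------------
## Illustrative sketch (editor's addition): using get_PCA() as a weighting
## tool. Writing back to the coin stores the first-PC loadings as a named
## weight set; the weight-set name "PCAwts" is arbitrary.
##
## coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
## coin <- get_PCA(coin, dset = "Raw", Level = 2, out2 = "coin",
##                 weights_to = "PCAwts", nowarnings = TRUE)
## head(coin$Meta$Weights$PCAwts)
##
## ## proportion of variance explained within one group, as an analysis check:
## l_pca <- get_PCA(coin, dset = "Raw", iCodes = "Sust", out2 = "list",
##                  nowarnings = TRUE)
## summary(l_pca$PCAresults$Social$PCAres)$importance[, 1:3]
## ---------------------------------------------------------------------------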
/scratch/gouwar.j/cran-all/cranData/COINr/R/PCA.R
#' Aggregate indicators #' #' Aggregates indicators following the structure specified in `iMeta`, for each coin inside the purse. #' See [Aggregate.coin()], which is applied to each coin, for more information #' #' @param x A purse-class object #' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`. #' @param f_ag The name of an aggregation function, a string. This can either be a single string naming #' a function to use for all aggregation levels, or else a character vector of function names of length `n-1`, where `n` is #' the number of levels in the index structure. In this latter case, a different aggregation function may be used for each level #' in the index: the first in the vector will be used to aggregate from Level 1 to Level 2, the second from Level 2 to Level 3, and #' so on. #' @param w An optional data frame of weights. If `f_ag` does not require or accept weights, set to `"none"`. #' @param f_ag_para Optional parameters to pass to `f_ag`, other than `x` and `w`. As with `f_ag`, this can specified to have different #' parameters for each aggregation level by specifying as a nested list of length `n-1`. #' @param dat_thresh An optional data availability threshold, specified as a number between 0 and 1. If a row #' within an aggregation group has data availability lower than this threshold, the aggregated value for that row will be #' `NA`. Data availability, for a row `x_row` is defined as `sum(!is.na(x_row))/length(x_row)`, i.e. the #' fraction of non-`NA` values. #' @param write_to If specified, writes the aggregated data to `.$Data[[write_to]]`. Default `write_to = "Aggregated"`. #' @param ... arguments passed to or from other methods. #' #' @return An updated purse with new treated data sets added at `.$Data[[write_to]]` in each coin. #' @export #' #' @examples #' # build example purse up to normalised data set #' purse <- build_example_purse(up_to = "Normalise", quietly = TRUE) #' #' # aggregate using defaults #' purse <- Aggregate(purse, dset = "Normalised") #' Aggregate.purse <- function(x, dset, f_ag = NULL, w = NULL, f_ag_para = NULL, dat_thresh = NULL, write_to = NULL, ...){ # input check check_purse(x) # apply unit screening to each coin x$coin <- lapply(x$coin, function(coin){ Aggregate.coin(coin, dset, f_ag = f_ag, w = w, f_ag_para = f_ag_para, dat_thresh = dat_thresh, out2 = "coin", write_to = write_to) }) # make sure still purse class class(x) <- c("purse", "data.frame") x } #' Aggregate indicators #' #' Aggregates a named data set specified by `dset` using aggregation function `f_ag`, weights `w`, and optional #' function parameters `f_ag_para`. Note that COINr has a number of aggregation functions built in, #' all of which are of the form `a_*()`, e.g. [a_amean()], [a_gmean()] and friends. #' #' Aggregation is performed row-wise using the function `f_ag`, such that for each row `x_row`, the output is #' `f_ag(x_row, f_ag_para)`, and for the whole data frame, it outputs a numeric vector. The data frame `x` must #' only contain numeric columns. #' #' The function `f_ag` must be supplied as a string, e.g. `"a_amean"`, and it must take as a minimum an input #' `x` which is either a numeric vector (if `by_df = FALSE`), or a data frame (if `by_df = TRUE`). In the former #' case `f_ag` should return a single numeric value (i.e. the result of aggregating `x`), or in the latter case #' a numeric vector (the result of aggregating the whole data frame in one go). #' #' `f_ag` can optionally have other parameters, e.g. 
weights, specified as a list in `f_ag_para`. #' #' Note that COINr has a number of aggregation functions built in, #' all of which are of the form `a_*()`, e.g. [a_amean()], [a_gmean()] and friends. To see a list browse COINr functions alphabetically or #' type `a_` in the R Studio console and press the tab key (after loading COINr). #' #' Optionally, a data availability threshold can be assigned below which the aggregated value will return #' `NA` (see `dat_thresh` argument). If `by_df = TRUE`, this will however be ignored because aggregation is not #' done on individual rows. Note that more complex constraints could be built into `f_ag` if needed. #' #' @param x A coin class object. #' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`. #' @param f_ag The name of an aggregation function, a string. This can either be a single string naming #' a function to use for all aggregation levels, or else a character vector of function names of length `n-1`, where `n` is #' the number of levels in the index structure. In this latter case, a different aggregation function may be used for each level #' in the index: the first in the vector will be used to aggregate from Level 1 to Level 2, the second from Level 2 to Level 3, and #' so on. #' @param w An optional data frame of weights. If `f_ag` does not require accept weights, set to `"none"`. Alternatively, can be the #' name of a weight set found in `.$Meta$Weights`. #' @param f_ag_para Optional parameters to pass to `f_ag`, other than `x` and `w`. As with `f_ag`, this can specified to have different #' parameters for each aggregation level by specifying as a nested list of length `n-1`. #' @param dat_thresh An optional data availability threshold, specified as a number between 0 and 1. If a row #' within an aggregation group has data availability lower than this threshold, the aggregated value for that row will be #' `NA`. Data availability, for a row `x_row` is defined as `sum(!is.na(x_row))/length(x_row)`, i.e. the #' fraction of non-`NA` values. #' @param by_df Controls whether to send a numeric vector to `f_ag` (if `FALSE`, default) or a data frame (if `TRUE`) - see #' details. #' @param out2 Either `"coin"` (default) to return updated coin or `"df"` to output the aggregated data set. #' @param write_to If specified, writes the aggregated data to `.$Data[[write_to]]`. Default `write_to = "Aggregated"`. #' @param ... arguments passed to or from other methods. #' #' @examples #' # build example up to normalised data set #' coin <- build_example_coin(up_to = "Normalise") #' #' # aggregate normalised data set #' coin <- Aggregate(coin, dset = "Normalised") #' #' @return An updated coin with aggregated data set added at `.$Data[[write_to]]` if `out2 = "coin"`, #' else if `out2 = "df"` outputs the aggregated data set as a data frame. 
#' #' @export Aggregate.coin <- function(x, dset, f_ag = NULL, w = NULL, f_ag_para = NULL, dat_thresh = NULL, by_df = FALSE, out2 = "coin", write_to = NULL, ...){ # Write to Log ------------------------------------------------------------ coin <- write_log(x, dont_write = "x") # CHECK AND SET f_ag ------------------------------------------------------ nlev <- max(coin$Meta$Ind$Level, na.rm = TRUE) # default and check if(is.null(f_ag)){ f_ag <- "a_amean" f_ag_para <- NULL } else { if(!is.character(f_ag)){ stop("f_ag must be specified as a character string or vector (function name(s) in inverted commas).") } } stopifnot(length(f_ag) > 0) # if same for all levels, repeat if(length(f_ag) == 1){ f_ags <- rep(f_ag, nlev - 1) } else { if(length(f_ag) != (nlev - 1)){ stop("f_ag must have either length 1 (same function for all levels) or length equal to number of levels - in your case: ", nlev) } f_ags <- f_ag } # CHECK AND SET w --------------------------------------------------------- # if weights is supplied we have to see what kind of thing it is # NULL indicates that we should use metadata weights if(!is.null(w)){ if(is.data.frame(w)){ stopifnot(exists("iCode", w), exists("Weight", w)) w1 <- w } else if(is.character(w)){ if(length(w) != 1){ stop("w must be either a string indicating a name of a weight set, or a data frame of weights, or 'none', or NULL (to use weights from metadata).") } if(w != "none"){ # we look for a named weight set w1 <- coin$Meta$Weights[[w]] if(is.null(w1)){ stop("Weight set with name '", w, "' not found in .$Meta$Weights.") } stopifnot(is.data.frame(w1), exists("iCode", w1), exists("Weight", w1)) } else { # convert w1 to NULL w1 <- NULL } } else { stop("w must be either a string indicating a name of a weight set, or a data frame of weights, or 'none', or NULL (to use weights from metadata).") } } else{ # if w was NULL, get from metadata w1 <- coin$Meta$Ind[c("iCode", "Weight")] } # from this point, w1 is either a data frame of weights, or NULL (don't pass weights to f_ag) # CHECK AND SET f_ag_para ------------------------------------------------- # if f_ag_para is NULL, repeat for all levs if(!is.null(f_ag_para)){ if(!is.list(f_ag_para)){ stop("f_ag_para must be specified as a list or list of lists") } stopifnot(length(f_ag_para) > 0) # if same for all levels, repeat if(length(f_ag_para) == 1){ f_ag_paras <- rep(f_ag_para, nlev - 1) } else { if(length(f_ag_para) != (nlev - 1)){ stop("f_ag_para must have either length 1 (same parameters for all levels) or length equal to number of levels - in your case: ", nlev) } f_ag_paras <- f_ag_para } } else { f_ag_paras <- rep(list(NULL), 4) } # Other Prep -------------------------------------------------------------------- if(is.null(dat_thresh)){ dat_threshs <- rep(list(NULL), 4) } else { if(!is.numeric(dat_thresh)){ stop("dat_thresh must be a numeric value or vector of length (number of levels - 1) - in your case: ", nlev) } if(any((dat_thresh < 0) | (dat_thresh > 1))){ stop("dat_thresh must only contain numeric values between 0 and 1.") } if(length(dat_thresh) == 1){ dat_threshs <- rep(dat_thresh, nlev - 1) } else { if(length(dat_thresh) != (nlev - 1)){ stop("dat_thresh must have either length 1 (same for all levels) or length equal to number of levels - in your case: ", nlev) } dat_threshs <- dat_thresh } } # Aggregate --------------------------------------------------------------- # Here we apply the aggregation by level # get data (also performing checks) indat <- get_dset(coin, dset) # get metadata imeta <- 
coin$Meta$Ind[!is.na(coin$Meta$Ind$Level), ] # Function that aggregates from Level = lev to the next level up # calls the function specified by f_ag. aggregate_level <- function(lev){ # filter metadata to level imeta_l <- imeta[imeta$Level == (lev-1), ] if(is.null(w1)){ aggs <- tapply(imeta_l$iCode, imeta_l$Parent, function(codes){ # call func do.call("Aggregate", list(x = indat_ag[codes], f_ag = f_ags[lev-1], f_ag_para = f_ag_paras[[lev-1]], dat_thresh = dat_threshs[lev-1], by_df = by_df)) }) } else { aggs <- tapply(imeta_l$iCode, imeta_l$Parent, function(codes){ # get weights wts <- w1$Weight[match(codes, w1$iCode)] # call func do.call("Aggregate", list(x = indat_ag[codes], f_ag = f_ags[lev-1], f_ag_para = c(list(w = wts), f_ag_paras[[lev-1]]), dat_thresh = dat_threshs[[lev-1]], by_df = by_df)) }) } if(is.list(aggs)){ # aggs comes out as a list of vectors, have to make to df as.data.frame(do.call(cbind, aggs)) } else if (is.numeric(aggs)){ # in this case there is only one row, and it comes out as an array which needs to be converted as.data.frame(t(aggs)) } } indat_ag <- indat # run the above function for each level for(lev in 2:nlev){ indat_ag <- cbind(indat_ag, aggregate_level(lev)) } # Output ------------------------------------------------------------------ # output list if(out2 == "df"){ indat_ag } else { if(is.null(write_to)){ write_to <- "Aggregated" } write_dset(coin, indat_ag, dset = write_to) } } #' Aggregate data frame #' #' Aggregates a data frame into a single column using a specified function. Note that COINr has a number of aggregation functions built in, #' all of which are of the form `a_*()`, e.g. [a_amean()], [a_gmean()] and friends. #' #' Aggregation is performed row-wise using the function `f_ag`, such that for each row `x_row`, the output is #' `f_ag(x_row, f_ag_para)`, and for the whole data frame, it outputs a numeric vector. The data frame `x` must #' only contain numeric columns. #' #' The function `f_ag` must be supplied as a string, e.g. `"a_amean"`, and it must take as a minimum an input #' `x` which is either a numeric vector (if `by_df = FALSE`), or a data frame (if `by_df = TRUE`). In the former #' case `f_ag` should return a single numeric value (i.e. the result of aggregating `x`), or in the latter case #' a numeric vector (the result of aggregating the whole data frame in one go). #' #' `f_ag` can optionally have other parameters, e.g. weights, specified as a list in `f_ag_para`. #' #' Note that COINr has a number of aggregation functions built in, #' all of which are of the form `a_*()`, e.g. [a_amean()], [a_gmean()] and friends. To see a list browse COINr functions alphabetically or #' type `a_` in the R Studio console and press the tab key (after loading COINr). #' #' Optionally, a data availability threshold can be assigned below which the aggregated value will return #' `NA` (see `dat_thresh` argument). If `by_df = TRUE`, this will however be ignored because aggregation is not #' done on individual rows. Note that more complex constraints could be built into `f_ag` if needed. #' #' @param x Data frame to be aggregated #' @param f_ag The name of an aggregation function, as a string. #' @param f_ag_para Any additional parameters to pass to `f_ag`, as a named list. #' @param dat_thresh An optional data availability threshold, specified as a number between 0 and 1. If a row #' of `x` has data availability lower than this threshold, the aggregated value for that row will be #' `NA`. 
Data availability, for a row `x_row` is defined as `sum(!is.na(x_row))/length(x_row)`, i.e. the #' fraction of non-`NA` values. #' @param by_df Controls whether to send a numeric vector to `f_ag` (if `FALSE`, default) or a data frame (if `TRUE`) - see #' details. #' @param ... arguments passed to or from other methods. #' #' @examples #' # get some indicator data - take a few columns from built in data set #' X <- ASEM_iData[12:15] #' #' # normalise to avoid zeros - min max between 1 and 100 #' X <- Normalise(X, #' global_specs = list(f_n = "n_minmax", #' f_n_para = list(l_u = c(1,100)))) #' #' # aggregate using harmonic mean, with some weights #' y <- Aggregate(X, f_ag = "a_hmean", f_ag_para = list(w = c(1, 1, 2, 1))) #' #' @return A numeric vector #' #' @export Aggregate.data.frame <- function(x, f_ag = NULL, f_ag_para = NULL, dat_thresh = NULL, by_df = FALSE, ...){ # CHECKS ------------------------------------------------------------------ # x must be a df but check all numeric not_numeric <- !sapply(x, is.numeric) if(any(not_numeric)){ stop("Non-numeric column(s) in x.") } if(!is.null(f_ag_para)){ if(!is.list(f_ag_para)){ stop("f_ag_para must be a list") } } # DEFAULTS ---------------------------------------------------------------- # default mean of cols if(is.null(f_ag)){ f_ag <- "a_amean" f_ag_para = list(w = rep(1, ncol(x))) } if(is.null(dat_thresh)){ dat_thresh <- -1 # effectively no limit } # AGGREGATE --------------------------------------------------------------- lx <- ncol(x) # call aggregation function if(by_df){ # DATA FRAME AGGRGATION if(is.null(f_ag_para)){ y <- do.call(f_ag, list(x = x)) } else { y <- do.call(f_ag, c(list(x = x), f_ag_para)) } } else { # BY-ROW AGGREGATION if(is.null(f_ag_para)){ y <- apply(x, 1, function(x){ if(sum(!is.na(x))/lx < dat_thresh){ NA } else { do.call(f_ag, list(x = x)) } }) } else { y <- apply(x, 1, function(x){ if(sum(!is.na(x))/lx < dat_thresh){ NA } else { do.call(f_ag, c(list(x = x), f_ag_para)) } }) } } if(!is.numeric(y)){ if(all(is.na(y))){ # if we get all NAs, this comes back as a logical vector, so convert y <- as.numeric(y) } else { stop("The output of f_ag has not successfully created a numeric vector.") } } if(length(y) != nrow(x)){ stop("The ouput of f_ag is not the same length as nrow(x).") } y } #' Aggregate data #' #' Methods for aggregating numeric vectors, data frames, coins and purses. See individual method documentation #' for more details: #' #' * [Aggregate.data.frame()] #' * [Aggregate.coin()] #' * [Aggregate.purse()] #' #' @param x Object to be aggregated #' @param ... Further arguments to be passed to methods. #' #' @examples #' # see individual method documentation #' #' @return An object similar to the input #' #' @export Aggregate <- function(x, ...){ UseMethod("Aggregate") } #' Weighted arithmetic mean #' #' The vector of weights `w` is relative since the formula is: #' #' \deqn{ y = 1(\sum w) \sum wx } #' #' If `x` contains `NA`s, these `x` values and the corresponding `w` values are removed before applying the #' formula above. #' #' @param x A numeric vector. #' @param w A vector of numeric weights of the same length as `x`. 
#'
#' @examples
#' x <- c(1:10)
#' w <- c(10:1)
#' a_amean(x,w)
#'
#' @return The weighted mean as a scalar value
#'
#' @export
a_amean <- function(x, w){

  # Checks
  stopifnot(is.numeric(x),
            is.numeric(w),
            length(w) == length(x))

  if(any(is.na(w))){
    stop("w cannot contain NAs")
  }

  # remove w entries corresponding to NAs in x
  w <- w[!is.na(x)]
  # also x
  x <- x[!is.na(x)]

  if(length(x)==0){
    return(NA)
  }

  # w to sum to 1
  w <- w/sum(w)

  sum(w*x)
}

#' Weighted geometric mean
#'
#' Weighted geometric mean of a vector. `NA`s are skipped by default.
#'
#' This function replaces the now-defunct `geoMean()` from COINr < v1.0.
#'
#' @param x A numeric vector of positive values.
#' @param w A vector of weights, which should have length equal to `length(x)`. Weights are relative
#' and will be re-scaled to sum to 1. If `w` is not specified, defaults to equal weights.
#'
#' @examples
#' # a vector of values
#' x <- 1:10
#' # a vector of weights
#' w <- runif(10)
#' # weighted geometric mean
#' a_gmean(x,w)
#'
#' @return The geometric mean, as a numeric value.
#'
#' @export
a_gmean <- function(x, w = NULL){

  if(is.null(w)){
    # default equal weights
    w <- rep(1,length(x))
    message("No weights specified for geometric mean, using equal weights.")
  }

  if(any(!is.na(x))){

    if(any((x <= 0), na.rm = TRUE)){
      stop("Negative or zero values found when applying geometric mean. This doesn't work because geometric mean uses log. Normalise to remove negative/zero values first or use another aggregation method.")}

    # have to set any weights to NA to correspond to NAs in x
    w[is.na(x)] <- NA

    # calculate geom mean
    gm <- exp( sum(w * log(x), na.rm = TRUE)/sum(w, na.rm = TRUE) )

  } else {
    gm <- NA
  }

  gm
}

#' Weighted harmonic mean
#'
#' Weighted harmonic mean of a vector. `NA`s are skipped by default.
#'
#' This function replaces the now-defunct `harMean()` from COINr < v1.0.
#'
#' @param x A numeric vector of positive values.
#' @param w A vector of weights, which should have length equal to `length(x)`. Weights are relative
#' and will be re-scaled to sum to 1. If `w` is not specified, defaults to equal weights.
#'
#' @examples
#' # a vector of values
#' x <- 1:10
#' # a vector of weights
#' w <- runif(10)
#' # weighted harmonic mean
#' a_hmean(x,w)
#'
#' @return Weighted harmonic mean, as a numeric value.
#'
#' @export
a_hmean <- function(x, w = NULL){

  if(is.null(w)){
    # default equal weights
    w <- rep(1,length(x))
    message("No weights specified for harmonic mean, using equal weights.")
  }

  if(any(!is.na(x))){

    if(any(x == 0, na.rm = TRUE)){
      stop("Zero values found when applying harmonic mean. This doesn't work because harmonic mean uses 1/x. Normalise to remove zero values first or use another aggregation method.")}

    # have to set any weights to NA to correspond to NAs in x
    w[is.na(x)] <- NA

    hm <- sum(w, na.rm = TRUE)/sum(w/x, na.rm = TRUE)

  } else {
    hm <- NA
  }

  hm
}

#' Outranking matrix
#'
#' Constructs an outranking matrix based on a data frame of indicator data and corresponding weights.
#'
#' @param X A data frame or matrix of indicator data, with observations as rows and indicators
#' as columns. No other columns should be present (e.g. label columns).
#' @param w A vector of weights, which should have length equal to `ncol(X)`. Weights are relative
#' and will be re-scaled to sum to 1. If `w` is not specified, defaults to equal weights.
#'
#' @examples
#' # get a sample of a few indicators
#' ind_data <- COINr::ASEM_iData[12:16]
#' # calculate outranking matrix
#' outlist <- outrankMatrix(ind_data)
#' # see fraction of dominant pairs (robustness)
#' outlist$fracDominant
#'
#' @return A list with:
#' * `.$OutRankMatrix` the outranking matrix with `nrow(X)` rows and columns (matrix class).
#' * `.$nDominant` the number of dominance/robust pairs
#' * `.$fracDominant` the fraction of dominance/robust pairs
#'
#' @export
outrankMatrix <- function(X, w = NULL){

  stopifnot(is.data.frame(X) | is.matrix(X))

  if (!all(apply(X, 2, is.numeric))){
    stop("Non-numeric columns in input data frame or matrix not allowed.")
  }

  nInd <- ncol(X)
  nUnit <- nrow(X)

  if(is.null(w)){
    # default equal weights
    w <- rep(1,nInd)
    message("No weights specified for outranking matrix, using equal weights.")
  }

  # make w sum to 1
  w <- w/sum(w, na.rm = TRUE)

  # prep outranking matrix
  orm <- matrix(NA, nrow = nUnit, ncol = nUnit)

  for (ii in 1:nUnit){

    # get iith row, i.e. the indicator values of unit ii
    rowii <- X[ii,]

    for (jj in 1:nUnit){

      if (ii==jj){
        # diag vals are zero
        orm[ii, jj] <- 0
      } else if (ii>jj){
        # to save time, only calc upper triangle of matrix. If lower triangle, do 1-upper
        orm[ii, jj] <- 1 - orm[jj, ii]
      } else {

        # get jjth row, i.e. the indicator values of unit jj
        rowjj <- X[jj,]

        # get score. Sum of weights where ii scores higher than jj, and half sum of weights where they are equal
        orm[ii, jj] <- sum(
          sum(w[rowii > rowjj], na.rm = TRUE),
          sum(w[rowii == rowjj], na.rm = TRUE)/2,
          na.rm = TRUE)
      }
    }
  }

  # find number of dominance pairs
  ndom <- sum(orm==1, na.rm = TRUE)
  npairs <- (nUnit^2 - nUnit)/2
  prcdom <- ndom/npairs

  list(
    OutRankMatrix = orm,
    nDominant = ndom,
    fracDominant = prcdom)
}

#' Copeland scores
#'
#' Aggregates a data frame of indicator values into a single column using the Copeland method.
#' This function calls `outrankMatrix()`.
#'
#' The outranking matrix is transformed as follows:
#'
#' * values > 0.5 are replaced by 1
#' * values < 0.5 are replaced by -1
#' * values == 0.5 are replaced by 0
#' * the diagonal of the matrix is all zeros
#'
#' The Copeland scores are calculated as the row sums of this transformed matrix.
#'
#' This function replaces the now-defunct `copeland()` from COINr < v1.0.
#'
#' @param X A numeric data frame or matrix of indicator data, with observations as rows and indicators
#' as columns. No other columns should be present (e.g. label columns).
#' @param w A numeric vector of weights, which should have length equal to `ncol(X)`. Weights are relative
#' and will be re-scaled to sum to 1. If `w` is not specified, defaults to equal weights.
#'
#' @examples
#' # some example data
#' ind_data <- COINr::ASEM_iData[12:16]
#'
#' # Copeland scores with equal weights
#' scores <- a_copeland(ind_data)
#'
#' @return Numeric vector of Copeland scores.
#'
#' @export
a_copeland <- function(X, w = NULL){

  # get outranking matrix
  orm <- outrankMatrix(X, w)$OutRankMatrix

  orm[orm > 0.5] <- 1
  orm[orm == 0.5] <- 0
  orm[orm < 0.5] <- -1
  diag(orm) <- 0

  # get scores by summing across rows
  rowSums(orm, na.rm = TRUE)
}
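# The sketch below (not part of the package) illustrates the f_ag contract
# described in Aggregate.data.frame() above: any function taking a numeric
# vector `x` plus optional parameters can be passed by name. The function
# a_wmedian() is hypothetical, and the block is wrapped in if(FALSE) so that
# sourcing this file has no side effects.
if(FALSE){

  # a custom row-wise aggregation function: weighted median
  a_wmedian <- function(x, w){
    # drop weights corresponding to NAs in x, then the NAs themselves
    w <- w[!is.na(x)]
    x <- x[!is.na(x)]
    if(length(x) == 0) return(NA)
    # sort values and normalise weights to sum to 1
    ord <- order(x)
    x <- x[ord]
    w <- w[ord]/sum(w)
    # first value at which the cumulative weight reaches 0.5
    x[which(cumsum(w) >= 0.5)[1]]
  }

  X <- data.frame(i1 = c(1, 2, 3), i2 = c(4, NA, 6), i3 = c(7, 8, 9))
  Aggregate(X, f_ag = "a_wmedian", f_ag_para = list(w = c(1, 2, 1)))
}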
# end of R/aggregate.R
# EXTENDED AUDITING TOOLS

#' Check the effect of removing indicators or aggregates
#'
#' This is an analysis function for seeing what happens when elements of the composite indicator are removed. This can help with "what if"
#' experiments and acts as a different measure of the influence of each indicator or aggregate.
#'
#' One way of looking at indicator "importance" in a composite indicator is via correlations. A different way is to see what happens if we
#' remove the indicator completely from the framework. If removing an indicator or a whole aggregation of indicators results in very little
#' rank change, it is one indication that perhaps it is not necessary to include it. Emphasis on *one*: there may be many other things to take
#' into account.
#'
#' This function works by successively setting the weight of each indicator or aggregate to zero. If the analysis is performed at the indicator
#' level, it creates a copy of the coin, sets the weight of the first indicator to zero, regenerates the results, and compares to the nominal
#' results (results when no weights are set to zero). It repeats this for each indicator in turn, such that each time one indicator is set to
#' zero weight while the others retain their original weights. The output is a series of tables comparing scores and ranks (see Value).
#'
#' Note that "removing the indicator" here means more precisely "setting its weight to zero". In most cases the first implies the second,
#' but check that the aggregation method that you are using satisfies this relationship. For example, if the aggregation method does not
#' use any weights, then setting the weight to zero will have no effect.
#'
#' This function replaces the now-defunct `removeElements()` from COINr < v1.0.
#'
#' @param coin A coin class object, which must be constructed up to and including the aggregation step, i.e. using [Aggregate()].
#' @param Level The level at which to remove elements. For example, `Level = 1` would check the effect of removing each indicator, one at
#' a time. `Level = 2` would check the effect of removing each of the aggregation groups above the indicator level, one at a time.
#' @param iCode A character string indicating the indicator or aggregate code to extract from each iteration. I.e. normally this would be set to
#' the index code to compare the ranks of the index upon removing each indicator or aggregate. But it can be any code that is present in
#' `.$Data[[dset]]`.
#' @param quietly Logical: if `FALSE` (default) will output to the console an indication of progress. Might be useful when iterating over many
#' indicators. Otherwise set to `TRUE` to suppress these messages.
#' @param dset The name of the data set to take `iCode` from. Most likely this should be the name of the aggregated data set, typically `"Aggregated"`.
#'
#' @examples
#' # build example coin
#' coin <- build_example_coin(quietly = TRUE)
#'
#' # run function removing elements in level 3
#' l_res <- remove_elements(coin, Level = 3, dset = "Aggregated", iCode = "Index")
#'
#' # get summary of rank changes
#' l_res$MeanAbsDiff
#'
#' @return A list with elements as follows:
#' * `.$Scores`: a data frame where each column is the scores for each unit, with indicator/aggregate corresponding to the column name removed.
#' E.g. `.$Scores$Ind1` gives the scores resulting from removing "Ind1".
#' * `.$Ranks`: as above but ranks
#' * `.$RankDiffs`: as above but difference between nominal rank and rank on removing each indicator/aggregate
#' * `.$RankAbsDiffs`: as above but absolute rank differences
#' * `.$MeanAbsDiff`: as above, but the mean of each column. So it is the mean (over units) absolute rank change resulting from removing each
#' indicator or aggregate.
#'
#' @export
remove_elements <- function(coin, Level, dset, iCode, quietly = FALSE){

  ##----- Checks and Preps ----

  # check input first
  check_coin_input(coin)

  # number of levels
  nlev <- coin$Meta$maxlev

  if(Level %nin% 1:(nlev -1)){
    stop("Level must be between 1 (indicator level) and the number of levels minus one.")
  }

  # get scores of nominal and create table
  Scores <- get_data(coin, dset = dset, iCodes = iCode)
  stopifnot(!is.null(Scores$uCode),
            !is.null(Scores[[iCode]]))
  colnames(Scores)[colnames(Scores) == iCode] <- "Nominal"

  ##----- Get weights ----
  # this function uses setting weights to zero to remove things. In order to do this, we need the weights that were used to aggregate

  # Find which weights were used
  w_used <- coin$Log$Aggregate$w

  # now get the weights, and run some checks
  if(is.null(w_used)){
    # if this is NULL, then it means that the original weights were used
    if(is.null(coin$Meta$Weights$Original)){
      stop("No 'Original' weights found in .$Meta$Weights. This is required to use this function.")
    } else {
      # get original weights
      wts <- coin$Meta$Weights$Original
    }
  } else if (is.character(w_used)) {
    # agweights specified by a character string
    wts <- coin$Meta$Weights[[w_used]]
    # check this now exists
    if(is.null(wts)){
      stop("Cannot find specified weight set in .$Meta$Weights...")
    }
    # check is df
    if(!(is.data.frame(wts))){
      stop("Specified set of weights is not a data frame. Please check.")
    }
  } else if (is.data.frame(w_used)){
    wts <- w_used
  } else {
    stop("Argument 'w' to Aggregate() is not in the correct format. Should be NULL, a character string or data frame.")
  }

  ##----- Loop over inds or aggs ----

  # Now we start with the removing process. First get codes of the elements to remove
  # we get them from the weights df
  icodes <- wts$iCode[wts$Level == Level]

  # now we have to loop through these and set the weight of each to 0
  # Yes i am using a for loop because it is easier than forcing a map or apply. Deal with it! :)
  for (ii in 1:length(icodes)){

    if(!quietly){
      message(paste0("Iteration ", ii, " of ", length(icodes)))
    }

    # copy the coin and weights
    COIN2 <- coin
    wtsii <- wts

    # modify the weights. Set element to zero
    wtsii$Weight[wtsii$iCode == icodes[ii]] <- 0

    # add a new set of weights - copy of the existing weights used
    COIN2$Meta$Weights$Removed1 <- wtsii

    # point the aggregation log at the new weight set
    COIN2$Log$Aggregate$w <- "Removed1"

    # regenerate
    COIN2 <- Regen(COIN2, quietly = TRUE)

    # Extract the output of interest and add to Scores df
    newscores <- get_data(COIN2, dset = dset, iCodes = iCode)
    Scores <- merge(Scores, newscores, by = "uCode")
    colnames(Scores)[ncol(Scores)] <- icodes[ii]
  }

  # now we generate tables as the output

  # ranks
  ranktab <- rank_df(Scores)

  # rank changes
  rankchg <- ranktab
  rankchg[-1] <- as.data.frame(apply(rankchg[-1], 2, function(x) rankchg[2] - x))
  colnames(rankchg) <- colnames(ranktab)

  # absolute rank changes
  rankchgabs <- rankchg
  rankchgabs[-1] <- apply(rankchgabs[-1], 2, abs)

  # mean absolute rank changes
  MeanAbsDiff <- colMeans(rankchgabs[-1])

  # Output
  list(Scores = Scores,
       Ranks = ranktab,
       RankDiffs = rankchg,
       RankAbsDiffs = rankchgabs,
       MeanAbsDiff = MeanAbsDiff)
}
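# A minimal sketch of what one iteration of remove_elements() does, written
# out manually: copy the coin, zero one weight, regenerate, then compare.
# Assumes the built-in ASEM example, where "Physical" is an aggregation
# group code. Wrapped in if(FALSE) so that sourcing this file has no side
# effects.
if(FALSE){
  coin <- build_example_coin(quietly = TRUE)

  # copy the coin and set the weight of one group to zero
  coin2 <- coin
  w0 <- coin2$Meta$Weights$Original
  w0$Weight[w0$iCode == "Physical"] <- 0
  coin2$Meta$Weights$NoPhysical <- w0

  # point the aggregation log at the new weight set and regenerate
  coin2$Log$Aggregate$w <- "NoPhysical"
  coin2 <- Regen(coin2, quietly = TRUE)

  # compare index ranks with and without "Physical"
  compare_coins(coin, coin2, dset = "Aggregated", iCode = "Index",
                sort_by = "Abs.diff", decreasing = TRUE)
}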
# end of R/audit_tools.R
# FUNCTIONS AND METHODS FOR CHECKING AND GETTING DATA FROM COINS AND PURSES

# Check a purse to make sure has the expected format
check_purse <- function(x){

  if(!is.purse(x)){
    stop("Object is not tagged as an S3 purse class")
  }
  if(!is.data.frame(x)){
    stop("Object is not a data frame, which is required for a purse class")
  }
  if(is.null(x$Time)){
    stop("No 'Time' column found in purse - this is required.")
  }
  if(is.null(x$coin)){
    stop("No coin column found in purse - this is required.")
  }

  not_coins <- !sapply(x$coin, is.coin)
  if(any(not_coins)){
    stop("One or more entries in .$coin is not a coin class")
  }
}

# Stop if object is NOT coin class
check_coin_input <- function(x){

  if(is.purse(x)){
    stop("This function requires a coin input, and you have input a 'purse' object. Check if a purse method is available, or else apply the function individually to the coins in the purse.")
  }

  if(!is.coin(x)){
    stop("Input is not recognised as a coin class object.", call. = FALSE)
  }
}

# Stop if object is NOT purse class
check_purse_input <- function(x){

  if(!is.purse(x)){
    stop("Input is not recognised as a purse class object.", call. = FALSE)
  }
}

# Check for named data set in each coin of a purse
check_dset.purse <- function(x, dset, ...){

  stopifnot(is.purse(x),
            is.character(dset),
            length(dset)==1)

  for(tt in x$Time){
    coin <- x$coin[[which(x$Time == tt)]]
    if(is.null(coin$Data[[dset]])){
      stop("Required data set '", dset, "' not found in coin at Time = ", tt)
    }
  }
}

# Check for named data set in a coin
check_dset.coin <- function(x, dset, ...){

  stopifnot(is.coin(x),
            is.character(dset),
            length(dset)==1)

  if(is.null(x$Data[[dset]]) & (dset != "uMeta") ){
    stop("Required data set '", dset, "' not found in coin object.")
  }
}

# Check for named data set (generic)
check_dset <- function(x, dset, ...){
  UseMethod("check_dset")
}

#' Gets a named data set and performs checks
#'
#' A helper function to retrieve a named data set from a purse object. Retrieves the specified data set
#' from each coin in the purse and joins them together in a single data frame using [rbind()], indexed
#' with a `Time` column.
#'
#' @param x A purse class object
#' @param dset A character string corresponding to a named data set within each coin `.$Data`. E.g. `"Raw"`.
#' @param Time Optional time index to extract from a subset of the coins present in the purse. Should be a
#' vector containing one or more entries in `x$Time` or `NULL` to return all (default).
#' @param also_get A character vector specifying any columns to attach to the data set that are *not*
#' indicators or aggregates. These will be e.g. `uName`, groups, denominators or columns labelled as "Other"
#' in `iMeta`. These columns are stored in `.$Meta$Unit` to avoid repetition. Set `also_get = "all"` to
#' attach all columns, or set `also_get = "none"` to return only numeric columns, i.e. no `uCode` column.
#' @param ... arguments passed to or from other methods.
#'
#' @examples
#' # build example purse
#' purse <- build_example_purse(up_to = "new_coin", quietly = TRUE)
#'
#' # get raw data set
#' df1 <- get_dset(purse, dset = "Raw")
#'
#' @return Data frame of indicator data.
#'
#' @export
get_dset.purse <- function(x, dset, Time = NULL, also_get = NULL, ...){

  # check specified dset exists
  check_dset(x, dset)

  if(!is.null(Time)){
    if(any(Time %nin% x$Time)){
      stop("One or more entries in Time not found in the Time column of the purse.")
    }
    coins <- x$coin[x$Time %in% Time]
  } else {
    coins <- x$coin
  }

  # extract data sets in one df
  iDatas <- lapply(coins, function(coin){
    iData <- get_dset(coin, dset = dset, also_get = setdiff(also_get, "Time"))
    iData <- cbind(Time = coin$Meta$Unit$Time[[1]], iData)
  })
  iData <- Reduce(rbind, iDatas)

  # sometimes we get two "Time" cols - here make sure only 1 (remove duplicate cols)
  iData <- iData[unique(names(iData))]

  # may have to additionally remove Time col
  # (index with [1] for safety, since also_get can be a vector)
  if(!is.null(also_get)){
    if(also_get[1] == "none"){
      iData <- iData[names(iData) != "Time"]
    }
  }

  iData
}

#' Gets a named data set and performs checks
#'
#' A helper function to retrieve a named data set from the coin object. Also performs input checks at the
#' same time.
#'
#' If `also_get` is not specified, this will return the indicator columns with the `uCode` identifiers
#' in the first column. Optionally, `also_get` can be specified to attach other metadata columns, or
#' to only return the numeric (indicator) columns with no identifiers. This latter option might be useful
#' for e.g. examining correlations.
#'
#' @param x A coin class object
#' @param dset A character string corresponding to a named data set within `.$Data`. E.g. `"Raw"`.
#' @param also_get A character vector specifying any columns to attach to the data set that are *not*
#' indicators or aggregates. These will be e.g. `uName`, groups, denominators or columns labelled as "Other"
#' in `iMeta`. These columns are stored in `.$Meta$Unit` to avoid repetition. Set `also_get = "all"` to
#' attach all columns, or set `also_get = "none"` to return only numeric columns, i.e. no `uCode` column.
#' @param ... arguments passed to or from other methods.
#'
#' @examples
#' # build example coin, just up to raw dset for speed
#' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
#'
#' # retrieve raw data set with added cols
#' get_dset(coin, dset = "Raw", also_get = c("uName", "GDP_group"))
#'
#' @return Data frame of indicator data.
#'
#' @export
get_dset.coin <- function(x, dset, also_get = NULL, ...){

  # check specified dset exists
  check_dset(x, dset)

  # get dset
  if(dset != "uMeta"){

    iData <- x$Data[[dset]]

    if(!is.null(also_get)){

      if(also_get[1] == "none"){

        iData <- iData[names(iData) != "uCode"]

      } else {

        uMeta <- x$Meta$Unit

        if(is.null(uMeta)){
          stop("Unit metadata not found in coin.")
        }

        if(length(also_get) == 1){
          if(also_get == "all"){
            uMeta_codes <- colnames(uMeta)
          } else {
            uMeta_codes <- also_get
          }
        } else {
          uMeta_codes <- also_get
        }

        # check entries in also_get exist
        if(any(uMeta_codes %nin% colnames(uMeta))){
          stop("Entries in also_get not recognised - see function help.")
        }

        uMeta <- uMeta[union("uCode", uMeta_codes)]

        iData <- merge(uMeta, iData, by = "uCode", all.x = FALSE, all.y = TRUE)
      }
    }

  } else {

    # get uMeta
    iData <- x$Meta$Unit
    if(is.null(iData)){
      stop("Unit metadata (uMeta) not found in coin!")
    }
  }

  iData
}

#' Gets a named data set and performs checks
#'
#' A helper function to retrieve a named data set from coin or purse objects. See individual
#' documentation on:
#'
#' * [get_dset.coin()]
#' * [get_dset.purse()]
#'
#' @param x A coin or purse
#' @param dset A character string corresponding to a named data set within `.$Data`. E.g. `"Raw"`.
#' @param ... arguments passed to or from other methods.
#'
#' @examples
#' # see examples for methods
#'
#' @return Data frame of indicator data, indexed also by time if input is a purse.
#'
#' @export
get_dset <- function(x, dset, ...){
  UseMethod("get_dset")
}

#' Get subsets of indicator data
#'
#' A flexible function for retrieving data from a coin, from a specified data set. Subsets of data can
#' be returned based on selection of columns, using the `iCodes` and `Level` arguments, and by filtering
#' rowwise using the `uCodes` and `use_group` arguments. The `also_get` argument also allows unit metadata
#' columns to be attached, such as names, groups, and denominators.
#'
#' The `iCodes` argument can be used to directly select named indicators, i.e. setting `iCodes = c("a", "b")`
#' will select indicators "a" and "b", attaching any extra columns specified by `also_get`. However,
#' using this in conjunction with the `Level` argument returns named groups of indicators. For example,
#' setting `iCodes = "Group1"` (for e.g. an aggregation group in Level 2) and `Level = 1` will return
#' all indicators in Level 1, belonging to "Group1".
#'
#' Rows can also be subsetted. The `uCodes` argument can be used to select specified units in the same
#' way as `iCodes`. Additionally, the `use_group` argument filters to specified groups. If `uCodes` is
#' specified, and `use_group` refers to a named group column, then it will return all units in the
#' groups that the `uCodes` belong to. This is useful for putting a unit into context with its peers
#' based on some grouping variable.
#'
#' Note that if you want to retrieve a whole data set (with no column/row subsetting), use the
#' [get_dset()] function which should be slightly faster.
#'
#' @param x A coin class object
#' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`.
#' @param iCodes Optional indicator codes to retrieve. If `NULL` (default), returns all iCodes found in
#' the selected data set. Can also refer to indicator groups. See details.
#' @param Level Optionally, the level in the hierarchy to extract data from. See details.
#' @param uCodes Optional unit codes to filter rows of the resulting data set. Can also be used in conjunction
#' with groups. See details.
#' @param use_group Optional group to filter rows of the data set. Specified as `list(Group_Var = Group)`,
#' where `Group_Var` is a Group_ column that must be present in the selected data set, and `Group` is a specified group
#' inside that grouping variable. This filters the selected data to only include rows from the specified group. Can
#' also be used in conjunction with `uCodes` -- see details.
#' @param also_get A character vector specifying any columns to attach to the data set that are *not*
#' indicators or aggregates. These will be e.g. `uName`, groups, denominators or columns labelled as "Other"
#' in `iMeta`. These columns are stored in `.$Meta$Unit` to avoid repetition. Set `also_get = "all"` to
#' attach all columns, or set `also_get = "none"` to return only numeric columns, i.e. no `uCode` column.
#' @param ... arguments passed to or from other methods.
#'
#' @examples
#' # build example coin
#' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
#'
#' # get all indicators in "Political" group
#' x <- get_data(coin, dset = "Raw", iCodes = "Political", Level = 1)
#' head(x, 5)
#'
#' # see vignette("data_selection") for more examples
#'
#' @return A data frame of indicator data according to specifications.
#' @export get_data.coin <- function(x, dset, iCodes = NULL, Level = NULL, uCodes = NULL, use_group = NULL, also_get = NULL, ...){ # CHECKS ------------------------------------------------------------------ coin <- x check_coin_input(coin) # get iMeta and maxlev iMeta <- coin$Meta$Ind maxlev <- coin$Meta$maxlev # check Level if(!is.null(Level)){ stopifnot(is.numeric(Level), length(Level) == 1) if(Level %nin% 1:maxlev){ stop("Level is not in 1:(max level).") } if(dset == "uMeta"){ # if it's uMeta we don't worry about levels Level <- NULL } } # check groups and get names if(!is.null(use_group)){ stopifnot(length(use_group)==1) if(is.list(use_group)){ groupcol <- names(use_group) groupsel <- use_group[[1]] if(length(groupsel) > 1){ stop("Only one group can be selected by groupsel.") } } else if (is.character(use_group)){ groupcol <- use_group groupsel <- NULL } } else { groupcol <- NULL groupsel <- NULL } # GET DSET ---------------------------------------------------------------- # if we have to filter by group, we also have to get group cols # we also probably need uCode in any case (can be deleted later) remove_meta <- FALSE if(!is.null(also_get)){ if(is.null(use_group)){ # we don't need any group cols, take also_get as is # if none, we still probably need uCode, so set NULL if(also_get[1] == "none"){ also_get <- NULL remove_meta <- TRUE } } else { if(also_get[1] == "none"){ also_get <- c("uCode", groupcol) remove_meta <- TRUE } else { also_get <- unique(c(also_get, groupcol)) } } } else { also_get <- groupcol } iData <- get_dset(coin, dset = dset, also_get = also_get) # make sure group can be found in group col, if specified if(!is.null(groupsel)){ if(groupsel %nin% iData[[groupcol]]){ stop("Selected group not found in specified group column.") } } # col names that are NOT indicators not_iCodes <- names(iData)[names(iData) %in% names(coin$Meta$Unit)] # COLUMNS ----------------------------------------------------------------- # We have iCodes and Level to think about here if(!is.null(iCodes)){ # first check iCodes are findable if(any(iCodes %nin% iMeta$iCode)){ stop("One or more iCodes not found in iMeta.") } # check which level iCodes are from Lev_iCodes <- unique(iMeta$Level[iMeta$iCode %in% iCodes]) # check not from different levels if(length(Lev_iCodes) != 1){ stop("iCodes are from different Levels - this is not allowed.") } if(is.null(Level)){ # no Level specified: take iCodes as given cols <- iCodes } else { # get lineage lin <- coin$Meta$Lineage # get cols to select cols <- unique(lin[[Level]][lin[[Lev_iCodes]] %in% iCodes]) } # select columns if(any(cols %nin% names(iData))){ stop("Selected iCodes not found in data set. If Level > 1 you need to target an aggregated data set.") } if(dset == "uMeta"){ iData1 <- iData[unique(c("uCode", groupcol, cols, also_get))] } else { iData1 <- iData[c(not_iCodes, cols)] } } else if (!is.null(Level)) { # iCodes not specified, but Level specified # This means we take everything from specified level, if available cols <- iMeta$iCode[iMeta$Level == Level] cols <- cols[!is.na(cols)] # select columns if(any(cols %nin% names(iData))){ stop("Selected iCodes not found in data set. 
If Level > 1 you need to target an aggregated data set.")
    }

    iData1 <- iData[c(not_iCodes, cols)]

  } else {

    # no iCodes or Level specified
    # no column selection
    iData1 <- iData
  }

  # ROWS --------------------------------------------------------------------

  if(!is.null(uCodes)){

    # check uCodes can be found
    if(any(uCodes %nin% iData$uCode)){
      stop("One or more uCodes not found in the selected data set.")
    }

    if(!is.null(use_group)){
      # We have uCodes AND group specification

      # filter to group(s) containing units
      if(is.null(groupsel)){
        # groups containing units
        uGroups <- unique(iData1[[groupcol]][iData1$uCode %in% uCodes])
        # filter to these groups
        iData2 <- iData1[iData1[[groupcol]] %in% uGroups, ]
      } else {
        # if we have a specified group within a column, AND uCodes, we give preference to uCodes
        iData2 <- iData1[iData1$uCode %in% uCodes, ]
      }

    } else {
      # no groups specified -
      # filter to selected units
      iData2 <- iData1[iData1$uCode %in% uCodes, ]
    }

  } else if (!is.null(use_group)){
    # groups specified, but no uCodes
    # select a whole group

    if(is.null(groupsel)){
      # silly case where only a col is specified, but no actual group. Hence no filtering
      iData2 <- iData1
    } else {
      # proper group selection
      iData2 <- iData1[iData1[[groupcol]] == groupsel, ]
    }

  } else {
    # no row filtering
    iData2 <- iData1
  }

  # OUTPUT ------------------------------------------------------------------

  if(remove_meta){
    iData2 <- iData2[names(iData2) %nin% not_iCodes]
  }

  iData2
}

#' Get subsets of indicator data
#'
#' This retrieves data from a purse. It functions in a similar way to [get_data.coin()] but has the
#' additional `Time` argument to allow selection based on the point(s) in time.
#'
#' @param x A purse class object
#' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`.
#' @param iCodes Optional indicator codes to retrieve. If `NULL` (default), returns all iCodes found in
#' the selected data set. Can also refer to indicator groups. See details.
#' @param Level Optionally, the level in the hierarchy to extract data from. See details.
#' @param uCodes Optional unit codes to filter rows of the resulting data set. Can also be used in conjunction
#' with groups. See details.
#' @param use_group Optional group to filter rows of the data set. Specified as `list(Group_Var = Group)`,
#' where `Group_Var` is a Group_ column that must be present in the selected data set, and `Group` is a specified group
#' inside that grouping variable. This filters the selected data to only include rows from the specified group. Can
#' also be used in conjunction with `uCodes` -- see details.
#' @param Time Optional time index to extract from a subset of the coins present in the purse. Should be a
#' vector containing one or more entries in `x$Time` or `NULL` to return all (default).
#' @param also_get A character vector specifying any columns to attach to the data set that are *not*
#' indicators or aggregates. These will be e.g. `uName`, groups, denominators or columns labelled as "Other"
#' in `iMeta`. These columns are stored in `.$Meta$Unit` to avoid repetition. Set `also_get = "all"` to
#' attach all columns, or set `also_get = "none"` to return only numeric columns, i.e. no `uCode` column.
#' @param ... arguments passed to or from other methods.
#'
#' @examples
#' # build example purse
#' purse <- build_example_purse(up_to = "new_coin", quietly = TRUE)
#'
#' # get specified indicators for specific years, for specified units
#' get_data(purse, dset = "Raw",
#'          iCodes = c("Lang", "Forest"),
#'          uCodes = c("AUT", "CHN", "DNK"),
#'          Time = c(2019, 2020))
#'
#' @return A data frame of indicator data indexed by a "Time" column.
#' @export
get_data.purse <- function(x, dset, iCodes = NULL, Level = NULL, uCodes = NULL,
                           use_group = NULL, Time = NULL, also_get = NULL, ...){

  # NOTE I'll probably need to deal with the problem of groups at some point: since different coins
  # can have different units, there may be groups available in some coins and not in others. I fixed
  # the equivalent problem with units but will leave the groups issue for the moment.

  # check specified dset exists
  check_dset(x, dset)

  if(!is.null(Time)){
    if(any(Time %nin% x$Time)){
      stop("One or more entries in Time not found in the Time column of the purse.")
    }
    coins <- x$coin[x$Time %in% Time]
  } else {
    coins <- x$coin
  }

  # extract data sets in one df
  iDatas <- lapply(coins, function(coin){

    # we first have to check which units are available (different coins can have different units)
    uCodes_avail <- coin$Data[[dset]][["uCode"]]
    # we retrieve only the uCodes that are requested AND available
    if(!is.null(uCodes)){
      uCodes_get <- intersect(uCodes, uCodes_avail)
    } else {
      uCodes_get <- uCodes_avail
    }

    if(length(uCodes_get) > 0){
      # get data
      iData <- get_data(coin, dset = dset, iCodes = iCodes, Level = Level, uCodes = uCodes_get,
                        use_group = use_group, also_get = also_get)
      # bind on time (note, this is only one year anyway, hence no merge needed)
      iData <- cbind(Time = coin$Meta$Unit$Time[[1]], iData)
    }
  })
  df_out <- Reduce(rbind, iDatas)

  # this is a check in case uCodes not found anywhere at all
  if(is.null(df_out)){
    stop("Selected uCode(s) not found in any coins in the purse.", call. = FALSE)
  }

  df_out
}

#' Get subsets of indicator data
#'
#' A helper function to retrieve subsets of indicator data from coin or purse objects. See individual method
#' documentation:
#'
#' * [get_data.coin()]
#' * [get_data.purse()]
#'
#' This function replaces the now-defunct `getIn()` from COINr < v1.0.
#'
#' @param x A coin or purse
#' @param ... Arguments passed to methods
#'
#' @examples
#' # see individual method documentation
#'
#' @return Data frame of indicator data, indexed also by time if input is a purse.
#'
#' @export
get_data <- function(x, ...){
  UseMethod("get_data")
}

# A helper function to separate indicator cols from metadata columns in iData.
extract_iData <- function(coin, iData, GET){ # indicator cols iCodes <- names(iData)[names(iData) %nin% names(coin$Meta$Unit)] if(GET == "iCodes"){ iCodes } else if (GET == "iData_"){ iData[iCodes] } else if (GET == "meta"){ iData[colnames(iData) %nin% iCodes] } else if (GET == "mCodes"){ setdiff(colnames(iData), iCodes) } } # Given either uCodes or iCodes, returns uNames or iNames get_names <- function(coin, uCodes = NULL, iCodes = NULL){ if(!is.null(uCodes) && !is.null(iCodes)){ stop("Either uCodes or iCodes, not both.") } if(!is.null(uCodes)){ uNames <- coin$Meta$Unit$uName if(is.null(uNames)){stop("uNames not found")} if(any(uCodes %nin% coin$Meta$Unit$uCode)){ stop("One or more uCodes not found in .$Meta$Unit$uCode") } uNames[match(uCodes, coin$Meta$Unit$uCode)] } else if (!is.null(iCodes)){ iNames <- coin$Meta$Ind$iName if(is.null(iNames)){stop("iNames not found")} if(any(iCodes %nin% coin$Meta$Ind$iCode)){ stop("One or more iCodes not found in .$Meta$Ind$iCode") } iNames[match(iCodes, coin$Meta$Ind$iCode)] } } # Given iCodes, returns corresponding units if available. get_units <- function(coin, iCodes = NULL){ iUnits <- coin$Meta$Ind$Unit if(is.null(iUnits)){ return(NULL) } if(any(iCodes %nin% coin$Meta$Ind$iCode)){ stop("One or more iCodes not found in .$Meta$Ind$iCode") } iUnits[match(iCodes, coin$Meta$Ind$iCode)] }
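# A short sketch of the row/column selection logic of get_data() described
# above, using the built-in ASEM example (the codes "Political", "GDP_group"
# and "IND" are taken from that example data). Wrapped in if(FALSE) so that
# sourcing this file has no side effects.
if(FALSE){
  coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)

  # columns: all level-1 indicators belonging to the "Political" group;
  # rows: all units in the same GDP group(s) as the unit "IND"
  get_data(coin, dset = "Raw", iCodes = "Political", Level = 1,
           uCodes = "IND", use_group = "GDP_group")
}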
# end of R/check_and_get.R
#' Import data directly from COIN Tool #' #' The [COIN Tool](https://knowledge4policy.ec.europa.eu/composite-indicators/coin-tool_en) is an Excel-based tool #' for building composite indicators. This function provides a direct interface for reading a COIN Tool input deck and #' converting it to COINr. You need to provide a COIN Tool file, with the "Database" sheet properly compiled. #' #' This function replaces the now-defunct `COINToolIn()` from COINr < v1.0. #' #' @param fname The file name and path to read, e.g. `"C:/Documents/COINToolFile.xlsx"`. #' @param makecodes Logical: if `TRUE`, will generate short indicator codes based on indicator names, #' otherwise if `FALSE`, will use COIN Tool indicator codes `"Ind.01"`, etc. Currently only does this #' for indicators, not aggregation groups. #' @param oldtool Logical: if `TRUE`, compatible with old COIN Tool (pre-release, early 2019 or earlier). #' There are some minor differences on where the elements are found. #' @param out2 Either `"list"` (default) to output a list with `iData` and `iMeta` entries (for input into [new_coin()]), #' else `"coin"` to output a coin. #' #' @importFrom readxl read_excel cell_limits #' #' @examples #' \dontrun{ #' ## This example downloads a COIN Tool spreadsheet containing example data, #' ## saves it to a temporary directory, unzips, and reads into R. Finally it #' ## assembles it into a COIN. #' #' # Make temp zip filename in temporary directory #' tmpz <- tempfile(fileext = ".zip") #' # Download an example COIN Tool file to temporary directory #' # NOTE: the download.file() command may need its "method" option set to a #' # specific value depending on the platform you run this on. You can also #' # choose to download/unzip this file manually. #' download.file("https://knowledge4policy.ec.europa.eu/sites/default/ #' files/coin_tool_v1_lite_exampledata.zip", tmpz) #' # Unzip #' CTpath <- unzip(tmpz, exdir = tempdir()) #' # Read COIN Tool into R #' l <- import_coin_tool(CTpath, makecodes = TRUE) } #' #' @return Either a list or a coin, depending on `out2` #' #' @export import_coin_tool <- function(fname, makecodes = FALSE, oldtool = FALSE, out2 = "list"){ #----- GET IndData -----# # Get the main data first ind_data_only <- suppressMessages(readxl::read_excel(fname, range = "E16:CY315", na = "n/a", col_types = "numeric", sheet = "Database")) # Delete any rows and cols with all NAs ind_data_only <- ind_data_only[rowSums(is.na(ind_data_only )) != ncol(ind_data_only ), ] ind_data_only <- ind_data_only[,colSums(is.na(ind_data_only )) != nrow(ind_data_only )] # Reference points lastcol <- ncol(ind_data_only)+4 lastrow <- nrow(ind_data_only)+16 # Unit names and codes (together) UnitNamesCodes <- suppressMessages( readxl::read_excel(fname, range = readxl::cell_limits(c(17, 2), c(lastrow, 3)), col_types = "text", col_names = FALSE, sheet = "Database")) # Assemble IndData IndData <- as.data.frame(cbind(UnitNamesCodes, ind_data_only)) colnames(IndData)[1:2] <- c("uName", "uCode") #----- GET IndMeta -----# # IndMeta (partial) IndMeta1 <- suppressMessages( readxl::read_excel(fname, range = readxl::cell_limits(c(11, 5), c(16, lastcol)), col_types = "text", col_names = FALSE, sheet = "Database") ) # Put into tidy format IndMeta1 <- rev(as.data.frame(t(IndMeta1))) # Sort out aggregation columns aggcols <- IndMeta1[3:6] # Any cols with all same agg codes - means that one of the COIN Tool levels was not used fakecols <- sapply(aggcols, function(x) length(unique(x))==1) stopifnot(is.logical(fakecols)) # I have to manually 
set the last col to FALSE because this is the Index col and I want to keep fakecols[4]<-FALSE # Now aggcols with any fake rows removed aggcols <- aggcols[!fakecols] # Name cols already, avoids problems later colnames(aggcols) <- paste0("Agg",1:ncol(aggcols)) # Weights, directions, goalposts IndMeta2 <- suppressMessages( readxl::read_excel(fname, range = readxl::cell_limits(c(7, 5), c(10, lastcol)), col_types = "numeric", col_names = FALSE, sheet = "Database")) # Put into tidy format IndMeta2 <- rev(as.data.frame(t(IndMeta2))) # Assemble IndMeta IndMeta <- as.data.frame(cbind(IndMeta1[1:2], IndMeta2, aggcols)) colnames(IndMeta)[1:6] <- c("IndCode", "IndName", "GPupper", "GPlower", "Direction", "IndWeight") #----- Get AggMeta -----# # Read in aggmeta cols # this is the only diff with the older CT - framework rows are 1 further down if(oldtool){ AggMetaIn <- readxl::read_excel(fname, range = "C5:H53", col_names = TRUE, sheet = "Framework") } else { AggMetaIn <- readxl::read_excel(fname, range = "C4:H52", col_names = TRUE, sheet = "Framework") } # Delete empty rows AggMetaIn <- AggMetaIn[AggMetaIn$`Dimension/indicator` != "--", ] #dplyr::filter(AggMetaIn,.data$`Dimension/indicator`!="--") # Get rid of cols we don't want AggMetaIn <- as.data.frame(cbind(0, AggMetaIn[c(1,6,3)])) # Rename cols colnames(AggMetaIn) <- c("AgLevel", "Code", "Name", "Weight") # Put in correct levels levs <- c("sp.", "p.", "si.", "Index") # remove any unused level codes levs <- levs[!fakecols] levno <- 1:length(levs)+1 for(ii in 1:length(levno)){ AggMetaIn$AgLevel[startsWith(AggMetaIn$Code, levs[ii])] <- levno[ii] } AggMetaIn <- AggMetaIn[AggMetaIn$AgLevel != 0,] #----- Finish up -----# # generate indicator codes if asked if(makecodes){ IndMeta$IndCode <- names_to_codes(IndMeta$IndName) colnames(IndData)[3:ncol(IndData)] <- IndMeta$IndCode #AggMetaIn$Code <- names_to_codes(AggMetaIn$Name) } message(paste0("Imported ", ncol(ind_data_only), " indicators and ", nrow(ind_data_only), " units.")) # convert to new coin format (done this way to avoid rewriting the above code) COIN_to_coin(list(IndData = IndData, IndMeta = IndMeta, AggMeta = AggMetaIn), recover_dsets = FALSE, out2 = out2) } #' Generate short codes from long names #' #' Given a character vector of long names (probably with spaces), generates short codes. #' Intended for use when importing from the COIN Tool. #' #' This function replaces the now-defunct `names2Codes()` from COINr < v1.0. #' #' @param cvec A character vector of names #' @param maxword The maximum number of words to use in building a short name (default 2) #' @param maxlet The number of letters to take from each word (default 4) #' #' @examples #' # get names from example data #' iNames <- ASEM_iMeta$iName #' #' # convert to codes #' names_to_codes(iNames) #' #' @seealso #' * [import_coin_tool()] Import data from the COIN Tool (Excel). #' #' @return A corresponding character vector, but with short codes, and no duplicates. #' #' @export names_to_codes <- function(cvec, maxword=2, maxlet=4){ # There is definitely a better way to do this with lapply or similar, but for now... 
codes <- cvec

  for (ii in 1:length(cvec)){

    cvecii <- cvec[ii]

    # first, split into separate elements using spaces, and remove words of fewer than four chars
    st2 <- unlist(strsplit(gsub('\\b\\w{1,3}\\s','',cvecii), " +"))

    nwords <- min(c(length(st2),maxword))

    # now take the first maxword words, and the first maxlet chars of each word
    st3 <- substr(st2[1:nwords],start=1,stop=maxlet)

    # capitalise first letter of each word
    st3 <- gsub("\\b([[:lower:]])([[:lower:]]+)", "\\U\\1\\L\\2", st3, perl = TRUE)

    # collapse back to one string and add to new vector
    codes[ii] <- paste(st3, collapse = '')
  }

  # if we have any duplicates, make unique
  make.unique(codes, "_")
}
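# A quick illustration of the steps in names_to_codes(): words of three or
# fewer characters are dropped, the first maxword words are truncated to
# maxlet characters, capitalised and pasted together. Wrapped in if(FALSE)
# so that sourcing this file has no side effects.
if(FALSE){
  names_to_codes(c("Political stability", "Political participation"))
  # expected: "PoliStab" "PoliPart" (any duplicates would be made unique
  # with "_" suffixes by make.unique())
}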
# end of R/coinToolIn.R
#' Compare two coins #' #' Compares two coin class objects using a specified `iCode` (column of data) from specified data sets. #' #' This function replaces the now-defunct `compTable()` from COINr < v1.0. #' #' @param coin1 A coin class object #' @param coin2 A coin class object #' @param dset A data set that is found in `.$Data`. #' @param iCode The name of a column that is found in `.$Data[[dset]]`. #' @param also_get Optional metadata columns to attach to the table: see [get_data()]. #' @param compare_by Either `"ranks"` which produces a comparison using ranks, or else `"scores"`, which instead #' uses scores. Note that scores may be very different if the methodology is different from one coin to another, #' e.g. for different normalisation methods. #' @param sort_by Optionally, a column name of the output data frame to sort rows by. Can be either #' `"coin.1"`, `"coin.2"`, `"Diff"`, `"Abs.diff"` or possibly a column name imported using `also_get`. #' @param decreasing Argument to pass to [order()]: how to sort. #' #' @examples #' # build full example coin #' coin <- build_example_coin(quietly = TRUE) #' #' # copy coin #' coin2 <- coin #' #' # change to prank function (percentile ranks) #' # we don't need to specify any additional parameters (f_n_para) here #' coin2$Log$Normalise$global_specs <- list(f_n = "n_prank") #' #' # regenerate #' coin2 <- Regen(coin2) #' #' # compare index, sort by absolute rank difference #' compare_coins(coin, coin2, dset = "Aggregated", iCode = "Index", #' sort_by = "Abs.diff", decreasing = TRUE) #' #' @return A data frame of comparison information. #' #' @export compare_coins <- function(coin1, coin2, dset, iCode, also_get = NULL, compare_by = "ranks", sort_by = NULL, decreasing = FALSE){ # CHECKS ------------------------------------------------------------------ check_coin_input(coin1) check_coin_input(coin2) stopifnot(length(iCode) == 1, compare_by %in% c("ranks", "scores")) if(!is.null(also_get)){ if(also_get == "none"){ stop("also_get cannot be set to 'none': at a minimum uCodes are needed for matching.") } } # GET DATA AND MERGE ------------------------------------------------------ # get selected CODE plus ISO3 for matching df1 <- get_data(coin1, dset = dset, iCodes = iCode, also_get = also_get) df2 <- get_data(coin2, dset = dset, iCodes = iCode, also_get = also_get) # merge the two tables and convert to ranks if needed df12 <- merge(df1, df2, by = "uCode", all.x = TRUE, all.y = TRUE) if(compare_by == "ranks"){ df12 <- rank_df(df12) } # get meta columns for each coin - this needs to be done after merge because there could be some NAs m1_codes <- extract_iData(coin1, iData = df1, GET = "mCodes") m1_codes <- setdiff(m1_codes, "uCode") m2_codes <- extract_iData(coin2, iData = df2, GET = "mCodes") m2_codes <- setdiff(m2_codes, "uCode") if(!setequal(m1_codes, m2_codes)){ stop("Different metadata columns returned by coin1 and coin2.") } # sort out meta cols, if there are any if(length(m1_codes) > 0){ m1 <- df12[paste0(m1_codes, ".x")] m2 <- df12[paste0(m2_codes, ".y")] # replace any NAs from one df with the other # from here m1 is what we will put in the output table m1[is.na(m1)] <- m2[is.na(m1)] colnames(m1) <- gsub('.x', '', colnames(m1)) } # get iCode cols idat1 <- df12[[paste0(iCode,".x")]] idat2 <- df12[[paste0(iCode,".y")]] # OUTPUT ------------------------------------------------------------------ # assemble the table if(length(m1_codes) > 0){ dfout <- data.frame(uCode = df12$uCode, m1, # meta data excluding uCode coin.1 = idat1, coin.2 = idat2, 
Diff = idat1 - idat2,
                        Abs.diff = abs(idat1 - idat2))
  } else {
    dfout <- data.frame(uCode = df12$uCode,
                        coin.1 = idat1,
                        coin.2 = idat2,
                        Diff = idat1 - idat2,
                        Abs.diff = abs(idat1 - idat2))
  }

  # SORT
  if(is.null(sort_by)){
    dfout
  } else {
    if(sort_by %nin% names(dfout)){
      stop("sort_by not recognised...")
    }
    dfout[order(dfout[[sort_by]], decreasing = decreasing), ]
  }

}

#' Compare multiple coins
#'
#' Given multiple coins as a list, generates a rank comparison of a single indicator or aggregate which is specified
#' by the `dset` and `iCode` arguments (passed to [get_data()]). The indicator or aggregate targeted must be available
#' in all the coins in `coins`.
#'
#' By default, the ranks of the target indicator/aggregate of each coin will be merged using the `uCode`s within each coin.
#' Optionally, specifying `also_get` (passed to [get_data()]) will additionally merge using the metadata columns.
#' This means that coins must share the same metadata columns that are returned as a result of `also_get`.
#'
#' This function replaces the now-defunct `compTableMulti()` from COINr < v1.0.
#'
#' @param coins A list of coins. If names are provided, these will be used in the tables returned by this function.
#' @param tabtype The type of table to generate. One of:
#' * `"Values"`: returns a data frame of rank values for each coin provided, plus a `uCode` column
#' * `"Diffs"`: returns a data frame of rank differences between the base coin and each other coin (see `ibase`)
#' * `"AbsDiffs"`: as `"Diffs"` but absolute rank differences are returned
#' * `"All"`: returns all of the three previous rank tables, as a list of data frames
#' @param ibase The index of the coin to use as a base comparison (default first coin in list)
#' @param sort_table If `TRUE`, sorts by the base coin (`ibase`) (default).
#' @param dset The name of a data set found in `.$Data`. See [get_data()].
#' @param iCode A column name of the data set targeted by `dset`. See [get_data()].
#' @param also_get Optional metadata columns to attach to the table: see [get_data()]. If this is not specified, the
#' results of each coin will be merged using the `uCode`s within each coin. If this is specified, results will be
#' merged additionally using the metadata columns. This means that coins must share the same metadata columns that
#' are returned as a result of `also_get`.
#' @param compare_by Either `"ranks"` which produces a comparison using ranks, or else `"scores"`, which instead
#' uses scores. Note that scores may be very different if the methodology is different from one coin to another,
#' e.g. for different normalisation methods.
#'
#' @examples
#' # see vignette("adjustments")
#'
#' @return Data frame unless `tabtype = "All"`, in which case a list of three data frames is returned.
#' #' @export #' compare_coins_multi <- function(coins, dset, iCode, also_get = NULL, tabtype = "Values", ibase = 1, sort_table = TRUE, compare_by = "ranks"){ tabtypes <- c("Values", "Diffs", "AbsDiffs", "All") # if all tabtypes are requested, recursively call the function if(tabtype == "All"){ tablist <- lapply(setdiff(tabtypes, "All"), function(x){ compare_coins_multi(coins, dset = dset, iCode = iCode, also_get = also_get, tabtype = x, ibase = ibase, sort_table = sort_table, compare_by = compare_by)}) names(tablist) <- setdiff(tabtypes, "All") return(tablist) } # Prep -------------------------------------------------------------------- # checks if(any(!sapply(coins, is.coin))){ stop("One or more coins are not coin class objects.") } if(tabtype %nin% tabtypes){ stop("tabtype must be one of c('Values', 'Diffs', 'AbsDiffs', 'All') ") } stopifnot(ibase %in% 1:length(coins), is.logical(sort_table), compare_by %in% c("ranks", "scores")) if(!is.null(also_get)){ if(also_get == "none"){ stop("also_get cannot be set to 'none': at a minimum uCodes are needed for matching.") } } # names if (is.null(names(coins))){ names(coins) <- paste0("coin.", 1:length(coins)) } # change order of list: put ibase first coins <- coins[c(ibase, setdiff(1:length(coins), ibase))] # Assemble df of scores --------------------------------------------------- # get base df_CODE <- get_data(coins[[1]], dset = dset, iCodes = iCode, also_get = also_get) # this func checks uCode is present df_check <- function(dfi){ if("uCode" %nin% names(dfi)){ stop("Expected column 'uCode' not found in extracted data frame.") } } # check df_check(df_CODE) # get metadata column names m1_codes <- extract_iData(coins[[1]], iData = df_CODE, GET = "mCodes") # rename to avoid name duplication warnings names(df_CODE)[names(df_CODE) %nin% m1_codes] <- "v1" # func to merge other coins onto this table merge_dfs <- function(coin){ # get new data df1 <- get_data(coin, dset = dset, iCodes = iCode, also_get = also_get) # check df_check(df1) # get meta column names m2_codes <- extract_iData(coin, iData = df1, GET = "mCodes") if(!setequal(m1_codes, m2_codes)){ stop("Different metadata columns returned by coins: cannot merge.") } # rename to avoid name duplication warnings names(df1)[names(df1) %nin% m2_codes] <- paste0("v", ncol(df_CODE)) # merge df_CODE <<- merge(df_CODE, df1, by = m2_codes, all.x = TRUE) } # apply merge function to remaining coins lapply(coins[-1], merge_dfs) # rename to list names names(df_CODE)[which(names(df_CODE) == "v1"):ncol(df_CODE)] <- names(coins) # Convert to output format ------------------------------------------------ # first convert to ranks if needed (exclude metadata cols) if(compare_by == "ranks"){ df_CODE[names(df_CODE) %nin% m1_codes] <- rank_df(df_CODE[names(df_CODE) %nin% m1_codes]) } if (tabtype != "Values"){ # DIFFS: subtract each col from the first (numeric) col df_CODE[names(coins)] <- df_CODE[names(coins)] - data.frame(rep(df_CODE[names(coins)[1]], length(names(coins))) ) } if (tabtype == "AbsDiffs"){ df_CODE[names(coins)] <- abs(df_CODE[names(coins)]) } # sort if(sort_table){ df_CODE <- df_CODE[order(df_CODE[[names(coins)[1]]]),] } df_CODE }
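# A sketch of compare_coins_multi() along the lines of the compare_coins()
# example above: two variants of the example coin differing only in the
# normalisation method. Wrapped in if(FALSE) so that sourcing this file has
# no side effects.
if(FALSE){
  coin <- build_example_coin(quietly = TRUE)

  # variant using percentile ranks for normalisation
  coin2 <- coin
  coin2$Log$Normalise$global_specs <- list(f_n = "n_prank")
  coin2 <- Regen(coin2)

  # absolute rank differences of the index, base = first (named) coin
  compare_coins_multi(list(nominal = coin, prank = coin2),
                      dset = "Aggregated", iCode = "Index",
                      tabtype = "AbsDiffs")
}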
# end of R/compare.R
#' Convert a COIN to a coin #' #' Converts an older COIN class to the newer coin class. Note that there are some limitations to this. First, #' the function arguments used to create the COIN will not be passed to the coin, since the function arguments #' are different. This means that any data sets beyond "Raw" cannot be regenerated. The second limitation is #' that anything from the `.$Analysis` folder will not be passed on. #' #' This function works by building the `iData` and `iMeta` arguments to `new_coin()`, using information from #' the COIN. It then uses these to build a coin if `out2 = "coin"` or else outputs both data frames in a list. #' #' If `recover_dsets = TRUE`, any data sets found in `COIN$Data` (except "Raw") will also be put in `coin$Data`, #' in the correct format. These can be used to inspect the data but not to regenerate. #' #' Note that if you want to exclude any indicators, you will have to set `out2 = "list"` and build the coin #' in a separate step with `exclude` specified. Any exclusions/inclusions from the COIN are not passed on #' automatically. #' #' @param COIN A COIN class object, generated by COINr version <= 0.6.1, OR a list containing IndData, IndMeta and #' AggMeta entries. #' @param recover_dsets Logical: if `TRUE`, will recover data sets other than "Raw" which are found in the #' `.$Data` list. #' @param out2 If `"coin"` (default) outputs a coin, else if `"list"`, outputs a list with #' `iData` and `iMeta` entries. This may be useful if you want to make further edits before building the coin. #' #' @return A coin class object if `out2 = "coin"`, else a list of data frames if `out2 = "list"`. #' @export #' #' @examples #' # see vignette("other_functions") #' COIN_to_coin <- function(COIN, recover_dsets = FALSE, out2 = "coin"){ # Get dfs ----------------------------------------------------------------- if(!inherits(COIN, "COIN")){ intype <- "list" if(!is.list(COIN)){ stop("input must be either a COIN (i.e. generated from COINr version <= 0.6.1.) 
or a list.") } if(is.null(COIN$IndData) | is.null(COIN$IndMeta) | is.null(COIN$AggMeta)){ stop("One or more of IndData, IndMeta and AggMeta not found in input list.") } # input data IndData <- COIN$IndData # get indicator metadata IndMeta <- COIN$IndMeta # aggmeta AggMeta <- COIN$AggMeta } else { intype <- "COIN" # input data IndData <- COIN$Input$Original$IndData # get indicator metadata IndMeta <- COIN$Input$Original$IndMeta # aggmeta AggMeta <- COIN$Input$Original$AggMeta } stopifnot(is.data.frame(IndData), is.data.frame(IndMeta), is.data.frame(AggMeta)) # iData ------------------------------------------------------------------- # make a copy IndData2 <- IndData # rename special columns names(IndData2)[names(IndData2) == "UnitName"] <- "uName" names(IndData2)[names(IndData2) == "UnitCode"] <- "uCode" names(IndData2)[names(IndData2) == "Year"] <- "Time" iData <- IndData2 if(recover_dsets & intype == "COIN"){ # get names of dsets dsets <- names(COIN$Data) # exclude "Raw" dsets <- setdiff(dsets, "Raw") if(length(dsets) > 0){ recovered_dsets <- lapply(dsets, function(dset){ x <- COIN$Data[[dset]] x <- x[c("UnitCode", COIN$Input$IndMeta$IndCode)] names(x)[1] <- "uCode" as.data.frame(x) }) names(recovered_dsets) <- dsets } else { message("No data sets other than Raw were found.") } } # iMeta ------------------------------------------------------------------- if(any(IndMeta$IndCode %nin% colnames(IndData))){ stop("One or more IndCodes from IndMeta not found in original IndData.") } # rename special columns names(IndMeta)[names(IndMeta) == "IndName"] <- "iName" names(IndMeta)[names(IndMeta) == "IndCode"] <- "iCode" names(IndMeta)[names(IndMeta) == "IndWeight"] <- "Weight" names(IndMeta)[names(IndMeta) == "IndUnit"] <- "Unit" # get aggregation cols lineage <- IndMeta[c("iCode", colnames(IndMeta)[ startsWith(colnames(IndMeta), "Agg") ])] # remove aggregation cols IndMeta <- IndMeta[ colnames(IndMeta)[!startsWith(colnames(IndMeta), "Agg")] ] # add parent for indicators IndMeta$Parent <- lineage[[2]] # add level for indicators IndMeta$Level <- 1 # add type IndMeta$Type <- "Indicator" # now we have to add higher aggregation levels... 
# rename special columns
  names(AggMeta)[names(AggMeta) == "AgLevel"] <- "Level"
  names(AggMeta)[names(AggMeta) == "Code"] <- "iCode"
  names(AggMeta)[names(AggMeta) == "Name"] <- "iName"

  # add type
  AggMeta$Type <- "Aggregate"

  # add direction
  AggMeta$Direction <- 1

  # add Parent
  AggMeta$Parent <- sapply(AggMeta$iCode, function(Code){

    # col index of current level
    icol <- AggMeta$Level[AggMeta$iCode == Code]
    # codes at current level
    l1 <- lineage[[icol]]

    if(icol == ncol(lineage)){
      NA
    } else {
      # codes at parent level
      lp <- lineage[[icol + 1]]
      unique(lp[l1 == Code])
    }
  })

  # Now join to IndMeta
  iMeta <- rbind_fill(IndMeta, AggMeta)

  # Add any other cols from IndData

  # groups
  groupcols <- colnames(IndData)[startsWith(colnames(IndData), "Group_")]
  if(length(groupcols) > 0){
    iMeta <- rbind_fill(iMeta, data.frame(iCode = groupcols,
                                          iName = groupcols,
                                          Type = "Group"))
  }

  # denominators
  dencols <- colnames(IndData)[startsWith(colnames(IndData), "Den_")]
  if(length(dencols) > 0){
    iMeta <- rbind_fill(iMeta, data.frame(iCode = dencols,
                                          iName = dencols,
                                          Type = "Denominator"))
  }

  # others
  othercols <- colnames(IndData)[startsWith(colnames(IndData), "x_")]
  if(length(othercols) > 0){
    iMeta <- rbind_fill(iMeta, data.frame(iCode = othercols,
                                          iName = othercols,
                                          Type = "Other"))
  }

  # OUTPUT ------------------------------------------------------------------

  if(out2 == "coin"){

    coin <- new_coin(iData, iMeta)
    if(exists("recovered_dsets")){
      coin$Data <- c(coin$Data, recovered_dsets)
      coin$Log$can_regen <- FALSE
      coin$Log$message <- "This coin contains data sets recovered from a COIN. Regeneration has been disabled."
    }
    coin

  } else if (out2 == "list"){

    list(iData = iData,
         iMeta = iMeta)
  }

}
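# A sketch of the two-step conversion mentioned in the details above: output
# a list first, then build the coin separately in order to exclude
# indicators. "old_COIN" and "Ind1" are hypothetical placeholders here.
# Wrapped in if(FALSE) so that sourcing this file has no side effects.
if(FALSE){
  l <- COIN_to_coin(old_COIN, out2 = "list")
  # build the coin, excluding an indicator at this stage
  coin <- new_coin(l$iData, l$iMeta, exclude = "Ind1")
}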
# ---- end of /scratch/gouwar.j/cran-all/cranData/COINr/R/convert.R ----
#' Correlations between indicators and denominators
#'
#' Get a data frame containing any correlations between indicators and denominators that exceed a given
#' threshold. This can be useful when *whether* to denominate an indicator and *by what* may not be obvious.
#' If an indicator is strongly correlated with a denominator, this may suggest denominating it by that
#' denominator.
#'
#' @param coin A coin class object.
#' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`.
#' @param cor_thresh A correlation threshold: the absolute value of any correlations between indicator-denominator pairs above this
#' threshold will be flagged.
#' @param cortype The type of correlation: to be passed to the `method` argument of `stats::cor`.
#' @param nround Optional number of decimal places to round correlation values to. Default 2, set `NULL` to
#' disable.
#' @param use_directions Logical: if `TRUE` the extracted data is adjusted using directions found inside the coin (i.e. the "Direction"
#' column input in `iMeta`). See comments on this argument in [get_corr()].
#'
#' @return A data frame of pairwise correlations that exceed the threshold.
#' @export
#'
#' @examples
#' # build example coin
#' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
#'
#' # get correlations >0.7 of any indicator with denominators
#' get_denom_corr(coin, dset = "Raw", cor_thresh = 0.7)
#'
get_denom_corr <- function(coin, dset, cor_thresh = 0.6, cortype = "pearson",
                           nround = 2, use_directions = FALSE){

  # indicator data
  # get everything at this point to ensure matching rows
  iData <- get_dset(coin, dset = dset, also_get = "all")
  if(use_directions){
    iData <- directionalise(iData, coin)
  }

  # iMeta
  iMeta <- coin$Meta$Ind

  # only the indicator data
  iData_ <- iData[iMeta$iCode[iMeta$Type == "Indicator"]]

  # only the denoms
  den_codes <- iMeta$iCode[iMeta$Type == "Denominator"]
  if(length(den_codes) == 0){
    stop("No denominators found. Denominators should be labelled as 'Denominator' in iMeta.")
  }
  if(any(den_codes %nin% names(iData))){
    stop("Denominators not found - they are present in iMeta but not found in selected data set.")
  }
  denoms <- iData[den_codes]

  # GET CORRS ---------------------------------------------------------------

  # get correlations
  corr_ind <- stats::cor(iData_, denoms, method = cortype, use = "pairwise.complete.obs")
  # make long
  crtable <- lengthen(corr_ind)

  # FIND HI CORRS -----------------------------------------------------------

  # remove self-correlations
  crtable <- crtable[crtable$V1 != crtable$V2, ]
  # remove NAs
  crtable <- crtable[!is.na(crtable$Value), ]
  # now filter to only correlations above the threshold (in absolute value)
  crtable <- crtable[abs(crtable$Value) > cor_thresh, ]

  # CLEAN AND OUTPUT ---------------------------------------------------------

  # col names
  colnames(crtable) <- c("Ind", "Denom", "Corr")
  # round
  if(!is.null(nround)){
    crtable$Corr <- round(crtable$Corr, nround)
  }
  # sort
  crtable <- crtable[order(crtable$Ind),]

  crtable
}

#' Find highly-correlated indicators within groups
#'
#' This returns a data frame of any highly correlated indicators within the same aggregation group. The level of the aggregation
#' grouping can be controlled by the `grouplev` argument.
#'
#' This function is motivated by the idea that having very highly-correlated indicators within the same group may
#' amount to double counting, or possibly redundancy in the framework.
#'
#' This function replaces the now-defunct `hicorrSP()` from COINr < v1.0.
#' #' @param coin A coin class object #' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`. #' @param cor_thresh A threshold to flag high correlation. Default 0.9. #' @param grouplev The level to group indicators in. E.g. if `grouplev = 2` it will look for high correlations between indicators that #' belong to the same group in Level 2. #' @param cortype The type of correlation, either `"pearson"` (default), `"spearman"` or `"kendall"`. See [stats::cor]. #' @param roundto Number of decimal places to round correlations to. Default 3. Set `NULL` to disable rounding. #' @param thresh_type Either `"high"`, which will only flag correlations *above* `cor_thresh`, or `"low"`, #' which will only flag correlations *below* `cor_thresh`. #' @param use_directions Logical: if `TRUE` the extracted data is adjusted using directions found inside the coin (i.e. the "Direction" #' column input in `iMeta`. See comments on this argument in [get_corr()]. #' #' @examples #' # build example coin #' coin <- build_example_coin(up_to = "Normalise", quietly = TRUE) #' #' # get correlations between indicator over 0.75 within level 2 groups #' get_corr_flags(coin, dset = "Normalised", cor_thresh = 0.75, #' thresh_type = "high", grouplev = 2) #' #' @return A data frame with one entry for every indicator pair that is highly correlated within the same group, at the specified level. #' Pairs are only reported once, i.e. only uses the upper triangle of the correlation matrix. #' #' @export get_corr_flags <- function(coin, dset, cor_thresh = 0.9, thresh_type = "high", cortype = "pearson", grouplev = NULL, roundto = 3, use_directions = FALSE){ # CHECKS AND DEFAULTS ----------------------------------------------------- grouplev <- set_default(grouplev, 2) stopifnot(thresh_type %in% c("high", "low")) if(grouplev > coin$Meta$maxlev){ stop("grouplev is greater than the maximum level of ", coin$Meta$maxlev) } stopifnot(cor_thresh <= 1, cor_thresh >= -1) iData <- get_dset(coin, dset, also_get = "none") if(use_directions){ iData <- directionalise(iData, coin) } # GET CORRS --------------------------------------------------------------- # get correlations corr_ind <- stats::cor(iData, method = cortype, use = "pairwise.complete.obs") # make long crmat_melt <- lengthen(corr_ind) # FIND HI CORRS ----------------------------------------------------------- # get index structure lin <- coin$Meta$Lineage # select cols corresponding to what is being correlated against what lin <- unique(lin[c(1,grouplev)]) # get parent group of each of V1 and V2 crtable <- merge(crmat_melt, lin, by.x = "V1", by.y = colnames(lin)[1]) colnames(crtable)[ncol(crtable)] <- "P1" crtable <- merge(crtable, lin, by.x = "V2", by.y = colnames(lin)[1]) colnames(crtable)[ncol(crtable)] <- "P2" # filter to only include entries from the same group crtable <- crtable[crtable$P1 == crtable$P2 ,] # remove self-correlations crtable <- crtable[crtable$V1 != crtable$V2, ] # remove NAs crtable <- crtable[!is.na(crtable$Value), ] # now filter to only high or low correlations if(thresh_type == "high"){ crtable <- crtable[crtable$Value > cor_thresh, ] } else { crtable <- crtable[crtable$Value < cor_thresh, ] } # CLEAN AND OUTPUT -------------------------------------------------------- # col names colnames(crtable) <- c("Ind1", "Ind2", "Corr", "Group", "P2") if(!is.null(roundto)){ crtable$Corr <- round(crtable$Corr, roundto) } df_out <- crtable[c("Group", "Ind1", "Ind2", "Corr")] remove_duplicate_corrs(df_out, c("Ind1", "Ind2")) } 
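# A self-contained sketch of the group-filtering idea used in get_corr_flags():
# melt a correlation matrix to long form, attach each variable's parent group
# by merging against a lineage table, and keep within-group, off-diagonal pairs
# above a threshold. Base R only; all names below are illustrative toy data,
# not COINr objects.
set.seed(42)
cm <- stats::cor(data.frame(a = rnorm(20), b = rnorm(20), c = rnorm(20)))
cm_long <- as.data.frame(as.table(cm), stringsAsFactors = FALSE)
names(cm_long) <- c("V1", "V2", "Value")
lin <- data.frame(iCode = c("a", "b", "c"), Group = c("G1", "G1", "G2"))
cm_long <- merge(cm_long, lin, by.x = "V1", by.y = "iCode")
cm_long <- merge(cm_long, lin, by.x = "V2", by.y = "iCode", suffixes = c("1", "2"))
# flag within-group pairs whose absolute correlation exceeds the threshold
cm_long[cm_long$Group1 == cm_long$Group2 & cm_long$V1 != cm_long$V2 &
          abs(cm_long$Value) > 0.5, ]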
#' Get correlations
#'
#' Helper function for getting correlations between indicators and aggregates. This retrieves subsets of correlation
#' matrices between different aggregation levels, in different formats. By default, it will return a
#' long-form data frame, unless `make_long = FALSE`. By default, any correlations with a p-value greater than 0.05 are
#' replaced with `NA`. See `pval` argument to adjust this.
#'
#' This function allows you to obtain correlations between any subset of indicators or aggregates, from
#' any data set present in a coin. Indicator selection is performed using [get_data()]. Two different
#' indicator sets can be correlated against each other by specifying `iCodes` and `Levels` as vectors.
#'
#' The correlation type can be specified by the `cortype` argument, which is passed to [stats::cor()].
#'
#' The `withparent` argument will optionally only return correlations which correspond to the structure
#' of the index. For example, if `Levels = c(1,2)` (i.e. we wish to correlate indicators from Level 1 with
#' aggregates from Level 2), and we set `withparent = TRUE`, only the correlations between each indicator
#' and its parent group will be returned (not correlations between indicators and other aggregates to which
#' it does not belong). This can be useful to check whether correlations of an indicator/aggregate with
#' any of its parent groups exceed or fall below thresholds.
#'
#' Similarly, the `grouplev` argument can be used to restrict correlations to within groups corresponding
#' to the index structure. Setting e.g. `grouplev = 2` will only return correlations within the groups
#' defined at Level 2.
#'
#' The `grouplev` and `withparent` options are disabled if `make_long = FALSE`.
#'
#' Note that this function can only calculate correlations within the same data set (i.e. only one data set in `.$Data`).
#'
#' This function replaces the now-defunct `getCorr()` from COINr < v1.0.
#'
#' @param coin A coin class object
#' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`.
#' @param iCodes An optional list of character vectors where the first entry specifies the indicator/aggregate
#' codes to correlate against the second entry (also a specification of indicator/aggregate codes). If this is specified as a character vector
#' it will be coerced to the first entry of a list, i.e. `list(iCodes)`.
#' @param Levels The aggregation levels to take the two groups of indicators from. See [get_data()] for details.
#' Defaults to indicator level.
#' @param ... Further arguments to be passed to [get_data()] (`uCodes` and `use_group`).
#' @param cortype The type of correlation to calculate, either `"pearson"`, `"spearman"`, or `"kendall"`.
#' @param withparent If `TRUE`, and `Levels[1] != Levels[2]`, will only return correlations of each row with its parent. Alternatively, if
#' `withparent = "family"`, will return correlations with parents, grandparents etc, up to the highest level. In both cases the data set
#' must be aggregated for this to work.
#' @param grouplev The aggregation level to group correlations by if `Levels[1] == Levels[2]`. Requires that
#' `make_long = TRUE`.
#' @param pval The significance level for including correlations. Correlations with \eqn{p > pval} will be returned as `NA`.
#' Default 0.05. Set to 0 to disable this.
#' @param make_long Logical: if `TRUE`, returns correlations in long format (default), else if `FALSE`
#' returns in wide format. Note that if wide format is requested, features specified by `grouplev`
#' and `withparent` are not supported.
#' @param use_directions Logical: if `TRUE` the extracted data is adjusted using directions found inside the coin (i.e. the "Direction"
#' column input in `iMeta`: any indicators with negative direction will have their values multiplied by -1 which will reverse the
#' direction of correlation). This should only be set to `TRUE` if the data set has not yet been normalised. For example, this can be
#' useful to set to `TRUE` to analyse correlations in the raw data, but would make no sense to analyse correlations in the normalised
#' data because that already has the direction adjusted! So you would reverse direction twice. In other words, use this at your
#' discretion.
#'
#' @importFrom stats cor
#'
#' @examples
#' # build example coin
#' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
#'
#' # get correlations
#' cmat <- get_corr(coin, dset = "Raw", iCodes = list("Environ"),
#'                  Levels = 1, make_long = FALSE)
#'
#' @return A data frame of pairwise correlation values in wide or long format (see `make_long`).
#' Correlations with \eqn{p > pval} will be returned as `NA`.
#'
#' @seealso
#' * [plot_corr()] Plot correlation matrices of indicator subsets
#'
#' @export
get_corr <- function(coin, dset, iCodes = NULL, Levels = NULL, ..., cortype = "pearson",
                     pval = 0.05, withparent = FALSE, grouplev = NULL, make_long = TRUE,
                     use_directions = FALSE){

  # CHECKS ------------------------------------------------------------------

  check_coin_input(coin)

  # DEFAULTS ----------------------------------------------------------------

  # set Levels, repeat iCodes etc if only one input
  if(is.null(Levels)){Levels <- 1}
  if(is.null(iCodes)){iCodes <- list(NULL)}
  if(!is.list(iCodes)){
    stopifnot(is.character(iCodes))
    iCodes <- as.list(iCodes)
  }
  if (length(iCodes) == 1){
    iCodes = rep(iCodes, 2)
  }
  if (length(Levels) == 1){
    Levels = rep(Levels, 2)
  }
  if (Levels[2] > Levels[1]){
    Levels <- rev(Levels)
    iCodes <- rev(iCodes)
  }

  # catch when two different groups in same level: here we can't show groupings
  if (!setequal(iCodes[[1]], iCodes[[2]])){
    if (Levels[1] == Levels[2]) {
      grouplev <- NULL
    }
  }

  # GET FAM (RECURSIVE) ------------------------------------------------------

  if(!is.logical(withparent)){
    stopifnot(is.character(withparent),
              length(withparent)==1)
    if(withparent != "family"){
      stop("withparent not recognised - should be either logical or 'family'.")
    }

    # ignore second iCode and Level in this case
    iCodes[[2]] <- iCodes[[1]]
    Levels[2] <- Levels[1]

    lin <- coin$Meta$Lineage
    if(ncol(lin) <= Levels[1]){
      stop("If withparent = 'family', Levels[1] cannot be the top level.")
    }
    par_levs <- (Levels[1] + 1) : ncol(lin)

    cr_fam <- lapply(par_levs, function(lev){
      cmat <- get_corr(coin, dset = dset, iCodes = iCodes, Levels = c(Levels[1], lev),
                       cortype = cortype, pval = pval, withparent = TRUE,
                       grouplev = grouplev, make_long = TRUE, ... = ...)
# rename to level cmat[1] <- names(lin)[lev] cmat }) cr_fam <- Reduce(rbind, cr_fam) return(cr_fam) } # GET DATA ---------------------------------------------------------------- # get data sets iData1 <- get_data(coin, dset = dset, iCodes = iCodes[[1]], Level = Levels[[1]], ..., also_get = "none") iData2 <- get_data(coin, dset = dset, iCodes = iCodes[[2]], Level = Levels[[2]], ..., also_get = "none") # Adjust directions ------------------------------------------------------- if(use_directions){ iData1 <- directionalise(iData1, coin) iData2 <- directionalise(iData2, coin) } # GET CORRELATIONS -------------------------------------------------------- # get corr matrix crmat <- stats::cor(iData1, iData2, use = "pairwise.complete.obs", method = cortype) # set insignificant correlations to zero if requested if(pval > 0){ # p values p_ind <- get_pvals(cbind(iData1, iData2), method = cortype) # relevant part of the matrix p_ind2 <- p_ind[1:ncol(iData1), ((ncol(iData1)+1):ncol(p_ind))] # set elements of crmat to zero, where correlations are below significance level # when plotted, these will be white, so invisible crmat[p_ind2 > pval] <- NA } crmat_melt <- lengthen(crmat) # remove rows with NAs #crmat_melt <- crmat_melt[!is.na(crmat_melt$value),] #- PARENTS ------------------------------------- if (withparent & (ncol(crmat)>1) & Levels[1]!=Levels[2]){ # get index structure lin <- coin$Meta$Lineage # select cols corresponding to what is being correlated against what lin <- unique(lin[Levels]) # rename corr matrix cols to prepare for join colnames(crmat_melt) <- c(colnames(lin), "Correlation") # now merge - we are matching correlation rows that agree with the structure of the index # essentially, we subset the rows of crmat_melt to only include the ones that agree with lin crtable <- merge(lin, crmat_melt, by = colnames(lin)) # rename cols for plotting colnames(crtable)[1:2] <- c("Var1", "Var2") } else { crtable <- crmat_melt colnames(crtable) <- c("Var1", "Var2", "Correlation") } ##- GROUP ---------------------------------------- # If correlating against the same level, only show within groups if asked if(!is.null(grouplev)){ if(!make_long){ warning("Grouping is not supported for make_long = FALSE. 
Set make_long = TRUE to group.") } else { if (Levels[1] == Levels[2]){ # get index structure lin <- coin$Meta$Lineage if(grouplev <= Levels[1]){ stop("grouplev must be at least the aggregation level above Levels.") } if(grouplev > ncol(lin)){ stop("Grouping level is out of range - should be between 2 and ", ncol(lin), " or zero to turn off.") } # select cols corresponding to current level, plus the one above # remember here we are correlating a level against itself, so Levels[1]==Levels[2] lin <- lin[c(Levels[1], grouplev)] # get unique groups in level above lev2 <- unique(lin[[2]]) # initialise df for recording entries of corr matrix to keep keeprows <- data.frame(Var1 = NA, Var2 = NA) # loop over the levels above for (lev2ii in lev2){ # get child codes lev1 <- lin[lin[2]==lev2ii, 1] lev1 <- unique(unlist(lev1, use.names = FALSE)) # otherwise it is still a tibble, also remove dups # get all 2-way combos of these codes lev1pairs <- expand.grid(lev1, lev1) # add to df keeprows <- rbind(keeprows, lev1pairs) } # remove first row (dummy) keeprows <- keeprows[-1,] # rename corr matrix cols to prepare for join colnames(crmat_melt)[3] <- "Correlation" colnames(keeprows) <- colnames(crmat_melt)[1:2] # now do inner join - we are matching correlation rows that agree with the structure of the index crtable <- merge(keeprows, crmat_melt, by = colnames(keeprows)) # sometimes this throws duplicates, so remove crtable <- unique(crtable) } } } colnames(crtable) <- c("Var1", "Var2", "Correlation") # widen or not if(!make_long){ crtable <- widen(crtable) } crtable } #' P-values for correlations in a data frame or matrix #' #' This is a stripped down version of the "cor.mtest()" function from the "corrplot" package. It uses #' the [stats::cor.test()] function to calculate pairwise p-values. Unlike the corrplot version, this #' only calculates p-values, and not confidence intervals. Credit to corrplot for this code, I only #' replicate it here to avoid depending on their package for a single function. #' #' @param X A numeric matrix or data frame #' @param \dots Additional arguments passed to function [cor.test()], e.g. \code{conf.level = 0.95}. #' #' @importFrom stats cor.test #' #' @examples #' # a matrix of random numbers, 3 cols #' x <- matrix(runif(30), 10, 3) #' #' # get correlations between cols #' cor(x) #' #' # get p values of correlations between cols #' get_pvals(x) #' #' @return Matrix of p-values #' @export get_pvals = function(X, ...) { # convert to matrix, get number cols X = as.matrix(X) n = ncol(X) # prep matrix for p values p.X <- matrix(NA, n, n) diag(p.X) = 0 # populate matrix for (i in 1:(n - 1)) { for (j in (i + 1):n) { # get p val for pair # catch possibility of all NAs in one or both vectors if(all(is.na(X[,i])) | all(is.na(X[,j]))){ p.X[i, j] <- p.X[j, i] <- NA } else { tmp = stats::cor.test(x = X[, i], y = X[, j], ...) p.X[i, j] = p.X[j, i] = tmp$p.value } } } # rename cols colnames(p.X) = rownames(p.X) = colnames(X) # output p.X } #' Cronbach's alpha #' #' Calculates Cronbach's alpha, a measure of statistical reliability. Cronbach's alpha is a simple measure #' of "consistency" of a data set, where a high value implies higher reliability/consistency. The #' selection of indicators via [get_data()] allows to calculate the measure on any group of #' indicators or aggregates. #' #' This function simply returns Cronbach's alpha. If you want a lot more details on reliability, the 'psych' package has #' a much more detailed analysis. 
#' #' This function replaces the now-defunct `getCronbach()` from COINr < v1.0. #' #' @param coin A coin or a data frame containing only numerical columns of data. #' @param ... Further arguments passed to [get_data()], other than those explicitly specified here. #' @param use Argument to pass to [stats::cor] to calculate the covariance matrix. Default `"pairwise.complete.obs"`. #' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`. #' @param iCodes Indicator codes to retrieve. If `NULL` (default), returns all iCodes found in #' the selected data set. See [get_data()]. #' @param Level The level in the hierarchy to extract data from. See [get_data()]. #' #' @importFrom stats cov #' #' @examples #' # build example coin #' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE) #' #' # Cronbach's alpha for the "P2P" group #' get_cronbach(coin, dset = "Raw", iCodes = "P2P", Level = 1) #' #' @return Cronbach alpha as a numerical value. #' #' @export get_cronbach <- function(coin, dset, iCodes, Level, ..., use = "pairwise.complete.obs"){ # get data iData <- get_data(coin, dset = dset, iCodes = iCodes, Level = Level, ...) # get only indicator cols iData <- extract_iData(coin, iData, "iData_") # get number of variables k = ncol(iData) # get covariance matrix cvtrix <- stats::cov(iData, use = use) # sum of all elements of cov matrix sigall <- sum(cvtrix, na.rm = TRUE) # mean of all elements except diagonal sigav <- (sigall - sum(diag(cvtrix), na.rm = TRUE))/(k*(k-1)) # calculate Cronbach alpha (k^2 * sigav)/sigall }
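# The closing expression of get_cronbach(), (k^2 * sigav) / sigall, is
# algebraically the textbook Cronbach form alpha = k/(k-1) * (1 - trace(C)/sum(C)),
# since sigav = (sum(C) - trace(C)) / (k*(k-1)). A quick standalone check on
# toy data (illustrative only, not part of the package API):
set.seed(1)
X <- matrix(rnorm(100), 20, 5)
C <- stats::cov(X)
k <- ncol(X)
alpha_textbook <- (k / (k - 1)) * (1 - sum(diag(C)) / sum(C))
sigav <- (sum(C) - sum(diag(C))) / (k * (k - 1))
alpha_coinr <- (k^2 * sigav) / sum(C)
all.equal(alpha_textbook, alpha_coinr) # TRUE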
# ---- end of /scratch/gouwar.j/cran-all/cranData/COINr/R/correlations.R ----
#' ASEM raw panel data #' #' This is an artificially-generated set of panel data (multiple observations of indicators over time) that #' is included to build the example "purse" class, i.e. to build composite indicators over time. This will #' eventually be replaced with a better example, i.e. a real data set. #' #' This data set is in the new v1.0 format. #' #' @format A data frame with 255 rows and 60 variables. #' #' @source \url{https://composite-indicators.jrc.ec.europa.eu/asem-sustainable-connectivity/repository} "ASEM_iData_p" #' ASEM raw indicator data #' #' A data set containing raw values of indicators for 51 countries, groups and denominators. See the ASEM Portal #' for further information and detailed description of each indicator. See also `vignette("coins")` for the format #' of this data. #' #' This data set is in the new v1.0 format. #' #' @format A data frame with 51 rows and 60 variables. #' #' @source \url{https://composite-indicators.jrc.ec.europa.eu/asem-sustainable-connectivity/repository} "ASEM_iData" #' ASEM indicator metadata #' #' This contains all metadata for ASEM indicators, including names, weights, directions, etc. See the ASEM Portal #' for further information and detailed description of each indicator. #' See also `vignette("coins")` for the format #' of this data. #' #' This data set is in the new v1.0 format. #' #' @format A data frame with 68 rows and 9 variables #' #' @source \url{https://bluefoxr.github.io/COINrDoc/coins-the-currency-of-coinr.html#aggregation-metadata} "ASEM_iMeta" #' World denomination data #' #' A small selection of common denominator indicators, which includes GDP, Population, Area, GDP per capita #' and income group. All data sourced from the World Bank as of Feb 2021 (data is typically from 2019). Note that this is #' intended as example data, and it would be a good idea to use updated data from the World Bank when needed. In this #' data set, country names have been altered slightly so as to include no accents - this is simply to make it more #' portable between distributions. #' #' @format A data frame with 249 rows and 7 variables. #' #' @source \url{https://data.worldbank.org/} "WorldDenoms" #' ASEM COIN (COINr < v1.0) #' #' This is an "old format" "COIN" object which is stored for testing purposes. #' It is generated using the COINr6 package (only available on GitHub) using #' `COINr6::build_ASEM()` #' #' @format A "COIN" class object #' #' @source \url{https://github.com/bluefoxr/COINr6} "ASEM_COIN"
# ---- end of /scratch/gouwar.j/cran-all/cranData/COINr/R/data.R ----
# # redirects for defunct functions # # defunct_message <- function(){ # message("COINr syntax and functionality has significantly changed. See vignette('v1') for details.") # } # # COINrX_message <- function(){ # stop("This function has been removed from the main COINr package but is now available in COINrX. # See vignette('v1') for details.", call. = FALSE) # } # # # BoxCox # # # # @param ... deprecated parameter # # @export # BoxCox <- function(...){ # defunct_message() # .Defunct("boxcox") # } # # #' COINToolIn # #' # #' @param ... deprecated parameter # #' @export # COINToolIn <- function(...){ # defunct_message() # .Defunct("import_coin_tool") # } # # #' assemble # #' # #' @param ... deprecated parameter # #' @export # assemble <- function(...){ # defunct_message() # .Defunct("new_coin") # } # # #' build_ASEM # #' # #' @param ... deprecated parameter # #' @export # build_ASEM <- function(...){ # defunct_message() # .Defunct("build_example_coin") # } # # #' checkData # #' # #' @param ... deprecated parameter # #' @export # checkData <- function(...){ # defunct_message() # .Defunct("Screen") # } # # #' coin2Excel # #' # #' @param ... deprecated parameter # #' @export # coin2Excel <- function(...){ # defunct_message() # .Defunct("export_to_excel") # } # # #' coin_win # #' # #' @param ... deprecated parameter # #' @export # coin_win <- function(...){ # defunct_message() # .Defunct("winsorise") # } # # #' colourTable # #' # #' @param ... deprecated parameter # #' @export # colourTable <- function(...){ # COINrX_message() # } # # #' compTable # #' # #' @param ... deprecated parameter # #' @export # compTable <- function(...){ # defunct_message() # .Defunct("compare_coins") # } # # #' compTableMulti # #' # #' @param ... deprecated parameter # #' @export # compTableMulti <- function(...){ # defunct_message() # .Defunct("compare_coins_multi") # } # # #' compareDF # #' # #' @param ... deprecated parameter # #' @export # compareDF <- function(...){ # defunct_message() # .Defunct("compare_df") # } # # #' copeland # #' # #' @param ... deprecated parameter # #' @export # copeland <- function(...){ # defunct_message() # .Defunct("a_copeland") # } # # #' corrweightscat # #' # #' @param ... deprecated parameter # #' @export # corrweightscat <- function(...){ # COINrX_message() # } # # # denominate # # # # @param ... deprecated parameter # # @export # denominate <- function(...){ # defunct_message() # .Defunct("Denominate") # } # # #' effectiveWeight # #' # #' @param ... deprecated parameter # #' @export # effectiveWeight <- function(...){ # defunct_message() # .Defunct("get_eff_weights") # } # # #' extractYear # #' # #' @param ... deprecated parameter # #' @export # extractYear <- function(...){ # defunct_message() # message("This function has been incorporated inside of new_coin.") # .Defunct("new_coin") # } # # #' geoMean # #' # #' @param ... deprecated parameter # #' @export # geoMean <- function(...){ # defunct_message() # .Defunct("a_gmean") # } # # #' geoMean_rescaled # #' # #' @param ... deprecated parameter # #' @export # geoMean_rescaled <- function(...){ # defunct_message() # .Defunct("a_gmean") # } # # #' getCorr # #' # #' @param ... deprecated parameter # #' @export # getCorr <- function(...){ # defunct_message() # .Defunct("get_corr") # } # # #' getCronbach # #' # #' @param ... deprecated parameter # #' @export # getCronbach <- function(...){ # defunct_message() # .Defunct("get_cronbach") # } # # #' getIn # #' # #' @param ... 
deprecated parameter # #' @export # getIn <- function(...){ # defunct_message() # .Defunct("get_data") # } # # #' getPCA # #' # #' @param ... deprecated parameter # #' @export # getPCA <- function(...){ # defunct_message() # .Defunct("get_PCA") # } # # #' getResults # #' # #' @param ... deprecated parameter # #' @export # getResults <- function(...){ # defunct_message() # .Defunct("get_results") # } # # #' getStats # #' # #' @param ... deprecated parameter # #' @export # getStats <- function(...){ # defunct_message() # .Defunct("get_stats") # } # # #' getStrengthNWeak # #' # #' @param ... deprecated parameter # #' @export # getStrengthNWeak <- function(...){ # defunct_message() # .Defunct("get_str_weak") # } # # #' getUnitReport # #' # #' @param ... deprecated parameter # #' @export # getUnitReport <- function(...){ # COINrX_message() # } # # #' getUnitSummary # #' # #' @param ... deprecated parameter # #' @export # getUnitSummary <- function(...){ # defunct_message() # .Defunct("get_unit_summary") # } # # #' harMean # #' # #' @param ... deprecated parameter # #' @export # harMean <- function(...){ # defunct_message() # .Defunct("a_hmean") # } # # #' hicorrSP # #' # #' @param ... deprecated parameter # #' @export # hicorrSP <- function(...){ # defunct_message() # .Defunct("get_corr_flags") # } # # # impute # # # # @param ... deprecated parameter # # @export # impute <- function(...){ # defunct_message() # .Defunct("Impute") # } # # #' indChange # #' # #' @param ... deprecated parameter # #' @export # indChange <- function(...){ # defunct_message() # .Defunct("change_ind") # } # # #' indDash # #' # #' @param ... deprecated parameter # #' @export # indDash <- function(...){ # COINrX_message() # } # # #' iplotBar # #' # #' @param ... deprecated parameter # #' @export # iplotBar <- function(...){ # COINrX_message() # } # #' iplotCorr # #' # #' @param ... deprecated parameter # #' @export # iplotCorr <- function(...){ # COINrX_message() # } # # #' iplotIndDist # #' # #' @param ... deprecated parameter # #' @export # iplotIndDist <- function(...){ # COINrX_message() # } # # #' iplotIndDist2 # #' # #' @param ... deprecated parameter # #' @export # iplotIndDist2 <- function(...){ # COINrX_message() # } # # #' iplotMap # #' # #' @param ... deprecated parameter # #' @export # iplotMap <- function(...){ # COINrX_message() # } # # #' iplotRadar # #' # #' @param ... deprecated parameter # #' @export # iplotRadar <- function(...){ # COINrX_message() # } # # #' iplotTable # #' # #' @param ... deprecated parameter # #' @export # iplotTable <- function(...){ # COINrX_message() # } # # #' loggish # #' # #' @param ... deprecated parameter # #' @export # loggish <- function(...){ # defunct_message() # message("This function has been split into multiple functions.") # .Defunct("Treat") # } # # #' names2Codes # #' # #' @param ... deprecated parameter # #' @export # names2Codes <- function(...){ # defunct_message() # .Defunct("names_to_codes") # } # # #' noisyWeights # #' # #' @param ... deprecated parameter # #' @export # noisyWeights <- function(...){ # defunct_message() # .Defunct("get_noisy_weights") # } # # # normalise # # # # @param ... deprecated parameter # # @export # normalise <- function(...){ # defunct_message() # .Defunct("Normalise") # } # # #' plotCorr # #' # #' @param ... deprecated parameter # #' @export # plotCorr <- function(...){ # defunct_message() # .Defunct("plot_corr") # } # # #' plotIndDist # #' # #' @param ... 
deprecated parameter # #' @export # plotIndDist <- function(...){ # defunct_message() # .Defunct("plot_dist") # } # # #' plotIndDot # #' # #' @param ... deprecated parameter # #' @export # plotIndDot <- function(...){ # defunct_message() # .Defunct("plot_dot") # } # # #' plotSA # #' # #' @param ... deprecated parameter # #' @export # plotSA <- function(...){ # defunct_message() # .Defunct("plot_sensitivity") # } # # #' plotSARanks # #' # #' @param ... deprecated parameter # #' @export # plotSARanks <- function(...){ # defunct_message() # .Defunct("plot_uncertainty") # } # # #' plotframework # #' # #' @param ... deprecated parameter # #' @export # plotframework <- function(...){ # defunct_message() # .Defunct("plot_framework") # } # # #' rankDF # #' # #' @param ... deprecated parameter # #' @export # rankDF <- function(...){ # defunct_message() # .Defunct("rank_df") # } # # # assemble # # # # @param ... deprecated parameter # # @export # regen <- function(...){ # defunct_message() # .Defunct("Regen") # } # # #' removeElements # #' # #' @param ... deprecated parameter # #' @export # removeElements <- function(...){ # defunct_message() # .Defunct("remove_elements") # } # # #' replaceDF # #' # #' @param ... deprecated parameter # #' @export # replaceDF <- function(...){ # defunct_message() # .Defunct("replace_df") # } # # #' resultsDash # #' # #' @param ... deprecated parameter # #' @export # resultsDash <- function(...){ # COINrX_message() # } # # #' rew8r # #' # #' @param ... deprecated parameter # #' @export # rew8r <- function(...){ # COINrX_message() # } # # #' roundDF # #' # #' @param ... deprecated parameter # #' @export # roundDF <- function(...){ # defunct_message() # .Defunct("round_df") # } # # #' sensitivity # #' # #' @param ... deprecated parameter # #' @export # sensitivity <- function(...){ # defunct_message() # .Defunct("get_sensitivity") # } # # # treat # # # # @param ... deprecated parameter # # @export # treat <- function(...){ # defunct_message() # .Defunct("Treat") # } # # #' weightOpt # #' # #' @param ... deprecated parameter # #' @export # weightOpt <- function(...){ # defunct_message() # .Defunct("get_opt_weights") # }
# ---- end of /scratch/gouwar.j/cran-all/cranData/COINr/R/defunct.R ----
# DENOMINATION TOOLS #' Denominate a data set within a purse. #' #' This works in almost exactly the same way as [Denominate.coin()]. The only point of care is that the #' `denoms` argument here cannot take time-indexed data, but only a single value for each unit. It is #' therefore recommended to pass the time-dependent denominator data as part of `iData` when calling #' [new_coin()]. In this way, denominators can vary with time. See `vignette("denomination")`. #' #' @param x A purse class object #' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`. #' @param denoms An optional data frame of denominator data. Columns should be denominator data, with column names corresponding #' to entries in `denomby`. This must also include an ID column identified by `denoms_ID` to match rows. If `denoms` #' is not specified, will extract any potential denominator columns that were attached to `iData` when calling [new_coin()]. #' @param denomby Optional data frame which specifies which denominators to use for each indicator, and any scaling factors #' to apply. Should have columns `iCode`, `Denominator`, `ScaleFactor`. `iCode` specifies an indicator code found in `dset`, #' `Denominator` specifies a column name from `denoms` to use to denominate the corresponding column from `x`. #' `ScaleFactor` allows the possibility to scale #' denominators if needed, and specifies a factor to multiply the resulting values by. For example, if GDP is a denominator and is measured in #' dollars, dividing will create very small numbers (order 1e-10 and smaller) which could cause problems with numerical precision. If `denomby` #' is not specified, specifications will be taken from the "Denominator" column in `iMeta`, if it exists. #' @param denoms_ID An ID column for matching `denoms` with the data to be denominated. This column should contain #' uMeta codes to match with the data set extracted from the coin. #' @param f_denom A function which takes two numeric vector arguments and is used to perform the denomination for each #' column. By default, this is division, i.e. `x[[col]]/denoms[[col]]` for given columns, but any function can be passed #' that takes two numeric vectors as inputs and returns a single numeric vector. See details. #' @param write_to If specified, writes the aggregated data to `.$Data[[write_to]]`. Default `write_to = "Denominated"`. #' @param ... arguments passed to or from other methods. #' #' @return An updated purse #' @export #' #' @examples #' # build example purse #' purse <- build_example_purse(up_to = "new_coin", quietly = TRUE) #' #' # denominate using data/specs already included in coin #' purse <- Denominate(purse, dset = "Raw") #' #' Denominate.purse <- function(x, dset, denoms = NULL, denomby = NULL, denoms_ID = NULL, f_denom = NULL, write_to = NULL, ...){ # input check check_purse(x) # apply denomination to each coin x$coin <- lapply(x$coin, function(coin){ Denominate.coin(coin, dset = dset, denoms = denoms, denomby = denomby, denoms_ID = denoms_ID, f_denom = f_denom, write_to = write_to, out2 = "coin") }) # make sure still purse class class(x) <- c("purse", "data.frame") x } #' Denominate data set in a coin #' #' "Denominates" or "scales" indicators by other variables. Typically this is done by dividing extensive variables such as #' GDP by a scaling variable such as population, to give an intensive variable (GDP per capita). #' #' This function denominates a data set `dset` inside the coin. 
By default, denominating variables are taken from #' the coin, specifically as variables in `iData` with `Type = "Denominator"` in `iMeta` (input to [new_coin()]). #' Specifications to map denominators to indicators are also taken by default from `iMeta$Denominator`, if it exists. #' #' These specifications can be overridden using the `denoms` and `denomby` arguments. The operator for denomination #' can also be changed using the `f_denom` argument. #' #' See also documentation for [Denominate.data.frame()] which is called by this method. #' #' @param x A coin class object #' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`. #' @param denoms An optional data frame of denominator data. Columns should be denominator data, with column names corresponding #' to entries in `denomby`. This must also include an ID column identified by `denoms_ID` to match rows. If `denoms` #' is not specified, will extract any potential denominator columns that were attached to `iData` when calling [new_coin()]. #' @param denomby Optional data frame which specifies which denominators to use for each indicator, and any scaling factors #' to apply. Should have columns `iCode`, `Denominator`, `ScaleFactor`. `iCode` specifies an indicator code found in `dset`, #' `Denominator` specifies a column name from `denoms` to use to denominate the corresponding column from `x`. #' `ScaleFactor` allows the possibility to scale #' denominators if needed, and specifies a factor to multiply the resulting values by. For example, if GDP is a denominator and is measured in #' dollars, dividing will create very small numbers (order 1e-10 and smaller) which could cause problems with numerical precision. If `denomby` #' is not specified, specifications will be taken from the "Denominator" column in `iMeta`, if it exists. #' @param denoms_ID An ID column for matching `denoms` with the data to be denominated. This column should contain #' `uMeta` codes to match with the data set extracted from the coin. #' @param f_denom A function which takes two numeric vector arguments and is used to perform the denomination for each #' column. By default, this is division, i.e. `x[[col]]/denoms[[col]]` for given columns, but any function can be passed #' that takes two numeric vectors as inputs and returns a single numeric vector. See details. #' @param write_to If specified, writes the aggregated data to `.$Data[[write_to]]`. Default `write_to = "Denominated"`. #' @param out2 Either `"coin"` (default) to return updated coin or `"df"` to output the aggregated data set. #' @param ... arguments passed to or from other methods #' #' @return An updated coin if `out2 = "coin"`, else a data frame of denominated data if `out2 = "df"`. 
#' @export
#'
#' @examples
#' # build example coin
#' coin <- build_example_coin(up_to = "new_coin", quietly = TRUE)
#'
#' # denominate (here, we only need to say which dset to use, takes
#' # specs and denominators from within the coin)
#' coin <- Denominate(coin, dset = "Raw")
#'
Denominate.coin <- function(x, dset, denoms = NULL, denomby = NULL, denoms_ID = NULL,
                            f_denom = NULL, write_to = NULL, out2 = "coin", ...){

  # WRITE LOG ---------------------------------------------------------------

  coin <- write_log(x, dont_write = "x")

  # GET DSET, CHECKS --------------------------------------------------------

  iData <- get_dset(coin, dset)

  # DEFAULTS ----------------------------------------------------------------

  if(is.null(denoms)){
    denoms <- coin$Meta$Unit
  }
  if(is.null(denomby)){
    if("Denominator" %nin% colnames(coin$Meta$Ind)){
      stop("No Denominator column found in iMeta. Please supply denomby argument.")
    }
    denomby <- coin$Meta$Ind[c("iCode", "Denominator")]
    denomby <- denomby[!is.na(denomby$Denominator), ]
  }

  # DENOMINATE --------------------------------------------------------------

  iData_d <- Denominate(iData, denoms = denoms, denomby = denomby,
                        denoms_ID = denoms_ID, f_denom = f_denom)

  # Output ------------------------------------------------------------------

  # output list
  if(out2 == "df"){
    iData_d
  } else {
    if(is.null(write_to)){
      write_to <- "Denominated"
    }
    write_dset(coin, iData_d, dset = write_to)
  }

}

#' Denominate data sets by other variables
#'
#' "Denominates" or "scales" variables by other variables. Typically this is done by dividing extensive variables such as
#' GDP by a scaling variable such as population, to give an intensive variable (GDP per capita).
#'
#' A data frame `x` is denominated by variables found in another data frame `denoms`, according to specifications in
#' `denomby`. `denomby` specifies which columns in `x` are to be denominated, and by which columns in `denoms`, and
#' any scaling factors to apply to each denomination.
#'
#' Both `x` and `denoms` must contain an ID column which matches the rows of `x` to `denoms`. If not specified, this
#' is assumed to be `uCode`, but can also be specified using the `x_ID` and `denoms_ID` arguments. All entries in
#' `x[[x_ID]]` must be present in `denoms[[denoms_ID]]`, although extra rows are allowed in `denoms`. This is because
#' the rows of `x` are matched to the rows of `denoms` using these ID columns, to ensure that units (rows) are correctly
#' denominated.
#'
#' By default, columns of `x` are divided by columns of `denoms`. This can be generalised by setting `f_denom` to another
#' function which takes two numeric vector arguments. I.e. setting `f_denom = ``*`` ` will multiply columns of `x` and
#' `denoms` together.
#'
#' @param x A data frame of data to be denominated. Columns to be denominated must be numeric, but any columns not
#' specified in `denomby` will be ignored. `x` must also contain an ID column specified by `x_ID` to match rows with
#' `denoms`.
#' @param denoms A data frame of denominator data. Columns should be denominator data, with column names corresponding
#' to entries in `denomby`. This must also include an ID column identified by `denoms_ID` to match rows.
#' @param denomby A data frame which specifies which denominators to use for each indicator, and any scaling factors
#' to apply. Should have columns `iCode`, `Denominator`, `ScaleFactor`. `iCode` specifies a column name from `x`,
#' `Denominator` specifies a column name from `denoms` to use to denominate the corresponding column from `x`.
#' `ScaleFactor` allows the possibility to scale #' denominators if needed, and specifies a factor to multiply the resulting values by. For example, if GDP is a denominator and is measured in #' dollars, dividing will create very small numbers (order 1e-10 and smaller) which could cause problems with numerical precision. #' @param x_ID A column name of `x` to use to match rows with `denoms`. Default is `"uCode"`. #' @param denoms_ID A column name of `denoms` to use to match rows with `x`. Default is `"uCode"`. #' @param f_denom A function which takes two numeric vector arguments and is used to perform the denomination for each #' column. By default, this is division, i.e. `x[[col]]/denoms[[col]]` for given columns, but any function can be passed #' that takes two numeric vectors as inputs and returns a single numeric vector. See details. #' @param ... arguments passed to or from other methods. #' #' @examples #' # Get a sample of indicator data (note must be indicators plus a "UnitCode" column) #' iData <- ASEM_iData[c("uCode", "Goods", "Flights", "LPI")] #' # Also get some denominator data #' denoms <- ASEM_iData[c("uCode", "GDP", "Population")] #' # specify how to denominate #' denomby <- data.frame(iCode = c("Goods", "Flights"), #' Denominator = c("GDP", "Population"), #' ScaleFactor = c(1, 1000)) #' # Denominate one by the other #' iData_den <- Denominate(iData, denoms, denomby) #' #' @return A data frame of the same size as `x`, with any specified columns denominated according to specifications. #' #' @seealso #' * [WorldDenoms] A data set of some common national-level denominators. #' #' @export Denominate.data.frame <- function(x, denoms, denomby, x_ID = NULL, denoms_ID = NULL, f_denom = NULL, ...){ # CHECKS ------------------------------------------------------------------ # denoms stopifnot(is.data.frame(denoms)) # denomby stopifnot(is.data.frame(denomby)) required_cols <- c("iCode", "Denominator") lapply(required_cols, function(col){ if(is.null(denomby[[col]])){ stop("Required column not found in denomby: ", col) } }) stopifnot(is.character(denomby$iCode), is.character(denomby$Denominator)) if(!is.null(denomby$ScaleFactor)){ stopifnot(is.numeric(denomby$ScaleFactor)) } else { denomby$ScaleFactor <- 1 } # denom_ID if(is.null(denoms_ID)){ denoms_ID <- "uCode" } stopifnot(denoms_ID %in% colnames(denoms)) # denom_ID if(is.null(x_ID)){ x_ID <- "uCode" } stopifnot(x_ID %in% colnames(x)) # cross checks if(any(denomby$iCode %nin% names(x))){ stop("One or more iCode entries in denomby not found in x.") } if(any(!sapply(x[denomby$iCode], is.numeric))){ stop("One or more columns in x referred to by denomby$iCode is not numeric.") } if(any(denomby$Denominator %nin% names(denoms))){ stop("One or more Denominator entries in denomby not found in denoms.") } if(any(!sapply(denoms[denomby$Denominator], is.numeric))){ stop("One or more columns in denoms referred to by denomby$Denominator is not numeric.") } # check all uCodes in x are found in denoms if(any(x[[x_ID]] %nin% denoms[[denoms_ID]])){ stop("One or more ID codes in x not found in denoms.") } if(is.null(f_denom)){ f_denom <- `/` } # DENOMINATE -------------------------------------------------------------- # first prep denoms to match the rows of x denoms_matched <- denoms[match(x[[x_ID]], denoms[[denoms_ID]]) ,] denom_col <- function(iCode){ # col from x xcol <- x[[iCode]] # scaled denominator column denom_code <- denomby$Denominator[denomby$iCode == iCode] dcol <- denoms_matched[[denom_code]] / denomby$ScaleFactor[denomby$iCode == iCode] 
# denominate do.call(f_denom, list(xcol, dcol)) } # run function (denominate) x_denomcols <- as.data.frame(sapply(denomby$iCode, denom_col)) # subst back in x[colnames(x_denomcols)] <- x_denomcols x } #' Denominate data #' #' "Denominates" or "scales" variables by other variables. Typically this is done by dividing extensive variables such as #' GDP by a scaling variable such as population, to give an intensive variable (GDP per capita). #' #' See documentation for individual methods: #' #' * [Denominate.data.frame()] #' * [Denominate.coin()] #' * [Denominate.purse()]. #' #' This function replaces the now-defunct `denominate()` from COINr < v1.0. #' #' @param x Object to be denominated #' @param ... arguments passed to or from other methods #' #' @return See individual method documentation #' #' @examples #' # See individual method documentation #' #' @export Denominate <- function (x, ...){ UseMethod("Denominate") }
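# Because f_denom can be any binary function of two numeric vectors,
# denomination is not limited to division. A short sketch using the data
# frame method defined above (assumes the bundled ASEM_iData example data;
# the object names below are illustrative):
iData <- ASEM_iData[c("uCode", "Goods")]
denoms <- ASEM_iData[c("uCode", "Population")]
denomby <- data.frame(iCode = "Goods", Denominator = "Population")

# multiply instead of divide, by passing `*` as f_denom
iData_scaled <- Denominate(iData, denoms, denomby, f_denom = `*`)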
# ---- end of /scratch/gouwar.j/cran-all/cranData/COINr/R/denominate.R ----
# example generation

#' Build ASEM example coin
#'
#' Shortcut function to build the ASEM example coin, using inbuilt example data. This can be useful for testing and also
#' for building reproducible examples. To see the underlying commands run `edit(build_example_coin)`. See also
#' `vignette("coins")`.
#'
#' This function replaces the now-defunct `build_ASEM()` from COINr < v1.0.
#'
#' @param up_to The point up to which to build the index. If `NULL`, builds full index. Else specify a building function
#' (as a string) - the index will be built up to and including this function. This option is mainly for helping with
#' function examples. Example: `up_to = "Normalise"`.
#' @param quietly If `TRUE`, suppresses all messages.
#'
#' @examples
#' # build example coin up to data treatment step
#' coin <- build_example_coin(up_to = "Treat")
#' coin
#'
#' @return coin class object
#'
#' @export
build_example_coin <- function(up_to = NULL, quietly = FALSE){

  if(quietly){
    coin <- suppressMessages(build_example_coin(up_to = up_to, quietly = FALSE))
    return(coin)
  }

  if(!is.null(up_to)){
    stopifnot(is.character(up_to),
              length(up_to)==1)
    if(up_to %nin% c("new_coin", "Denominate", "Impute", "Screen", "Treat", "Normalise", "Aggregate")){
      stop("up_to must be the name of a building function such as 'Treat', 'new_coin', etc, or NULL to build the full example coin.")
    }
  } else {
    up_to = "theend"
  }

  # INITIALISE
  coin <- new_coin(COINr::ASEM_iData, COINr::ASEM_iMeta,
                   level_names = c("Indicator", "Pillar", "Sub-index", "Index"))
  if(up_to == "new_coin"){
    return(coin)
  }

  # DENOMINATE
  coin <- Denominate(coin, dset = "Raw")
  if(up_to == "Denominate"){
    return(coin)
  }

  # IMPUTE
  coin <- Impute(coin, dset = "Denominated", f_i = "i_mean_grp", use_group = "EurAsia_group")
  if(up_to == "Impute"){
    return(coin)
  }

  # SCREEN economies based on data availability rules
  coin <- Screen(coin, dset = "Imputed", dat_thresh = 0.9, unit_screen = "byNA")
  if(up_to == "Screen"){
    return(coin)
  }

  # TREAT data
  # Explicitly set winmax so that it is easy to find for SA
  coin <- Treat(coin, dset = "Screened", global_specs = list(f1_para = list(winmax = 5)))
  if(up_to == "Treat"){
    return(coin)
  }

  # NORMALISE data
  # explicitly set normalisation specs to find for SA
  coin <- Normalise(coin, dset = "Treated",
                    global_specs = list(f_n = "n_minmax", f_n_para = list(c(0,100))))
  if(up_to == "Normalise"){
    return(coin)
  }

  # AGGREGATE data
  coin <- Aggregate(coin, dset = "Normalised", f_ag = "a_amean")
  if(up_to == "Aggregate"){
    return(coin)
  }

  coin
}

#' Build example purse
#'
#' Shortcut function to build an example purse. This is currently an "artificial" example, in that it takes the ASEM data set
#' used in [build_example_coin()] and replicates it for five years, adding artificial noise to simulate year-on-year variation.
#' This was done simply to demonstrate the functionality of purses, and will at some point be replaced with a real example.
#' See also `vignette("coins")`.
#'
#' @param up_to The point up to which to build the index. If `NULL`, builds full index. Else specify a building function
#' (as a string) - the index will be built up to and including this function. This option is mainly for helping with
#' function examples. Example: `up_to = "Normalise"`.
#' @param quietly If `TRUE`, suppresses all messages.
#' #' @examples #' # build example purse up to unit screening step #' purse <- build_example_purse(up_to = "Screen") #' purse #' #' @return purse class object #' #' @export build_example_purse <- function(up_to = NULL, quietly = FALSE){ if(quietly){ purse <- suppressMessages(build_example_purse(up_to = up_to, quietly = FALSE)) return(purse) } if(!is.null(up_to)){ stopifnot(is.character(up_to), length(up_to)==1) if(up_to %nin% c("new_coin", "Screen", "Treat", "Normalise", "Aggregate")){ stop("up_to must be one of 'new_coin', 'Screen', 'Treat', 'Normalise', 'Aggregate' or NULL") } } else { up_to = "theend" } # INITIALISE purse <- new_coin(COINr::ASEM_iData_p, COINr::ASEM_iMeta, level_names = c("Indicator", "Pillar", "Sub-index", "Index"), split_to = "all") if(up_to == "new_coin"){ return(purse) } # SCREEN purse <- Screen(purse, dset = "Raw", dat_thresh = 0.9, unit_screen = "byNA") if(up_to == "Screen"){ return(purse) } # TREAT data purse <- Treat(purse, dset = "Screened") if(up_to == "Treat"){ return(purse) } # NORMALISE data purse <- Normalise(purse, dset = "Treated", global = TRUE) if(up_to == "Normalise"){ return(purse) } # AGGREGATE data purse <- Aggregate(purse, dset = "Normalised") if(up_to == "Aggregate"){ return(purse) } purse }
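# Hedged usage note: the up_to argument in both builders returns the object
# part-way through the pipeline, which is convenient for tests and examples.
# The expected data set names below follow the write_to defaults used in the
# calls above.
coin <- build_example_coin(up_to = "Normalise", quietly = TRUE)
names(coin$Data) # "Raw", "Denominated", "Imputed", "Screened", "Treated", "Normalised"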
# ---- end of /scratch/gouwar.j/cran-all/cranData/COINr/R/examples.R ----
#' Export a coin to Excel #' #' Exports the contents of the coin to Excel. This writes all data frames inside the coin to Excel, with each data #' frame on a separate tab. Tabs are named according to the position in the coin object. You can write other #' data frames by simply attaching them to the coin object somewhere. #' #' @param x A coin class object #' @param fname The file name/path to write to, as a character string #' @param include_log Logical: if `TRUE`, also writes data frames from the `.$Log` list inside the coin. #' @param ... arguments passed to or from other methods. #' #' @importFrom openxlsx write.xlsx #' #' @examples #' ## Here we write a COIN to Excel, but this is done to a temporary directory #' ## to avoid "polluting" the working directory when running automatic tests. #' ## In a real case, set fname to a directory of your choice. #' #' # build example coin up to data treatment step #' coin <- build_example_coin(up_to = "Treat") #' #' # write to Excel in temporary directory #' export_to_excel(coin, fname = paste0(tempdir(), "\\ASEM_results.xlsx")) #' #' # spreadsheet is at: #' print(paste0(tempdir(), "\\ASEM_results.xlsx")) #' #' # now delete temporary file to keep things tidy in testing #' unlink(paste0(tempdir(),"\\ASEM_results.xlsx")) #' #' @return .xlsx file at specified path #' #' @export export_to_excel.coin <- function(x, fname = "coin_export.xlsx", include_log = FALSE, ...){ check_coin_input(x) # function to stop tab names exceeding 31 characters, avoiding errors. trunc_str <- function(x){ if(nchar(x) > 31){ xnew <- substr(x, 1, 31) warning("Truncated tab name (", x, ") because exceeds Excel length limit (31 characters).") } else { xnew <- x } xnew } if(!include_log){ x$Log <- NULL } # recursive func to get all dfs in coin into a single list unlist_2_df <- function(x) { if (is.data.frame(x)) return(list(x)) if (!is.list(x)) return(NULL) unlist(lapply(x, unlist_2_df), FALSE) } # unlist and alter any names that are too long coinwrite <- unlist_2_df(x) names(coinwrite) <- sapply(names(coinwrite), trunc_str) # write to excel openxlsx::write.xlsx(coinwrite, file = fname, colNames = TRUE) } #' Export a purse to Excel #' #' Exports the contents of the purse to Excel. This is similar to the coin method [export_to_excel.coin()], #' but combines data sets from various time points. It also selectively writes metadata since this may be #' spread across multiple coins. #' #' @param x A purse class object #' @param fname The file name/path to write to, as a character string #' @param include_log Logical: if `TRUE`, also writes data frames from the `.$Log` list inside the coin. #' @param ... arguments passed to or from other methods. #' #' @importFrom openxlsx write.xlsx #' #' @examples #' # #' #' @return .xlsx file at specified path #' #' @export export_to_excel.purse <- function(x, fname = "coin_export.xlsx", include_log = FALSE, ...){ # Prep -------------------------------------------------------------------- check_purse_input(x) # function to stop tab names exceeding 31 characters, avoiding errors. 
trunc_str <- function(x){ if(nchar(x) > 31){ xnew <- substr(x, 1, 31) warning("Truncated tab name (", x, ") because exceeds Excel length limit (31 characters).") } else { xnew <- x } xnew } # Extract coin ------------------------------------------------------------ # For the purse method, we take the first coin and modify it: # - merge dsets over time # - small other tweaks # - then write the coin # Extract first coin in the purse coin <- x$coin[[1]] # get names of data sets first dset_names <- names(coin$Data) # get data sets with all metadata attached dsets <- lapply(dset_names, function(dset){ get_dset(x, dset, also_get = "all") }) names(dsets) <- dset_names # replace data list with full dsets over time coin$Data <- dsets # I have to reconstruct the full uMeta because coins have different numbers of units icodes <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type %in% c("Indicator", "Aggregate")] uMeta <- unique(dsets[[1]][!names(dsets[[1]]) %in% c(icodes, "Time")]) coin$Meta$Unit <- uMeta # remove log if necessary if(!include_log){ coin$Log <- NULL } # Assemble into list ------------------------------------------------------ # recursive func to get all dfs in coin into a single list unlist_2_df <- function(x) { if (is.data.frame(x)) return(list(x)) if (!is.list(x)) return(NULL) unlist(lapply(x, unlist_2_df), FALSE) } # unlist and alter any names that are too long coinwrite <- unlist_2_df(coin) names(coinwrite) <- sapply(names(coinwrite), trunc_str) # write to excel openxlsx::write.xlsx(coinwrite, file = fname, colNames = TRUE) } #' Export a coin or purse to Excel #' #' Writes coins and purses to Excel. See individual method #' documentation: #' #' This function replaces the now-defunct `coin2Excel()` from COINr < v1.0. #' #' * [export_to_excel.coin()] #' * [export_to_excel.purse()] #' #' @param x A coin or purse #' @param fname The file name to write to #' @param ... Arguments passed to/from methods #' #' @examples #' # see individual method documentation #' #' @return An Excel spreadsheet. #' #' @export export_to_excel <- function(x, fname, ...){ UseMethod("export_to_excel") }
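# A standalone illustration of the recursive unlist_2_df() helper defined in
# the methods above: it walks an arbitrarily nested list and returns a flat
# list containing every data frame found, dropping everything else. The toy
# object below is illustrative only.
unlist_2_df <- function(x) {
  if (is.data.frame(x)) return(list(x))
  if (!is.list(x)) return(NULL)
  unlist(lapply(x, unlist_2_df), FALSE)
}
obj <- list(a = data.frame(x = 1),
            b = list(c = data.frame(y = 2), d = "not a data frame"))
length(unlist_2_df(obj)) # 2: both data frames found, the character leaf dropped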
/scratch/gouwar.j/cran-all/cranData/COINr/R/export.R
# IMPUTATION

#' Impute data sets in a purse
#'
#' This function imputes the target data set `dset` in each coin using the imputation function `f_i`. This is performed
#' in the same way as the coin method [Impute.coin()], but with one "special case" for panel data. If `f_i = "impute_panel"`,
#' the data sets inside the purse are imputed using the last available data point, using the [impute_panel()]
#' function. In this case, coins are not imputed individually, but treated as a single data set. Optionally, set
#' `f_i_para = list(max_time = .)` where `.` should be substituted with the maximum
#' number of time points to search backwards for a non-`NA` value. See [impute_panel()] for more details.
#' No further arguments need to be passed to [impute_panel()]. See `vignette("imputation")` for more
#' details. See also [Impute.coin()] documentation.
#'
#' @param x A purse object
#' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`.
#' @param f_i An imputation function. For the "purse" class, `f_i = "impute_panel"` is a special
#' case: see details.
#' @param f_i_para Further arguments to pass to `f_i`, other than `x`. See details.
#' @param impute_by Specifies how to impute: if `"column"`, passes each column (indicator) separately as a numerical
#' vector to `f_i`; if `"row"`, passes each *row* separately; and if `"df"` passes the entire data set (data frame) to
#' `f_i`. The function called by `f_i` should be compatible with the type of data passed to it.
#' @param group_level A level of the framework to use for grouping indicators. This is only
#' relevant if `impute_by = "row"` or `"df"`. In that case, indicators will be split into their groups at the
#' level specified by `group_level`, and imputation will be performed across rows of the group, rather
#' than the whole data set. This can make more sense because indicators within a group are likely to be
#' more similar.
#' @param use_group Optional grouping variable name to pass to the imputation function, if this supports group
#' imputation.
#' @param normalise_first Logical: if `TRUE`, each column is normalised using a min-max operation before
#' imputation. By default this is `FALSE` unless `impute_by = "row"`. See details.
#' @param write_to Optional character string for naming the resulting data set in each coin. Data will be written to
#' `.$Data[[write_to]]`. Default is `write_to == "Imputed"`.
#' @param ... arguments passed to or from other methods.
#'
#' @return An updated purse with imputed data sets added to each coin.
#' @export
#'
#' @examples
#' # see vignette("imputation")
Impute.purse <- function(x, dset, f_i = NULL, f_i_para = NULL, impute_by = "column",
                         group_level = NULL, use_group = NULL, normalise_first = NULL,
                         write_to = NULL, ...){

  # input check
  check_purse(x)

  if(!is.null(f_i) && (f_i == "impute_panel")){

    # special case: panel imputation treats the whole purse as one data set
    iDatas <- get_dset(x, dset)

    # impute
    l_imp <- impute_panel(iDatas, max_time = f_i_para$max_time)
    # extract imputed data
    iDatas_i <- l_imp$iData_imp

    # split by Time
    iDatas_i_l <- split(iDatas_i, iDatas$Time)

    # now write dsets to coins
    x$coin <- lapply(x$coin, function(coin){

      # get Time
      tt <- coin$Meta$Unit$Time[[1]]
      if(is.null(tt)){
        stop("Time index is NULL or not found in writing imputed data set to coin.")
      }

      if(is.null(write_to)){
        write_to <- "Imputed"
      }

      # isolate data from the time point
      iData_write <- iDatas_i_l[[which(names(iDatas_i_l) == tt)]]
      # remove Time column
      iData_write <- iData_write[names(iData_write) != "Time"]

      # write dset first
      coin <- write_dset(coin, iData_write, dset = write_to)

      # also write to log - we signal that coin can't be regenerated any more
      coin$Log$can_regen <- FALSE
      coin$Log$message <- "Coin was imputed inside a purse using panel imputation. Cannot be regenerated."

      coin
    })

  } else {

    # otherwise (including f_i = NULL, which invokes defaults), apply
    # imputation to each coin individually
    x$coin <- lapply(x$coin, function(coin){
      Impute.coin(coin, dset = dset, f_i = f_i, f_i_para = f_i_para,
                  impute_by = impute_by, group_level = group_level,
                  use_group = use_group, normalise_first = normalise_first,
                  out2 = "coin", write_to = write_to)
    })

  }

  # make sure still purse class
  class(x) <- c("purse", "data.frame")
  x
}
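
# Illustrative usage (a sketch, not run), assuming the example purse builder
# `build_example_purse()` from elsewhere in the package. This imputes the Raw
# data sets across the whole purse, searching at most one time point back, as
# described in the details above:
#
#   purse <- build_example_purse(up_to = "new_coin")
#   purse <- Impute(purse, dset = "Raw", f_i = "impute_panel",
#                   f_i_para = list(max_time = 1))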

#' Impute a data set in a coin
#'
#' This imputes any `NA`s in the data set specified by `dset`
#' by invoking the function `f_i` and any optional arguments `f_i_para` on each column at a time (if
#' `impute_by = "column"`), or on each row at a time (if `impute_by = "row"`), or by passing the entire
#' data frame to `f_i` if `impute_by = "df"`.
#'
#' Clearly, the function `f_i` needs to be able to accept the data class passed to it - if
#' `impute_by` is `"row"` or `"column"` this will be a numeric vector, or if `"df"` it will be a data
#' frame. Moreover, this function should return a vector or data frame identical to the vector/data frame passed to
#' it except for `NA` values, which can be replaced. The function `f_i` is not required to replace *all* `NA`
#' values.
#'
#' When imputing row-wise, prior normalisation of the data is recommended. This is because imputation
#' will use e.g. the mean of the unit values over all indicators (columns). If the indicators are on
#' very different scales, the result will likely make no sense. If the indicators are normalised first,
#' more sensible results can be obtained. There are two options to pre-normalise: the first is by setting
#' `normalise_first = TRUE` - this is anyway the default if `impute_by = "row"`. In this case, you also
#' need to supply a vector of directions. The data will then be normalised using a min-max approach
#' before imputation, followed by the inverse operation to return the data to the original scales.
#'
#' Another approach which gives more control is to simply run [Normalise()] first, and work with the
#' normalised data from that point onwards. In that case it is better to set `normalise_first = FALSE`,
#' since by default if `impute_by = "row"` it will be set to `TRUE`.
#'
#' Checks are made on the format of the data returned by imputation functions, to ensure the
#' type and that non-`NA` values have not been inadvertently altered. This latter check allows
#' a small tolerance for numerical precision, because if the data frame is normalised, and/or
#' depending on the imputation function, there may be very small differences between the
#' non-`NA` values pre- and post-imputation.
#'
#' See also documentation for [Impute.data.frame()] and [Impute.numeric()] which are called by this function.
#'
#' @param x A coin class object
#' @param dset The name of the data set to apply the function to, which should be accessible in `.$Data`.
#' @param f_i An imputation function. See details.
#' @param f_i_para Further arguments to pass to `f_i`, other than `x`. See details.
#' @param impute_by Specifies how to impute: if `"column"`, passes each column (indicator) separately as a numerical
#' vector to `f_i`; if `"row"`, passes each *row* separately; and if `"df"` passes the entire data set (data frame) to
#' `f_i`. The function called by `f_i` should be compatible with the type of data passed to it.
#' @param use_group Optional grouping variable name to pass to the imputation function, if this supports group
#' imputation.
#' @param group_level A level of the framework to use for grouping indicators. This is only
#' relevant if `impute_by = "row"` or `"df"`. In that case, indicators will be split into their groups at the
#' level specified by `group_level`, and imputation will be performed across rows of the group, rather
#' than the whole data set. This can make more sense because indicators within a group are likely to be
#' more similar.
#' @param normalise_first Logical: if `TRUE`, each column is normalised using a min-max operation before
#' imputation. By default this is `FALSE` unless `impute_by = "row"`. See details.
#' @param out2 Either `"coin"` to return the imputed data set back to the coin, or `"df"` to simply return a data
#' frame.
#' @param write_to Optional character string for naming the data set in the coin. Data will be written to
#' `.$Data[[write_to]]`. Default is `write_to == "Imputed"`.
#' @param disable Logical: if `TRUE` will disable imputation completely and write the unaltered data set. This option is mainly useful
#' in sensitivity and uncertainty analysis (to test the effect of turning imputation on/off).
#' @param ... arguments passed to or from other methods.
#'
#' @return An updated coin with imputed data set at `.$Data[[write_to]]`
#' @export
#'
#' @examples
#' # build coin
#' coin <- build_example_coin(up_to = "new_coin")
#'
#' # impute raw data set using population groups
#' # output to data frame directly
#' Impute(coin, dset = "Raw", f_i = "i_mean_grp",
#'        use_group = "Pop_group", out2 = "df")
#'
Impute.coin <- function(x, dset, f_i = NULL, f_i_para = NULL, impute_by = "column",
                        use_group = NULL, group_level = NULL, normalise_first = NULL,
                        out2 = "coin", write_to = NULL, disable = FALSE, ...){

  # WRITE LOG ---------------------------------------------------------------

  coin <- write_log(x, dont_write = "x")

  # potentially skip all imputation
  stopifnot(is.logical(disable))
  if(disable){
    idata <- get_dset(coin, dset = dset)
    # output
    if(out2 == "df"){
      return(idata)
    } else {
      if(is.null(write_to)){
        write_to <- "Imputed"
      }
      return(write_dset(coin, idata, dset = write_to))
    }
  }

  # GET DSET, DEFAULTS ------------------------------------------------------

  iData <- get_dset(coin, dset, use_group)
  iData_ <- iData[colnames(iData) %nin% c("uCode", use_group)]

  # if normalise_first not specified, set TRUE if row-wise, else FALSE
  if(is.null(normalise_first)){
    if(impute_by == "row"){
      normalise_first <- TRUE
    } else {
      normalise_first <- FALSE
    }
  }

  # get directions: these are the directions in iMeta (needed below whether or
  # not we normalise, since they are matched to columns before imputing)
  directions <- coin$Meta$Ind[c("iCode", "Direction")]

  # add grouping as parameter if required
  if(!is.null(use_group)){
    if(is.null(f_i_para)){
      f_i_para <- list(f = iData[[use_group]])
    } else {
      f_i_para[["f"]] <- iData[[use_group]]
    }
  }

  # IMPUTE DATA --------------------------------------------------------------

  # first, we may need to split
  if(is.null(group_level)){

    directions <- directions$Direction[match(colnames(iData_), directions$iCode)]

    # no splitting here = easy
    iData_i <- Impute.data.frame(iData_, f_i = f_i, f_i_para = f_i_para,
                                 impute_by = impute_by, normalise_first = normalise_first,
                                 directions = directions)

  } else {

    if(group_level %nin% 2:coin$Meta$maxlev){
      stop("group_level is out of range: must be between 2 and max level.")
    }

    # we split the data set based on its grouping at a given level

    # get lineage
    lin <- coin$Meta$Lineage
    # make sure lineage is in the same order as colnames of data
    lin <- lin[match(colnames(iData_), lin[[1]]), ]
    # this is our factor variable for splitting cols
    f <- lin[[group_level]]

    # now split
    iData_split <- split.default(iData_, f)

    # now do the imputation
    iData_split_i <- lapply(iData_split, function(dfi){
      directions <- directions$Direction[match(colnames(dfi), directions$iCode)]
      Impute.data.frame(dfi, f_i = f_i, f_i_para = f_i_para,
                        impute_by = impute_by, normalise_first = normalise_first,
                        directions = directions)
    })

    # reassemble
    iData_i <- Reduce(cbind, iData_split_i)
    iData_i <- iData_i[colnames(iData_)]

    # check non-NAs have not changed
    x_check <- iData_i
    x_check[is.na(iData_)] <- NA
    if(!identical(x_check, iData_)){
      warning("Differences detected in non-NA values of imputed data frame.")
    }

  }

  # OUTPUT ------------------------------------------------------------------

  # reunite with uCode col
  iData_i <- cbind(uCode = iData$uCode, iData_i)

  # output
  if(out2 == "df"){
    iData_i
  } else {
    if(is.null(write_to)){
      write_to <- "Imputed"
    }
    write_dset(coin, iData_i, dset = write_to)
  }

}
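
# Illustrative sketch of a custom imputation function (not run). Any function
# whose first argument is `x` and which returns an object of the same size can
# be passed as `f_i`; the `i_zero()` helper below is hypothetical and not part
# of the package:
#
#   i_zero <- function(x){
#     x[is.na(x)] <- 0  # replace missing values with zero
#     x
#   }
#   Impute(coin, dset = "Raw", f_i = "i_zero", out2 = "df")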

#' Impute a data frame
#'
#' Impute a data frame using any function, either column-wise, row-wise or by the whole data frame in one
#' shot.
#'
#' This function only accepts data frames with all numeric columns. It imputes any `NA`s in the data frame
#' by invoking the function `f_i` and any optional arguments `f_i_para` on each column at a time (if
#' `impute_by = "column"`), or on each row at a time (if `impute_by = "row"`), or by passing the entire
#' data frame to `f_i` if `impute_by = "df"`.
#'
#' Clearly, the function `f_i` needs to be able to accept the data class passed to it - if
#' `impute_by` is `"row"` or `"column"` this will be a numeric vector, or if `"df"` it will be a data
#' frame. Moreover, this function should return a vector or data frame identical to the vector/data frame passed to
#' it except for `NA` values, which can be replaced. The function `f_i` is not required to replace *all* `NA`
#' values.
#'
#' When imputing row-wise, prior normalisation of the data is recommended. This is because imputation
#' will use e.g. the mean of the unit values over all indicators (columns). If the indicators are on
#' very different scales, the result will likely make no sense. If the indicators are normalised first,
#' more sensible results can be obtained. There are two options to pre-normalise: the first is by setting
#' `normalise_first = TRUE` - this is anyway the default if `impute_by = "row"`. In this case, you also
#' need to supply a vector of directions. The data will then be normalised using a min-max approach
#' before imputation, followed by the inverse operation to return the data to the original scales.
#'
#' Another approach which gives more control is to simply run [Normalise()] first, and work with the
#' normalised data from that point onwards. In that case it is better to set `normalise_first = FALSE`,
#' since by default if `impute_by = "row"` it will be set to `TRUE`.
#'
#' Checks are made on the format of the data returned by imputation functions, to ensure the
#' type and that non-`NA` values have not been inadvertently altered. This latter check allows
#' a small tolerance for numerical precision (via [all.equal()]), because if the data frame is
#' normalised, and/or depending on the imputation function, there may be very small differences
#' between the non-`NA` values pre- and post-imputation.
#'
#' @param x A data frame with only numeric columns.
#' @param f_i A function to use for imputation. By default, imputation is performed by simply substituting
#' the mean of non-`NA` values for each column at a time.
#' @param f_i_para Any additional parameters to pass to `f_i`, apart from `x`
#' @param impute_by Specifies how to impute: if `"column"`, passes each column separately as a numerical
#' vector to `f_i`; if `"row"`, passes each *row* separately; and if `"df"` passes the entire data frame to
#' `f_i`. The function called by `f_i` should be compatible with the type of data passed to it.
#' @param normalise_first Logical: if `TRUE`, each column is normalised using a min-max operation before
#' imputation. By default this is `FALSE` unless `impute_by = "row"`. See details.
#' @param directions A vector of directions: either -1 or 1 to indicate the direction of each column
#' of `x` - this is only used if `normalise_first = TRUE`. See details.
#' @param ... arguments passed to or from other methods.
#'
#' @return An imputed data frame
#' @export
#'
#' @examples
#' # a df of random numbers
#' X <- as.data.frame(matrix(runif(50), 10, 5))
#'
#' # introduce NAs (2 in 3 of 5 cols)
#' X[sample(1:10, 2), 1] <- NA
#' X[sample(1:10, 2), 3] <- NA
#' X[sample(1:10, 2), 5] <- NA
#'
#' # impute using column mean
#' Impute(X, f_i = "i_mean")
#'
#' # impute using row median (no normalisation)
#' Impute(X, f_i = "i_median", impute_by = "row",
#'        normalise_first = FALSE)
#'
Impute.data.frame <- function(x, f_i = NULL, f_i_para = NULL, impute_by = "column",
                              normalise_first = NULL, directions = NULL, ...){

  # CHECKS ------------------------------------------------------------------

  stopifnot(impute_by %in% c("column", "row", "df"))

  # check for non-numeric cols
  non_numeric <- !sapply(x, is.numeric)
  if(any(non_numeric)){
    stop("Non-numeric columns detected in x. Please remove these columns to use this function.")
  }

  # check for NAs
  x_NAs <- is.na(x)
  # if there are no NAs, just return df as it was
  if(sum(x_NAs) == 0){
    return(x)
  }

  # NORMALISE ---------------------------------------------------------------

  # if normalise_first not specified, set TRUE if row-wise, else FALSE
  if(is.null(normalise_first)){
    if(impute_by == "row"){
      normalise_first <- TRUE
    } else {
      normalise_first <- FALSE
    }
  }

  if(normalise_first){

    if(is.null(directions)){
      stop("To normalise the data you need to specify the 'directions' argument. If you are imputing by row, normalisation is recommended and is switched on by default.")
    }

    # checks
    stopifnot(length(directions) == ncol(x),
              all(directions %in% c(-1, 1)))

    # adjust for directions
    x_n <- as.data.frame(mapply(`*`, x, directions))

    # we will need the original min and max to reconstruct
    xmins <- sapply(x_n, min, na.rm = TRUE)
    xmaxs <- sapply(x_n, max, na.rm = TRUE)

    # min-max normalise each column
    x_n <- as.data.frame(lapply(x_n, function(x){
      (x - min(x, na.rm = TRUE))/(max(x, na.rm = TRUE) - min(x, na.rm = TRUE))
    }))

  } else {
    x_n <- x
  }

  # IMPUTE ------------------------------------------------------------------

  if(impute_by == "df"){

    # require that first arg of f_i is "x"
    f_args <- formals(f_i)
    if(is.null(f_args)){
      stop("The function specified by f_i seems to have no input arguments!")
    } else {
      if(names(f_args)[1] != "x"){
        stop("The first argument of f_i must be called 'x'.")
      }
    }

    # function args
    f_args <- list(x = x_n)
    if(!is.null(f_i_para)){
      if(!is.list(f_i_para)){
        stop("f_i_para must be a list")
      }
      f_args <- c(f_args, f_i_para)
    }

    x_imp <- do.call(what = f_i, args = f_args)

    # Checks
    if(!is.data.frame(x_imp)){
      stop("Object returned by f_i is not a data frame.")
    }
    if(!identical(dim(x_imp), dim(x))){
      stop("Object returned by f_i has different dimensions from x.")
    }

  } else if (impute_by == "column") {

    if(is.null(f_i_para)){
      x_imp <- lapply(x_n, Impute.numeric, f_i)
    } else {
      x_imp <- lapply(x_n, Impute.numeric, f_i, f_i_para)
    }

  } else if (impute_by == "row"){

    # work row-wise with apply
    if(is.null(f_i_para)){
      x_imp <- apply(x_n, 1, Impute.numeric, f_i, simplify = FALSE)
    } else {
      x_imp <- apply(x_n, 1, Impute.numeric, f_i, f_i_para, simplify = FALSE)
    }
    # I have to reassemble carefully into a df
    x_imp <- Reduce(rbind, x_imp)

  }

  x_imp <- as.data.frame(x_imp)

  # UN-NORMALISE ------------------------------------------------------------

  if(normalise_first){
    # do normalisation backwards
    x_imp <- mapply(function(x, mn, mx, direc){
      xo <- x*(mx - mn) + mn
      xo*direc
    }, x_imp, xmins, xmaxs, directions)
    x_imp <- as.data.frame(x_imp)
  }

  # CHECKS AND OUTPUT -------------------------------------------------------
  # manually reset row names
  row.names(x_imp) <- attr(x, "row.names")

  # check non-NAs have not changed
  x_check <- x_imp
  x_check[x_NAs] <- NA

  # to cross check we also have to make all cols from x numeric: some may be integer
  # columns that are changed to numeric during imputation. In this case, identical()
  # would return FALSE even if the values are the same.
  x <- as.data.frame(lapply(x, as.numeric))
  row.names(x) <- attr(x_imp, "row.names")

  if(!isTRUE(all.equal(x_check, x))){
    stop("Differences detected in non-NA values of imputed data frame.")
  }

  # replace non-NA values with original values to avoid any numerical precision issues
  x_imp[!x_NAs] <- x[!x_NAs]

  x_imp
}
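
# Illustrative sketch of row-wise imputation with pre-normalisation (not run).
# The data frame `X` and the all-positive directions vector are hypothetical;
# `directions` must have one entry (1 or -1) per column of `X`, as described
# in the details above:
#
#   X_imp <- Impute(X, f_i = "i_mean", impute_by = "row",
#                   normalise_first = TRUE, directions = rep(1, ncol(X)))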

#' Impute a numeric vector
#'
#' Imputes missing values in a numeric vector using a function `f_i`. This function should return a vector identical
#' to `x` except for `NA` values, which can be replaced. The function `f_i` is not required to replace *all* `NA`
#' values.
#'
#' This calls the function `f_i()`, with optionally further arguments `f_i_para`, to impute any missing
#' values found in `x`. By default, `f_i = "i_mean"`, which simply imputes `NA`s with the mean of the
#' non-`NA` values in `x`.
#'
#' You could also use one of the imputation functions directly (such as [i_mean()]). However, this
#' function offers a few extra advantages, such as checking the input and output formats, and making
#' sure the resulting imputed vector agrees with the input. It will also skip imputation entirely if
#' there are no `NA`s at all.
#'
#' @param x A numeric vector, possibly with `NA` values to be imputed.
#' @param f_i A function that imputes missing values in a numeric vector. See description and details.
#' @param f_i_para Optional further arguments to be passed to `f_i()`
#' @param ... arguments passed to or from other methods.
#'
#' @return An imputed numeric vector of the same length of `x`.
#' @export
#'
#' @examples
#' # a vector with a missing value
#' x <- 1:10
#' x[3] <- NA
#' x
#'
#' # impute using median
#' # this calls COINr's i_median() function
#' Impute(x, f_i = "i_median")
#'
Impute.numeric <- function(x, f_i = NULL, f_i_para = NULL, ...){

  # DEFAULTS ----------------------------------------------------------------

  f_i <- set_default(f_i, "i_mean")

  nas <- is.na(x)
  if(sum(nas) == length(x)){
    stop("Input is all NAs - cannot impute.")
  }

  # skip entirely if f_i is "none" or if there are no NAs
  # (this must come before the formals() check below, since "none" is not a function)
  if((f_i == "none") || (sum(nas) == 0)){
    return(x)
  }

  # require that first arg of f_i is "x"
  f_args <- formals(f_i)
  if(is.null(f_args)){
    stop("The function specified by f_i seems to have no input arguments!")
  } else {
    if(names(f_args)[1] != "x"){
      stop("The first argument of f_i must be called 'x'.")
    }
  }

  # function args
  f_args <- list(x = x)
  if(!is.null(f_i_para)){
    if(!is.list(f_i_para)){
      stop("f_i_para must be a list")
    }
    f_args <- c(f_args, f_i_para)
  }

  # IMPUTE --------------------------------------------------------------------

  # call imputation function
  xi <- do.call(what = f_i, args = f_args)

  # CHECK and OUTPUT ----------------------------------------------------------

  if(length(xi) != length(x)){
    stop("length of imputed vector not equal to length of x")
  }
  if(!is.numeric(xi)){
    stop("imputed vector is not numeric")
  }
  if(!identical(xi[!nas], x[!nas])){
    stop("One or more non-NA values of x has changed as a result of imputation. Check the behaviour of the imputation function.")
  }

  xi
}

#' Imputation of missing data
#'
#' This is a generic function with the following methods:
#'
#' * [Impute.numeric()]
#' * [Impute.data.frame()]
#' * [Impute.coin()]
#' * [Impute.purse()]
#'
#' See those methods for individual documentation.
#'
#' This function replaces the now-defunct `impute()` from COINr < v1.0.
#'
#' @param x Object to be imputed
#' @param ... arguments passed to or from other methods.
#'
#' @examples
#' # See individual method documentation
#'
#' @return An object of the same class as `x`, but imputed.
#'
#' @export
Impute <- function(x, ...){
  UseMethod("Impute")
}

#' Impute by mean
#'
#' Replaces `NA`s in a numeric vector with the mean of the non-`NA` values.
#'
#' @param x A numeric vector
#'
#' @return A numeric vector
#' @export
#'
#' @examples
#' x <- c(1,2,3,4, NA)
#' i_mean(x)
#'
i_mean <- function(x){

  stopifnot(is.numeric(x))

  # get mean
  mx <- mean(x, na.rm = TRUE)
  # replace NAs by mean
  x[is.na(x)] <- mx
  x
}

#' Impute by median
#'
#' Replaces `NA`s in a numeric vector with the median of the non-`NA` values.
#'
#' @param x A numeric vector
#'
#' @return A numeric vector
#' @export
#'
#' @examples
#' x <- c(1,2,3,4, NA)
#' i_median(x)
#'
i_median <- function(x){

  stopifnot(is.numeric(x))

  # get median
  mx <- stats::median(x, na.rm = TRUE)
  # replace NAs by median
  x[is.na(x)] <- mx
  x
}

#' Impute by group mean
#'
#' Replaces `NA`s in a numeric vector with the grouped arithmetic means of the non-`NA` values.
#' Groups are defined by the `f` argument.
#'
#' @param x A numeric vector
#' @param f A grouping variable, of the same length of `x`, that specifies the group that each value
#' of `x` belongs to. This will be coerced to a factor.
#' @param skip_f_na If `TRUE`, will work around any `NA`s in `f` (the corresponding values of `x` will be excluded from the imputation
#' and returned unaltered). Else if `FALSE`, will cause an error.
#'
#' @return A numeric vector
#' @export
#'
#' @examples
#' x <- c(NA, runif(10), NA)
#' f <- c(rep("a", 6), rep("b", 6))
#' i_mean_grp(x, f)
#'
i_mean_grp <- function(x, f, skip_f_na = TRUE){

  stopifnot(is.numeric(x),
            length(x) == length(f))

  # get any NAs in f
  fna <- is.na(f)
  if(sum(fna) == length(x)){
    stop("f must have at least one non-NA value")
  }

  # extract x values with non-NA f values
  if(skip_f_na){
    x_use <- x[!fna]
    f_use <- f[!fna]
  } else {
    if(any(fna)){
      stop("NAs found in f. If skip_f_na = FALSE, f cannot contain any NAs.")
    }
    x_use <- x
    f_use <- f
  }

  # split by factors, apply func then unsplit
  x_split <- split(x_use, f_use)
  x_split <- lapply(x_split, i_mean)
  x_imp <- unsplit(x_split, f_use)

  # reassemble and output
  x[!fna] <- x_imp
  x
}

#' Impute by group median
#'
#' Replaces `NA`s in a numeric vector with the grouped medians of the non-`NA` values.
#' Groups are defined by the `f` argument.
#'
#' @param x A numeric vector
#' @param f A grouping variable, of the same length of `x`, that specifies the group that each value
#' of `x` belongs to. This will be coerced to a factor.
#'
#' @return A numeric vector
#' @export
#'
#' @examples
#' x <- c(NA, runif(10), NA)
#' f <- c(rep("a", 6), rep("b", 6))
#' i_median_grp(x, f)
#'
i_median_grp <- function(x, f){

  stopifnot(is.numeric(x),
            length(x) == length(f))

  # split by factors, apply func then unsplit
  x_split <- split(x, f)
  x_split <- lapply(x_split, i_median)
  unsplit(x_split, f)
}

#' Impute panel data
#'
#' Given a data frame of panel data, with a time-index column `time_col` and a unit ID column `unit_col`, imputes other
#' columns using the entry from the latest available time point.
#'
#' This presumes that there are multiple observations for each unit code, i.e. one per time point. For each time
#' point, it searches for any missing values and replaces them with the equivalent points
#' from previous time points. It will replace using the most recently available point.
#'
#' @param iData A data frame of indicator data, containing a time index column `time_col`, a unit code column `unit_col`,
#' and other numerical columns to be imputed.
#' @param time_col The name of a column found in `iData` to be used as the time index column. Must point to a numeric column.
#' @param unit_col The name of a column found in `iData` to be used as the unit code/ID column. Must point to a character column.
#' @param cols Optionally, a character vector of names of columns to impute. If `NULL` (default), all columns apart from `time_col` and
#' `unit_col` will be imputed where possible.
#' @param max_time The maximum number of time points to look backwards to impute from. E.g. if `max_time = 1`, if an
#' `NA` is found at time \eqn{t}, it will only look for a replacement value at \eqn{t-1} but not in any time points before that.
#' By default, searches all time points available.
#'
#' @examples
#' # Copy example panel data
#' iData_p <- ASEM_iData_p
#'
#' # we introduce two NAs: one for NZ in 2022 in LPI indicator
#' iData_p$LPI[iData_p$uCode == "NZ" & iData_p$Time == 2022] <- NA
#' # one for AT, also in 2022, but for Flights indicator
#' iData_p$Flights[iData_p$uCode == "AT" & iData_p$Time == 2022] <- NA
#'
#' # impute: target only the two columns where NAs introduced
#' l_imp <- impute_panel(iData_p, cols = c("LPI", "Flights"))
#' # get imputed df
#' iData_imp <- l_imp$iData_imp
#'
#' # check the output is what we expect: both NAs introduced should now have 2021 values
#' iData_imp$LPI[iData_imp$uCode == "NZ" & iData_imp$Time == 2022] ==
#'   ASEM_iData_p$LPI[ASEM_iData_p$uCode == "NZ" & ASEM_iData_p$Time == 2021]
#'
#' iData_imp$Flights[iData_imp$uCode == "AT" & iData_imp$Time == 2022] ==
#'   ASEM_iData_p$Flights[ASEM_iData_p$uCode == "AT" & ASEM_iData_p$Time == 2021]
#'
#' @return A list containing:
#' * `.$iData_imp`: An `iData` format data frame with missing data imputed using previous time points (where possible).
#' * `.$DataT`: A data frame in the same format as `iData`, where each entry shows which time point each data point
#' came from.
#'
#' @export
impute_panel <- function(iData, time_col = NULL, unit_col = NULL, cols = NULL, max_time = NULL){

  # DEFAULTS ----------------------------------------------------------------

  if(is.null(time_col)){
    time_col <- "Time"
  }
  if(is.null(unit_col)){
    unit_col <- "uCode"
  }

  # CHECKS ------------------------------------------------------------------

  stopifnot(is.character(time_col),
            length(time_col) == 1,
            is.character(unit_col),
            length(unit_col) == 1)

  if(is.null(iData[[time_col]])){
    stop("No Time column found - use 'time_col' argument.")
  }
  if(is.null(iData[[unit_col]])){
    stop("No unit column found - use 'unit_col' argument.")
  }
  if(!is.numeric(iData[[time_col]])){
    stop("time_col refers to a non-numeric column")
  }
  if(!is.character(iData[[unit_col]])){
    stop("unit_col refers to a non-character column")
  }

  if(is.null(cols)){
    cols <- setdiff(names(iData), c(time_col, unit_col))
  }
  if(any(cols %nin% names(iData))){
    stop("One or more entries in 'cols' not found in names(iData).")
  }

  # not_numeric <- !sapply(iData[colnames(iData) %nin% c(time_col, unit_col)], is.numeric)
  # if(any(not_numeric)){
  #   stop("Non-numeric columns found other than time_col and unit_col - cannot impute.")
  # }

  # See what times are in iData
  yrs <- sort(unique(iData[[time_col]]), decreasing = TRUE)
  if(length(yrs) == 1){
    stop("Cannot impute by latest time point because only one time point of data is available.")
  }

  # From here I will reduce iData to only the cols to be imputed. Will be put back later.
  iData_orig <- iData
  iData <- iData[c(time_col, unit_col, cols)]

  # FUNC TO IMPUTE ------------------------------------------------------------

  # Function to impute a data set from a single time point, using previous years of data.
  # I have to do this unit by unit... this is the safest way to deal with the possibility of
  # (a) different ordering of units
  # (b) subsets of units being available for different years
  # Since each year of the data comes from the same table, column ordering is consistent so
  # I don't have to worry about that.
  impute_year <- function(use_year){

    # data from year
    iData_yr <- iData[iData[[time_col]] == use_year, ]

    # here I prep a data frame which will record the year used for each data point
    # we only make changes to this when a point is imputed
    DataYears <- iData_yr
    DataYears[colnames(DataYears) %nin% c(time_col, unit_col)] <- use_year
    DataYears[is.na(iData_yr)] <- NA

    # previous years
    olderyrs <- yrs[yrs < use_year]
    if(length(olderyrs) == 0){
      return(
        list(iData = iData_yr,
             DataT = DataYears)
      )
    }

    # only look up max number of points backwards
    if(!is.null(max_time)){
      olderyrs <- olderyrs[1:min(c(max_time, length(olderyrs)))]
    }

    # find ucodes of rows with NAs
    nacodes <- iData_yr[[unit_col]][rowSums(is.na(iData_yr)) > 0]

    for(ucode in nacodes){

      irow <- iData_yr[iData_yr[[unit_col]] == ucode, ]

      # we have to go year by year
      for(oldyr in olderyrs){

        # get row of same unit, for a previous year
        irowold <- iData[(iData[[time_col]] == oldyr) & (iData[[unit_col]] == ucode), ]

        # substitute in any missing values
        # first, get the equivalent entries of the old row (corresponding to NAs in new row)
        irowold_replace <- irowold[, as.logical(is.na(irow))]
        # and the names
        names_irowold <- names(irowold)[as.logical(is.na(irow))]
        # replace them into the new row
        irow[, names_irowold] <- irowold_replace

        # find which indicators were imputed here
        ind_imp <- names_irowold[!as.logical(is.na(irowold_replace))]
        # record what happened in DataYears
        DataYears[DataYears[[unit_col]] == ucode, colnames(DataYears) %in% ind_imp] <- oldyr

        # check if we need to carry on
        if(all(!is.na(irow))){break}
      }

      # replace with imputed row
      iData_yr[iData_yr[[unit_col]] == ucode, ] <- irow

    }

    # output list
    list(iData = iData_yr,
         DataT = DataYears)
  }

  # apply function to all years of data
  l_imp <- lapply(yrs, impute_year)

  # reassemble data frame
  iData_imp <- lapply(rev(l_imp), `[[`, "iData")
  iData_imp <- Reduce(rbind, iData_imp)

  stopifnot(nrow(iData_imp) == nrow(iData),
            ncol(iData_imp) == ncol(iData))

  # get data times (reversed like iData_imp, so rows are in the same order)
  DataT <- lapply(rev(l_imp), `[[`, "DataT")
  DataT <- Reduce(rbind, DataT)

  # restore the original row order: the rbind above groups rows by time, which
  # may differ from the ordering of the input data frame. This assumes each
  # (time, unit) pair appears at most once in iData.
  idx <- match(paste(iData_orig[[time_col]], iData_orig[[unit_col]]),
               paste(iData_imp[[time_col]], iData_imp[[unit_col]]))
  iData_imp <- iData_imp[idx, ]
  DataT <- DataT[idx, ]

  # return iData to its full form if cols was specified
  iData_imp_full <- iData_orig
  iData_imp_full[c(time_col, unit_col, cols)] <- iData_imp[c(time_col, unit_col, cols)]

  # return imputed data
  list(iData_imp = iData_imp_full,
       DataT = DataT)
}
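
# Illustrative usage (a sketch, not run), using the example panel data already
# referenced above. The DataT data frame records, for every entry, the time
# point that the value was taken from, so imputed points are easy to spot:
#
#   l_imp <- impute_panel(ASEM_iData_p, max_time = 1)
#   head(l_imp$DataT)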
/scratch/gouwar.j/cran-all/cranData/COINr/R/impute.R
# METHODS ADDED TO NON-COINR GENERICS

#' Print coin
#'
#' Some details about the coin
#'
#' @param x A coin
#' @param ... Arguments to be passed to or from other methods.
#'
#' @importFrom utils head
#'
#' @return Text output
#'
#' @export
print.coin <- function(x, ...){

  coin <- x

  cat("--------------\n")
  cat("A coin with...\n")
  cat("--------------\n")

  # Input

  # Units
  firstunits <- paste0(utils::head(coin$Data$Raw$uCode, 3), collapse = ", ")
  if(length(coin$Data$Raw$uCode) > 3){
    firstunits <- paste0(firstunits, ", ...")
  }

  # Indicators
  iCodes <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type == "Indicator"]
  firstinds <- paste0(utils::head(iCodes, 3), collapse = ", ")
  if(length(iCodes) > 3){
    firstinds <- paste0(firstinds, ", ...")
  }

  # Denominators
  denoms <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type == "Denominator"]
  if(!is.null(denoms)){
    ndenom <- length(denoms)
    denoms <- paste0(utils::head(denoms, 3), collapse = ", ")
    if(ndenom > 3){
      denoms <- paste0(denoms, ", ...")
    }
  } else {
    denoms <- "none"
    ndenom <- 0
  }

  # Groups
  grps <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type == "Group"]
  if(length(grps) > 0){
    ngrp <- length(grps)
    grps <- paste0(utils::head(grps, 3), collapse = ", ")
    if(ngrp > 3){
      grps <- paste0(grps, ", ...")
    }
  } else {
    grps <- "none"
    ngrp <- 0
  }

  cat("Input:\n")
  cat("  Units: ", nrow(coin$Data$Raw), " (", firstunits, ")\n", sep = "")
  cat(paste0("  Indicators: ", length(iCodes), " (", firstinds, ")\n"))
  cat(paste0("  Denominators: ", ndenom, " (", denoms, ")\n"))
  cat(paste0("  Groups: ", ngrp, " (", grps, ")\n\n"))

  # Structure
  fwk <- coin$Meta$Lineage

  cat("Structure:\n")

  for(ii in 1:ncol(fwk)){

    codes <- unique(fwk[[ii]])
    nuniq <- length(codes)
    first3 <- utils::head(codes, 3)
    if(length(codes) > 3){
      first3 <- paste0(first3, collapse = ", ")
      first3 <- paste0(first3, ", ...")
    } else {
      first3 <- paste0(first3, collapse = ", ")
    }

    # colnames are level names
    levnames <- colnames(fwk)
    # check if auto-generated, if so we don't additionally print
    if(levnames[1] == "Level_1"){
      levnames <- NULL
    }

    if(ii == 1){
      cat(paste0("  Level ", ii, " ", levnames[ii], ": ", nuniq, " indicators (", first3, ") \n"))
    } else {
      cat(paste0("  Level ", ii, " ", levnames[ii], ": ", nuniq, " groups (", first3, ") \n"))
    }

  }

  cat("\n")

  # Data sets
  cat("Data sets:\n")
  dsets <- names(coin$Data)
  for(dset in dsets){
    nunit <- nrow(coin$Data[[dset]])
    cat(paste0("  ", dset, " (", nunit, " units)\n"))
  }

}

#' Print purse
#'
#' Some details about the purse
#'
#' @param x A purse
#' @param ... Arguments to be passed to or from other methods.
#'
#' @importFrom utils head
#'
#' @return Text output
#'
#' @export
print.purse <- function(x, ...){

  coin <- x$coin[[1]]

  cat("-----------------------------\n")
  cat("A purse with...", nrow(x), "coins\n")
  cat("-----------------------------\n\n")

  dfdisplay <- data.frame(
    Time = x$Time,
    n_Units = sapply(x$coin, function(coin){nrow(coin$Data$Raw)}),
    n_Inds = sapply(x$coin, function(coin){sum(coin$Meta$Ind$Type == "Indicator")}),
    n_dsets = sapply(x$coin, function(coin){length(coin$Data)})
  )

  print(dfdisplay, row.names = FALSE)

  cat("\n")
  cat("-----------------------------------\n")
  cat("Sample from first coin (", x$Time[1], "):\n", sep = "")
  cat("-----------------------------------\n\n")

  # Input

  # Units
  firstunits <- paste0(utils::head(coin$Data$Raw$uCode, 3), collapse = ", ")
  if(length(coin$Data$Raw$uCode) > 3){
    firstunits <- paste0(firstunits, ", ...")
  }

  # Indicators
  iCodes <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type == "Indicator"]
  firstinds <- paste0(utils::head(iCodes, 3), collapse = ", ")
  if(length(iCodes) > 3){
    firstinds <- paste0(firstinds, ", ...")
  }

  # Denominators
  denoms <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type == "Denominator"]
  if(!is.null(denoms)){
    ndenom <- length(denoms)
    denoms <- paste0(utils::head(denoms, 3), collapse = ", ")
    if(ndenom > 3){
      denoms <- paste0(denoms, ", ...")
    }
  } else {
    denoms <- "none"
    ndenom <- 0
  }

  # Groups
  grps <- coin$Meta$Ind$iCode[coin$Meta$Ind$Type == "Group"]
  if(length(grps) > 0){
    ngrp <- length(grps)
    grps <- paste0(utils::head(grps, 3), collapse = ", ")
    if(ngrp > 3){
      grps <- paste0(grps, ", ...")
    }
  } else {
    grps <- "none"
    ngrp <- 0
  }

  cat("Input:\n")
  cat("  Units: ", nrow(coin$Data$Raw), " (", firstunits, ")\n", sep = "")
  cat(paste0("  Indicators: ", length(iCodes), " (", firstinds, ")\n"))
  cat(paste0("  Denominators: ", ndenom, " (", denoms, ")\n"))
  cat(paste0("  Groups: ", ngrp, " (", grps, ")\n\n"))

  # Structure
  fwk <- coin$Meta$Lineage

  cat("Structure:\n")

  for(ii in 1:ncol(fwk)){

    codes <- unique(fwk[[ii]])
    nuniq <- length(codes)
    first3 <- utils::head(codes, 3)
    if(length(codes) > 3){
      first3 <- paste0(first3, collapse = ", ")
      first3 <- paste0(first3, ", ...")
    } else {
      first3 <- paste0(first3, collapse = ", ")
    }

    # colnames are level names
    levnames <- colnames(fwk)
    # check if auto-generated, if so we don't additionally print
    if(levnames[1] == "Level_1"){
      levnames <- NULL
    }

    if(ii == 1){
      cat(paste0("  Level ", ii, " ", levnames[ii], ": ", nuniq, " indicators (", first3, ") \n"))
    } else {
      cat(paste0("  Level ", ii, " ", levnames[ii], ": ", nuniq, " groups (", first3, ") \n"))
    }

  }

  cat("\n")

  # Data sets
  cat("Data sets:\n")
  dsets <- names(coin$Data)
  for(dset in dsets){
    nunit <- nrow(coin$Data[[dset]])
    cat(paste0("  ", dset, " (", nunit, " units)\n"))
  }

}

#' Check if object is coin class
#'
#' @param x An object to be checked.
#'
#' @return Logical
#'
#' @export
is.coin <- function(x){
  inherits(x, "coin")
}

#' Check if object is purse class
#'
#' @param x An object to be checked.
#'
#' @return Logical
#'
#' @export
is.purse <- function(x){
  inherits(x, "purse")
}
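
# Illustrative usage (a sketch, not run), using the example coin builder
# referenced elsewhere in the package. Typing the object name at the console
# dispatches to print.coin() via S3:
#
#   coin <- build_example_coin(up_to = "new_coin")
#   coin            # equivalent to print(coin)
#   is.coin(coin)   # TRUE
#   is.purse(coin)  # FALSE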
/scratch/gouwar.j/cran-all/cranData/COINr/R/methods_added.R