#' t_ci_table
#'
#' A function to calculate credible intervals and make a table. See page 169.
#'
#' @usage t_ci_table(coefs,cov.mat,level=0.95,degrees=Inf,quantiles=c(0.025,0.500,0.975))
#'
#' @param coefs vector of coefficient estimates, usually posterior means
#' @param cov.mat variance-covariance matrix
#' @param level desired coverage level
#' @param degrees degrees of freedom parameter for the Student's t distribution assumption
#' @param quantiles vector of desired CDF points (quantiles) to return
#'
#' @return quantile.mat matrix of quantiles
#'
#' @author Jeff Gill
#' @importFrom stats qt var
#' @export
t_ci_table <- function(coefs, cov.mat, level = 0.95, degrees = Inf,
                       quantiles = c(0.025, 0.500, 0.975)) {
  quantile.mat <- cbind(
    coefs,
    sqrt(diag(cov.mat)),
    t(qt(quantiles, degrees) %o% sqrt(diag(cov.mat))) +
      matrix(rep(coefs, length(quantiles)), ncol = length(quantiles))
  )
  quantile.names <- c("Mean", "Std. Error")
  for (i in 1:length(quantiles))
    quantile.names <- c(quantile.names, paste(quantiles[i], "Quantile"))
  dimnames(quantile.mat)[2] <- list(quantile.names)
  return(list(title = "Posterior Quantities", round(quantile.mat, 4)))
}
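# Usage sketch (illustrative values, not from the package source): a
# hypothetical two-parameter posterior with means 1.25 and -0.48 and a
# diagonal covariance matrix, summarized with t(30) credible intervals.
coefs <- c(1.25, -0.48)
cov.mat <- diag(c(0.04, 0.09))
t_ci_table(coefs, cov.mat, degrees = 30)
# returns a list holding a title and a 2 x 5 matrix of posterior means,
# standard errors, and the 0.025/0.5/0.975 quantiles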
/scratch/gouwar.j/cran-all/cranData/BaM/R/t_ci_table.R
#' terrorism
#'
#' @description Dataset comparing incidents of terrorism to car accidents, suicides, and murders, see page 140
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{Year} The given year in which the statistics occurred
#' \item\code{X.Terrorism} The number of terrorist attacks per 100,000 in the given year
#' \item\code{X.Car.Accidents} The number of car accidents per 100,000 in the given year
#' \item\code{X.Suicide} The number of suicides per 100,000 in the given year
#' }
#'
#' @usage data(terrorism)
#' @format data frame with 14 observations of death rates for different years with 5 explanatory variables
#' @source Falkenrath, R. (2001). Analytical Models and Policy Prescription: Understanding Recent Innovation in U.S. Counterterrorism. Studies in Conflict and Terrorism 24, 159-181.
#' @name terrorism
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/terrorism-data.R
#' texas
#'
#' @description Poverty in Texas, see page 299
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{POV} a dichotomous outcome variable indicating whether 20\% or more of the county's residents live in poverty
#' \item\code{BLK} the proportion of Black residents in the county
#' \item\code{LAT} the proportion of Latino residents in the county
#' \item\code{GVT} a dichotomous variable indicating whether government activities contributed a weighted annual average of 25\% or more of labor and proprietor income over the previous 3 years
#' \item\code{SVC} a dichotomous variable indicating whether service activities contributed a weighted annual average of 50\% or more of labor and proprietor income over the previous 3 years
#' \item\code{FED} a dichotomous variable indicating whether federally owned lands make up 30\% or more of a county's land area
#' \item\code{XFR} a dichotomous factor indicating whether income from transfer payments (federal, state, and local) contributed a weighted annual average of 25\% or more of total personal income over the past three years
#' \item\code{POP} the log of the county population total for 1989
#' }
#'
#' @usage data(texas)
#' @name texas
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/texas-data.R
#' wars
#'
#' @description Data for Chinese wars example, see page 163
#'
#' The variables included in the dataset are:
#' \itemize{
#' \item\code{ONSET} ratio-level variable measuring the epochal (whether historical or calendar) time of event occurrence, measured in calendar years
#' \item\code{TERM} ratio-level variable measuring the epochal (historical) time of event conclusion, measured in calendar years
#' \item\code{EXTENT} number of belligerents involved on all sides of the war
#' \item\code{ETHNIC} intra-group or inter-group conflict
#' \item\code{DIVERSE} number of ethnic groups participating as belligerents
#' \item\code{ALLIANCE} total number of alliances among belligerents
#' \item\code{DYADS} number of alliance pairs
#' \item\code{POL.LEV} nominal-level variable measuring the political level of belligerent involvement regarding domestic and foreign belligerents, with 1 for internal war and 2 for interstate war
#' \item\code{COMPLEX} governmental level of the warring parties, where the first variable is multiplied by ten for scale purposes
#' \item\code{POLAR} number of relatively major or great powers at the time of onset
#' \item\code{BALANCE} the difference in military capabilities: minor-minor, minor-major, major-major
#' \item\code{TEMPOR} type of war: protracted rivalry, integrative conquest, disintegrative/fracturing conflict, sporadic event
#' \item\code{SCOPE} political scope of conflicts in terms of governmental units affected
#' \item\code{DURATION} duration of conflict, measured in years
#' }
#'
#' @usage data(wars)
#' @name wars
#' @format a data frame of 104 observations of Chinese wars with 15 explanatory variables
#' @source Cioffi-Revilla, C. and Lai, D. (1995). War and Politics in Ancient China, 2700 B.C. to 722 B.C.: Measurement and Comparative Analysis. Journal of Conflict Resolution 39, 467-494.
#' @docType data
NULL
/scratch/gouwar.j/cran-all/cranData/BaM/R/wars-data.R
BaSkePro <- function (x) {
  # Skeletal-part configuration: column 1 = frequency per skeleton,
  # column 2 = bone density, columns 3-11 = counts retained under the
  # nine transport scenarios matched to the alpha grid in L() below.
  Man <- c(2, 1.07, 0, 0, 2, 2, 2, 2, 2, 0, 0)
  Atl <- c(1, 0.49, 0, 1, 1, 1, 1, 1, 0, 0, 0)
  Axi <- c(1, 0.62, 0, 1, 1, 1, 1, 1, 0, 0, 0)
  Cer <- c(5, 0.45, 0, 5, 5, 5, 5, 5, 0, 0, 0)
  Tho <- c(13, 0.53, 0, 0, 13, 13, 13, 0, 0, 0, 0)
  Lum <- c(6, 0.51, 6, 6, 6, 6, 6, 6, 6, 0, 0)
  Rib <- c(26, 0.96, 26, 26, 26, 26, 26, 26, 26, 0, 0)
  Sac <- c(1, 0.4, 0, 0, 1, 1, 1, 0, 0, 0, 0)
  Sca <- c(2, 1.04, 0, 0, 0, 2, 2, 2, 0, 0, 0)
  Hum <- c(2, 1.12, 0, 0, 0, 2, 2, 2, 2, 2, 0)
  Rad <- c(2, 1.09, 0, 0, 0, 2, 2, 2, 2, 2, 0)
  Pel <- c(2, 1.02, 0, 0, 2, 2, 2, 0, 0, 0, 0)
  Mca <- c(2, 1.1, 0, 0, 0, 2, 2, 2, 2, 0, 0)
  Fem <- c(2, 1.15, 0, 0, 0, 0, 2, 2, 2, 2, 2)
  Tib <- c(2, 1.13, 0, 0, 0, 0, 2, 2, 2, 2, 2)
  Mta <- c(2, 1.1, 0, 0, 0, 0, 2, 2, 2, 2, 0)
  config <- base::as.data.frame(base::rbind(Man, Atl, Axi, Cer, Tho, Lum, Rib, Sac,
                                            Sca, Hum, Rad, Pel, Mca, Fem, Tib, Mta))

  # Likelihood-type score: mixes the transport scenarios with normal weights
  # centred on alpha (theta[1]), attenuates counts by density-mediated
  # destruction (theta[2]), and compares simulated to observed %MAU.
  L <- function(theta, config, x) {
    value <- c(-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1)
    factor <- stats::dnorm(value, theta[1], 0.25)
    factor <- factor/base::sum(factor)
    Comb <- factor[1]*config[, 3] + factor[2]*config[, 4] + factor[3]*config[, 5] +
      factor[4]*config[, 6] + factor[5]*config[, 7] + factor[6]*config[, 8] +
      factor[7]*config[, 9] + factor[8]*config[, 10] + factor[9]*config[, 11]
    NME <- Comb*base::exp(-theta[2]*0.53/config[, 2])
    MAU <- NME/config[, 1]
    valid <- base::which(x >= 0)
    PMAUsim <- MAU[valid]/sum(MAU[valid])
    PMAUobs <- x[valid, 1]/sum(x[valid, 1])
    x2 <- ((PMAUsim - PMAUobs)^2)/PMAUsim
    p <- 1/base::sum(x2)*base::length(base::which(x2 < 0.01))/base::length(valid)
    return(p)
  }

  # Initial proposal covariance from a uniform draw over the parameter box.
  Lbound <- c(-1, 0)
  Ubound <- c(1, 10)
  theta <- Lbound + (Ubound - Lbound)*base::matrix(stats::runif(20000), 10000, 2)
  C <- stats::cov(theta)

  # Metropolis sampling: proposals outside the box are redrawn, and the
  # proposal covariance is re-estimated every 10000 steps in the first third.
  N <- 100000
  theta <- base::matrix(0, N, 2)
  p <- base::array(0, N)
  thetaOld <- Lbound + (Ubound - Lbound)*stats::runif(2)
  naccept <- 0
  pOld <- L(thetaOld, config, x)
  base::cat('Sampling with MCMC\n')
  cont <- 1
  for (t in seq(1, N)) {
    if (t > N*cont/20) {
      base::cat('Progress: ', cont*5, '%\n')
      cont <- cont + 1
    }
    if (t < N/3 & floor(t/10000) == t/10000) {
      C <- cov(theta[(t - 9999):t, ])
    }
    out <- 1
    while (out > 0) {
      thetaNew <- MASS::mvrnorm(1, thetaOld, C)
      out <- 0
      for (k in seq(1, 2)) {
        if (thetaNew[k] > Ubound[k] | thetaNew[k] < Lbound[k]) {
          out <- out + 1
        }
      }
    }
    pNew <- L(thetaNew, config, x)
    r <- base::min(1, pNew/pOld)
    u <- stats::runif(1)
    if (u < r) {
      theta[t, ] <- thetaNew
      p[t] <- pNew
      naccept <- naccept + 1
      pOld <- pNew
      thetaOld <- thetaNew
    } else {
      theta[t, ] <- thetaOld
      p[t] <- pOld
    }
  }
  base::cat('Progress: 100%\n')

  # Posterior summaries: acceptance rate, medians, and 95% credible intervals.
  a <- base::round(naccept/N, 3)
  b <- base::round(stats::median(theta[, 1]), 2)
  c <- c(base::round(stats::quantile(theta[, 1], probs = 0.025), 2),
         base::round(stats::quantile(theta[, 1], probs = 0.975), 2))
  d <- base::round(stats::median(theta[, 2]), 2)
  e <- c(base::round(stats::quantile(theta[, 2], probs = 0.025), 2),
         base::round(stats::quantile(theta[, 2], probs = 0.975), 2))
  Table_res <- base::data.frame(a, b, c, d, e)
  base::colnames(Table_res) <- c("Ratio of acceptance", "Parameter alpha median",
                                 "Parameter alpha 95%CI", "Parameter beta median",
                                 "Parameter beta 95%CI")

  # Posterior histograms for alpha and beta with medians marked.
  graphics::par(mfrow = c(2, 1), mar = c(3, 4, 2, 1))
  # grDevices::x11()
  graphics::hist(theta[, 1], breaks = 40, xaxt = "n", probability = TRUE,
                 ylab = "Density", col = grDevices::rgb(1, 0, 0, 0.5),
                 main = expression(paste(alpha, " parameter", sep = "")))
  graphics::axis(side = 1, at = seq(-1, 1, 0.25))
  graphics::abline(v = stats::median(theta[, 1]), col = "red", lwd = 4)
  # grDevices::x11()
  graphics::hist(theta[, 2], breaks = 50, xaxt = "n", probability = TRUE,
                 ylab = "Density", col = grDevices::rgb(0, 0, 1, 0.5),
                 main = expression(paste(beta, " parameter", sep = "")))
  graphics::axis(side = 1, at = seq(0, 10))
  graphics::abline(v = stats::median(theta[, 2]), col = "blue", lwd = 4)
  return(Table_res)
}
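# Usage sketch (hypothetical input, not from the package documentation):
# 'x' is expected to be a one-column data frame of observed MAU values for
# the 16 skeletal parts in the row order of 'config' (Man, Atl, Axi, Cer,
# Tho, Lum, Rib, Sac, Sca, Hum, Rad, Pel, Mca, Fem, Tib, Mta); negative
# entries are treated as missing and dropped by which(x >= 0).
set.seed(1)
obs <- data.frame(MAU = runif(16))
res <- BaSkePro(obs)  # runs 100,000 Metropolis iterations; slow but runnable
res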
/scratch/gouwar.j/cran-all/cranData/BaSkePro/R/BaSkePro.4.R
## CrossMST
## mstree() and pmvnorm() below are supplied by the package imports
## (ade4 and mvtnorm, respectively).
getR1R2 = function(E, treated.index){
  R1 = R2 = 0
  for (i in 1:nrow(E)){
    e1 = is.na(match(E[i,1], treated.index))
    e2 = is.na(match(E[i,2], treated.index))
    if ((!e1) && (!e2)) R1 = R1 + 1
    if (e1 && e2) R2 = R2 + 1
  }
  return(list(R1=R1, R2=R2))
}

## function CrossMST calculates the test statistic R_M and p-value for the new test
## if perm>0, also calculate permutation p-value
CrossMST = function(distM, treated.index, perm=0, k=1, discrete.correction=TRUE){
  n = length(treated.index)
  N = dim(distM)[1]
  case.index = (1:N)[-treated.index]
  reorder = c(treated.index, case.index)
  distM2 = distM[reorder, reorder]
  E = mstree(as.dist(distM2), k)
  Ebynode = vector("list", N)
  for (i in 1:N) Ebynode[[i]] = rep(0,0)
  for (i in 1:nrow(E)){
    Ebynode[[E[i,1]]] = c(Ebynode[[E[i,1]]], E[i,2])
    Ebynode[[E[i,2]]] = c(Ebynode[[E[i,2]]], E[i,1])
  }
  nE = nrow(E)
  nodedeg = rep(0,N)
  for (i in 1:N) nodedeg[i] = length(Ebynode[[i]])
  nEi = sum(nodedeg*(nodedeg-1))  # pair of nodes sharing a node * 2
  m = N-n
  mu1 = k*n*(n-1)/N
  mu2 = k*m*(m-1)/N
  V1 = n*(n-1)*m*(m-1)/(N*(N-1)*(N-2)*(N-3))*((n-2)/(m-1)*(nEi+2*k*(N-1)-4*k^2*(N-1)^2/N)+k*(N-1)*(N-2*k)/N)
  V2 = n*(n-1)*m*(m-1)/(N*(N-1)*(N-2)*(N-3))*((m-2)/(n-1)*(nEi+2*k*(N-1)-4*k^2*(N-1)^2/N)+k*(N-1)*(N-2*k)/N)
  V12 = n*(n-1)*m*(m-1)/(N*(N-1)*(N-2)*(N-3))*(-nEi+k*(N-1)*(4*k*N-N-6*k)/N)
  temp = getR1R2(E, 1:n)
  R1 = temp$R1
  R2 = temp$R2
  if (discrete.correction){
    R1 = R1-0.5
    R2 = R2-0.5
  }
  Z1 = (R1-mu1)/sqrt(V1)
  Z2 = (R2-mu2)/sqrt(V2)
  rho = V12/sqrt(V1*V2)
  x = max(Z1, Z2)
  p2 = 1-pmvnorm(upper=rep(x,2), mean=rep(0,2), corr=matrix(c(1,rho,rho,1),2))[1]
  if (perm<=0){
    return(list(test.stat.Z=x, pval.appr=p2))
  }else{
    Rmat = Zmat = matrix(0, perm, 2)
    for (i in 1:perm){
      temp2 = getR1R2(E, sample(1:N, n))
      Rmat[i,] = c(temp2$R1, temp2$R2)
      Zmat[i,] = c(Rmat[i,]-c(mu1,mu2))/c(sqrt(V1), sqrt(V2))
    }
    stat = apply(Zmat, 1, max)
    p3 = length(which(stat>=x))/perm
    return(list(test.stat.Z=x, pval.appr=p2, pval.perm=p3))
  }
}

## function CrossNN calculates the test statistic and p-value for the new test
## if perm>0, also calculate permutation p-value
CrossNN = function(distM, treated.index, perm=0, k=1, discrete.correction=TRUE){
  n = length(treated.index)
  N = dim(distM)[1]
  if (k==1){
    case.index = (1:N)[-treated.index]
    reorder = c(treated.index, case.index)
    distM2 = distM[reorder, reorder]
    diag(distM2) = max(distM)+10
    nearest = apply(distM2, 1, which.min)
    n = length(treated.index)
    D11 = length(which(nearest[1:n]<=n))
    D22 = length(which(nearest[(n+1):N]>n))
    m = N-n
    ### pick one arrow, pick another arrow, 2n*(2n-1) possibilities
    ## the number of possibilities that the two arrows share the endpoint
    temp = as.vector(table(nearest))
    a = max(temp)
    share = 0
    if (a>1){
      for (i in 2:a){
        share = share + choose(i,2)*length(which(temp==i))
      }
    }
    ## number of mutual nearest neighbors * 2
    mutual = length(which((nearest[nearest]-1:N)==0))
  }else{
    temp = getDk(distM, treated.index, k)
    A = temp$A
    D11 = temp$D11
    D22 = temp$D22
    temp2 = table(A)
    id = as.numeric(row.names(temp2))
    deg = rep(0,N)
    deg[id] = temp2
    share = (sum(deg^2)-sum(deg))/2
    count = 0
    for (i in 1:N){
      ids = A[i,]
      count = count + length(which(A[ids,]==i))
    }
    mutual = count
  }
  ## C1: mutual/2; C2: share
  ED11 = k*n*(n-1)/(N-1)
  ED22 = k*m*(m-1)/(N-1)
  VD11 = n*m*(n-1)*(m-1)/(N*(N-1)*(N-2)*(N-3))*(k*N+mutual+(n-2)/(m-1)*(share*2+k*N-k^2*N)-2*k^2*N/(N-1))
  VD22 = n*m*(n-1)*(m-1)/(N*(N-1)*(N-2)*(N-3))*(k*N+mutual+(m-2)/(n-1)*(share*2+k*N-k^2*N)-2*k^2*N/(N-1))
  CovD = n*m*(n-1)*(m-1)/(N*(N-1)*(N-2)*(N-3))*(mutual-2*share+k^2*N*(N-3)/(N-1))
  if (discrete.correction){
    D11 = D11-0.5
    D22 = D22-0.5
  }
  Z1 = (D11-ED11)/sqrt(VD11)
  Z2 = (D22-ED22)/sqrt(VD22)
  rho = CovD/sqrt(VD11*VD22)
  x = max(Z1, Z2)
  # p1 = 2*pnorm(x)
  p2 = 1-pmvnorm(upper=rep(x,2), mean=rep(0,2), corr=matrix(c(1,rho,rho,1),2))[1]
  if (perm<=0){
    return(list(test.stat.Z=x, pval.appr=p2))
  }else{
    Dmat = Zmat = matrix(0, perm, 2)
    for (i in 1:perm){
      if (k==1){
        temp2 = getD(distM, sample(1:N, n))
        Dmat[i,] = temp2
      }else{
        temp2 = getDk(distM, sample(1:N, n), k)
        Dmat[i,] = c(temp2$D11, temp2$D22)
      }
      Zmat[i,] = (Dmat[i,]-c(ED11,ED22))/c(sqrt(VD11), sqrt(VD22))
    }
    stat = apply(Zmat, 1, max)
    p3 = length(which(stat>=x))/perm
    return(list(test.stat.Z=x, pval.appr=p2, pval.perm=p3))
  }
}

# getD calculates D11 and D22
getD = function(distM, treated.index){
  N = dim(distM)[1]
  case.index = (1:N)[-treated.index]
  reorder = c(treated.index, case.index)
  distM2 = distM[reorder, reorder]
  diag(distM2) = max(distM)+10
  nearest = apply(distM2, 1, which.min)
  n = length(treated.index)
  D11 = length(which(nearest[1:n]<=n))
  D22 = length(which(nearest[(n+1):N]>n))
  return(c(D11, D22))
}

getDk = function(distM, treated.index, k){
  N = dim(distM)[1]
  case.index = (1:N)[-treated.index]
  reorder = c(treated.index, case.index)
  distM2 = distM[reorder, reorder]
  diag(distM2) = max(distM)+10
  A = matrix(0, N, k)
  for (i in 1:N){
    A[i,] = (sort(distM2[i,1:N], index.return=T)$ix)[1:k]
  }
  n = length(treated.index)
  D11 = length(which(A[1:n,]<=n))
  D22 = length(which(A[(n+1):N,]>n))
  return(list(A=A, D11=D11, D22=D22))
}
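# Usage sketch (hypothetical data, not from the package source): check
# covariate balance between a treated group (units 1-30) and controls
# (units 31-80). CrossNN() only needs pmvnorm(); CrossMST() additionally
# needs mstree().
library(mvtnorm)
set.seed(1)
dat = rbind(matrix(rnorm(30 * 3), 30), matrix(rnorm(50 * 3, 0.3), 50))
distM = as.matrix(dist(dat))
CrossNN(distM, treated.index = 1:30, perm = 100)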
/scratch/gouwar.j/cran-all/cranData/BalanceCheck/R/BalanceCheck.r
#' @keywords internal
#' @docType package
#' @name BalancedSampling
#' @importFrom Rcpp evalCpp
#' @useDynLib BalancedSampling
#' @exportPattern "^[[:alpha:]]+"
#'
#' @description
#' Select balanced and spatially balanced probability samples in multi-dimensional
#' spaces with any prescribed inclusion probabilities.
#' It contains fast (C++ via Rcpp) implementations of the included sampling methods.
#'
#' You can access the project website at
#' <https://envisim.se>.
#'
#' @author Anton Grafström \email{[email protected]}, Jonathan Lisic, Wilmer Prentius.
#'
"_PACKAGE"
NULL
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/BalancedSampling-package.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

.cps_cpp <- function(cpsMethod, prob, x, treeBucketSize, treeMethod, eps) {
    .Call('_BalancedSampling_cps_cpp', PACKAGE = 'BalancedSampling', cpsMethod, prob, x, treeBucketSize, treeMethod, eps)
}

.cps_random_cpp <- function(prob, x, random, treeBucketSize, treeMethod, eps) {
    .Call('_BalancedSampling_cps_random_cpp', PACKAGE = 'BalancedSampling', prob, x, random, treeBucketSize, treeMethod, eps)
}

.cube_cpp <- function(prob, x, eps) {
    .Call('_BalancedSampling_cube_cpp', PACKAGE = 'BalancedSampling', prob, x, eps)
}

.lcube_cpp <- function(prob, xbal, xspread, treeBucketSize, treeMethod, eps) {
    .Call('_BalancedSampling_lcube_cpp', PACKAGE = 'BalancedSampling', prob, xbal, xspread, treeBucketSize, treeMethod, eps)
}

.cube_stratified_cpp <- function(prob, x, strata, eps) {
    .Call('_BalancedSampling_cube_stratified_cpp', PACKAGE = 'BalancedSampling', prob, x, strata, eps)
}

.lcube_stratified_cpp <- function(prob, xbalance, xspread, strata, bucketSize, method, eps) {
    .Call('_BalancedSampling_lcube_stratified_cpp', PACKAGE = 'BalancedSampling', prob, xbalance, xspread, strata, bucketSize, method, eps)
}

.getpips_cpp <- function(x, n) {
    .Call('_BalancedSampling_getpips_cpp', PACKAGE = 'BalancedSampling', x, n)
}

.hlpm2_cpp <- function(prob, x, sizes, treeBucketSize, treeMethod, eps) {
    .Call('_BalancedSampling_hlpm2_cpp', PACKAGE = 'BalancedSampling', prob, x, sizes, treeBucketSize, treeMethod, eps)
}

.lpm_cpp <- function(lpMethod, prob, x, treeBucketSize, treeMethod, eps) {
    .Call('_BalancedSampling_lpm_cpp', PACKAGE = 'BalancedSampling', lpMethod, prob, x, treeBucketSize, treeMethod, eps)
}

.lpm_int_cpp <- function(lpMethod, n, x, treeBucketSize, treeMethod) {
    .Call('_BalancedSampling_lpm_int_cpp', PACKAGE = 'BalancedSampling', lpMethod, n, x, treeBucketSize, treeMethod)
}

.rpm_cpp <- function(prob, eps) {
    .Call('_BalancedSampling_rpm_cpp', PACKAGE = 'BalancedSampling', prob, eps)
}

.spm_cpp <- function(prob, eps) {
    .Call('_BalancedSampling_spm_cpp', PACKAGE = 'BalancedSampling', prob, eps)
}

.sb_localbalance_cpp <- function(prob, x, sample, treeBucketSize, treeMethod) {
    .Call('_BalancedSampling_sb_localbalance_cpp', PACKAGE = 'BalancedSampling', prob, x, sample, treeBucketSize, treeMethod)
}

.sb_voronoi_cpp <- function(prob, x, sample, treeBucketSize, treeMethod) {
    .Call('_BalancedSampling_sb_voronoi_cpp', PACKAGE = 'BalancedSampling', prob, x, sample, treeBucketSize, treeMethod)
}

.vsb0_cpp <- function(probs, ys, xs, treeBucketSize, treeMethod) {
    .Call('_BalancedSampling_vsb0_cpp', PACKAGE = 'BalancedSampling', probs, ys, xs, treeBucketSize, treeMethod)
}

.vsbn_cpp <- function(probs, ys, xs, n, treeBucketSize, treeMethod) {
    .Call('_BalancedSampling_vsbn_cpp', PACKAGE = 'BalancedSampling', probs, ys, xs, n, treeBucketSize, treeMethod)
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/RcppExports.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' The Cube method
#'
#' @description
#' Selects balanced samples with prescribed inclusion probabilities
#' from a finite population using the fast flight Cube Method.
#'
#' @details
#' If `prob` sums to an integer n, and `prob` is included as the first
#' balancing variable, a fixed size sample (n) will be produced.
#'
#' ## Stratified cube
#' For `cubestratified`, `prob` is automatically inserted as a balancing variable.
#'
#' The stratified version uses the fast flight Cube method and pooling of
#' landing phases.
#'
#' @templateVar xbal x
#' @template sampling_template
#' @template x_template
#' @template probs_template
#'
#' @param integerStrata An integer vector of length N with stratum numbers.
#'
#' @references
#' Deville, J. C. and Tillé, Y. (2004).
#' Efficient balanced sampling: the cube method.
#' Biometrika, 91(4), 893-912.
#'
#' Chauvet, G. and Tillé, Y. (2006).
#' A fast algorithm for balanced sampling.
#' Computational Statistics, 21(1), 53-62.
#'
#' Chauvet, G. (2009).
#' Stratified balanced sampling.
#' Survey Methodology, 35, 115-119.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' s = cube(prob, x);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#'
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' strata = c(rep(1L, 100), rep(2L, 200), rep(3L, 300), rep(4L, 400));
#' s = cubestratified(prob, x, strata);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#'
#' set.seed(12345);
#' prob = c(0.2, 0.25, 0.35, 0.4, 0.5, 0.5, 0.55, 0.65, 0.7, 0.9);
#' N = length(prob);
#' x = matrix(runif(N * 2), ncol = 2);
#' ep = rep(0L, N);
#' r = 10000L;
#' for (i in seq_len(r)) {
#'   s = cube(prob, cbind(prob, x));
#'   ep[s] = ep[s] + 1L;
#' }
#' print(ep / r);
#' }
#'
cube = function(
  prob,
  x,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = as.matrix(x);
  }

  N = dim(x)[1L];
  .eps_check(eps);
  prob = .prob_check(prob, N);

  result = .cube_cpp(prob, x, eps);
  return(result);
}

#' Stratified Cube method
#'
#' @describeIn cube
#'
cubestratified = function(
  prob,
  x,
  integerStrata,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = as.matrix(x);
  }

  N = dim(x)[1L];
  .eps_check(eps);
  prob = .prob_check(prob, N);
  strata = .strata_check(integerStrata, N);

  result = .cube_stratified_cpp(prob, x, strata, eps);
  return(result);
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/cube.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' Inclusion probabilities proportional-to-size
#'
#' @family utils
#'
#' @description
#' Computes the first-order inclusion probabilities from a vector of positive numbers,
#' for a probability proportional-to-size design.
#'
#' @param x A vector of positive numbers
#' @param n The wanted sample size
#'
#' @return A vector of inclusion probabilities proportional-to-size
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' x = matrix(runif(N * 2), ncol = 2);
#' prob = getPips(x[, 1], n);
#' s = lpm2(prob, x);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#' }
#'
getPips = function(
  x,
  n
) {
  x = as.numeric(x);
  N = length(x);

  if (length(n) != 1 || n > N || n < 0 || n %% 1 != 0)
    stop("'n' must be integer in [0, N]");

  if (n == N)
    return(rep(1.0, N));
  if (n == 0)
    return(rep(0.0, N));

  result = .getpips_cpp(x, n);
  return(result);
}
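# Quick property check (illustrative, not from the package docs): for a
# probability proportional-to-size design, the returned first-order
# inclusion probabilities sum to the requested sample size n.
set.seed(1);
pips = getPips(runif(100), 10);
all.equal(sum(pips), 10); # inclusion probabilities sum to n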
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/getPips.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' Hierarchical Local Pivotal Method 2
#'
#' @description
#' Selects an initial sample using the [lpm2()], and then splits this sample into
#' subsamples of given `sizes` using successive, hierarchical selection with
#' the [lpm2()].
#' The method is used to select several subsamples, such that each subsample, and
#' the combination (i.e. the union of all subsamples), is spatially balanced.
#'
#' @details
#' The inclusion probabilities `prob` _must_ sum to an integer n.
#' The sizes of the subsamples `sum(sizes)` _must_ sum to the same integer n.
#'
#' @templateVar xspread x
#' @template sampling_template
#' @template kdtrees_template
#' @template x_template
#' @template probs_template
#'
#' @param sizes A vector of integers containing the sizes of the subsamples.
#' `sum(sizes) = sum(prob)` must hold.
#'
#' @return A matrix with the population indices of the combined sample in the
#' first column, and the associated subsample in the second column.
#'
#' @references
#' Friedman, J. H., Bentley, J. L., & Finkel, R. A. (1977).
#' An algorithm for finding best matches in logarithmic expected time.
#' ACM Transactions on Mathematical Software (TOMS), 3(3), 209-226.
#'
#' Maneewongvatana, S., & Mount, D. M. (1999, December).
#' It’s okay to be skinny, if your friends are fat.
#' In Center for geometric computing 4th annual workshop on computational geometry (Vol. 2, pp. 1-8).
#'
#' Grafström, A., Lundström, N.L.P. & Schelin, L. (2012).
#' Spatially balanced sampling through the Pivotal method.
#' Biometrics 68(2), 514-520.
#'
#' Lisic, J. J., & Cruze, N. B. (2016, June).
#' Local pivotal methods for large surveys.
#' In Proceedings of the Fifth International Conference on Establishment Surveys.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' sizes = c(10, 20, 30, 40);
#' s = hlpm2(prob, x, sizes);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#' }
#'
hlpm2 = function(
  prob,
  x,
  sizes,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);
  prob = .prob_check(prob, N);

  probsum = sum(prob);
  if (probsum %% 1 != 0)
    stop("'prob' must sum to an integer");
  if (probsum != sum(sizes))
    stop("'sizes' must sum to the same integer as 'prob'");

  result = .hlpm2_cpp(prob, x, sizes, bucketSize, method, eps);
  return(result);
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/hlpm2.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' The Local Cube method
#'
#' @description
#' Selects doubly balanced samples with prescribed inclusion probabilities
#' from a finite population using the Local Cube method.
#'
#' @details
#' If \code{prob} sums to an integer n, and \code{prob} is included as the first
#' balancing variable, a fixed size sample (n) will be produced.
#'
#' ## Stratified lcube
#' For `lcubestratified`, `prob` is automatically inserted as a balancing variable.
#'
#' The stratified version uses the fast flight Cube method and pooling of
#' landing phases.
#'
#' @templateVar xbal Xbal
#' @templateVar xspread Xspread
#' @template sampling_template
#' @template kdtrees_template
#' @template x_template
#' @template probs_template
#'
#' @param integerStrata An integer vector of length N with stratum numbers.
#'
#' @references
#' Deville, J. C. and Tillé, Y. (2004).
#' Efficient balanced sampling: the cube method.
#' Biometrika, 91(4), 893-912.
#'
#' Chauvet, G. and Tillé, Y. (2006).
#' A fast algorithm for balanced sampling.
#' Computational Statistics, 21(1), 53-62.
#'
#' Chauvet, G. (2009).
#' Stratified balanced sampling.
#' Survey Methodology, 35, 115-119.
#'
#' Grafström, A. and Tillé, Y. (2013).
#' Doubly balanced spatial sampling with spreading and restitution of auxiliary totals.
#' Environmetrics, 24(2), 120-131
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' xspr = matrix(runif(N * 2), ncol = 2);
#' s = lcube(prob, xspr, cbind(prob, x));
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#'
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' xspr = matrix(runif(N * 2), ncol = 2);
#' strata = c(rep(1L, 100), rep(2L, 200), rep(3L, 300), rep(4L, 400));
#' s = lcubestratified(prob, xspr, x, strata);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#'
#' set.seed(12345);
#' prob = c(0.2, 0.25, 0.35, 0.4, 0.5, 0.5, 0.55, 0.65, 0.7, 0.9);
#' N = length(prob);
#' x = matrix(runif(N * 2), ncol = 2);
#' xspr = matrix(runif(N * 2), ncol = 2);
#' ep = rep(0L, N);
#' r = 10000L;
#' for (i in seq_len(r)) {
#'   s = lcube(prob, xspr, cbind(prob, x));
#'   ep[s] = ep[s] + 1L;
#' }
#' print(ep / r);
#' }
#'
lcube = function(
  prob,
  Xspread,
  Xbal,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(Xbal)) {
    Xbal = as.matrix(Xbal);
  }

  if (!is.matrix(Xspread)) {
    Xspread = t(as.matrix(Xspread));
  } else {
    Xspread = t(Xspread);
  }

  N = dim(Xbal)[1L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);
  prob = .prob_check(prob, N);

  if (N != dim(Xspread)[2L])
    stop("the size of 'Xbal' and 'Xspread' does not match");

  result = .lcube_cpp(prob, Xbal, Xspread, bucketSize, method, eps);
  return(result);
}

#' Stratified Local Cube method
#'
#' @describeIn lcube
#'
lcubestratified = function(
  prob,
  Xspread,
  Xbal,
  integerStrata,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(Xbal)) {
    Xbal = as.matrix(Xbal);
  }

  if (!is.matrix(Xspread)) {
    Xspread = t(as.matrix(Xspread));
  } else {
    Xspread = t(Xspread);
  }

  N = dim(Xbal)[1L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);
  prob = .prob_check(prob, N);
  strata = .strata_check(integerStrata, N);

  if (N != dim(Xspread)[2L])
    stop("the size of 'Xbal' and 'Xspread' does not match");

  result = .lcube_stratified_cpp(prob, Xbal, Xspread, strata, bucketSize, method, eps);
  return(result);
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/lcube.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' The (Local) Pivotal Methods
#'
#' @description
#' Selects spatially balanced samples with prescribed inclusion probabilities
#' from a finite population using the Local Pivotal Method 1 (LPM1).
#'
#' @details
#' If `prob` sums to an integer n, a fixed size sample (n) will be produced.
#' For `spm` and `rpm`, `prob` must be a vector of inclusion probabilities.
#' If equal inclusion probabilities are wanted, this can be produced by
#' `rep(n / N, N)`.
#'
#' The available pivotal methods are:
#' - `lpm1`: The Local Pivotal Method 1 (Grafström et al., 2012).
#' Updates only units which are mutual nearest neighbours.
#' Selects such a pair at random.
#' - `lpm2`, `lpm`: The Local Pivotal Method 2 (Grafström et al., 2012).
#' Selects a unit at random, which competes with this unit's nearest neighbour.
#' - `lpm1s`: The Local Pivotal Method 1 search (Prentius, 2023).
#' Updates only units which are mutual nearest neighbours.
#' Selects such a pair by branching the remaining units, giving higher
#' probabilities to update a pair with a long branch.
#' This changes the algorithm of lpm1, but makes it faster.
#' - `spm`: The Sequential Pivotal Method.
#' Selects the two units with smallest indices to compete against each other.
#' If the list is ordered, the algorithm is similar to systematic sampling.
#' - `rpm`: The Random Pivotal Method.
#' Selects two units at random to compete against each other.
#' Produces a design with high entropy.
#'
#' @templateVar xspread x
#' @templateVar integerprob TRUE
#' @template sampling_template
#' @template kdtrees_template
#' @template x_template
#' @template probs_template
#'
#' @references
#' Friedman, J. H., Bentley, J. L., & Finkel, R. A. (1977).
#' An algorithm for finding best matches in logarithmic expected time.
#' ACM Transactions on Mathematical Software (TOMS), 3(3), 209-226.
#'
#' Deville, J.-C., & Tillé, Y. (1998).
#' Unequal probability sampling without replacement through a splitting method.
#' Biometrika 85, 89-101.
#'
#' Maneewongvatana, S., & Mount, D. M. (1999, December).
#' It’s okay to be skinny, if your friends are fat.
#' In Center for geometric computing 4th annual workshop on computational geometry (Vol. 2, pp. 1-8).
#'
#' Chauvet, G. (2012).
#' On a characterization of ordered pivotal sampling.
#' Bernoulli, 18(4), 1320-1340.
#'
#' Grafström, A., Lundström, N.L.P. & Schelin, L. (2012).
#' Spatially balanced sampling through the Pivotal method.
#' Biometrics 68(2), 514-520.
#'
#' Lisic, J. J., & Cruze, N. B. (2016, June).
#' Local pivotal methods for large surveys.
#' In Proceedings of the Fifth International Conference on Establishment Surveys.
#'
#' Prentius, W. (2023)
#' Manuscript.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' s = lpm2(prob, x);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#'
#' set.seed(12345);
#' prob = c(0.2, 0.25, 0.35, 0.4, 0.5, 0.5, 0.55, 0.65, 0.7, 0.9);
#' N = length(prob);
#' x = matrix(runif(N * 2), ncol = 2);
#' ep = rep(0L, N);
#' r = 10000L;
#' for (i in seq_len(r)) {
#'   s = lpm2(prob, x);
#'   ep[s] = ep[s] + 1L;
#' }
#' print(ep / r);
#'
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' lpm1(prob, x);
#' lpm2(prob, x);
#' lpm1s(prob, x);
#' spm(prob);
#' rpm(prob);
#' }
lpm = function(
  prob,
  x,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  lpm2(prob, x, type, bucketSize, eps);
}

#' Local Pivotal Method 1 (LPM1)
#'
#' @describeIn lpm
#'
lpm1 = function(
  prob,
  x,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);

  if (.prob_integer_test(prob, N)) {
    result = .lpm_int_cpp(1, prob, x, bucketSize, method);
  } else {
    prob = .prob_check(prob, N);
    result = .lpm_cpp(1, prob, x, bucketSize, method, eps);
  }

  return(result);
}

#' Local Pivotal Method 2 (LPM2)
#'
#' @describeIn lpm
#'
lpm2 = function(
  prob,
  x,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);

  if (.prob_integer_test(prob, N)) {
    result = .lpm_int_cpp(2, prob, x, bucketSize, method);
  } else {
    prob = .prob_check(prob, N);
    result = .lpm_cpp(2, prob, x, bucketSize, method, eps);
  }

  return(result);
}

#' Local Pivotal Method 1 search (LPM1s)
#'
#' @describeIn lpm
#'
lpm1s = function(
  prob,
  x,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);

  if (.prob_integer_test(prob, N)) {
    result = .lpm_int_cpp(3, prob, x, bucketSize, method);
  } else {
    prob = .prob_check(prob, N);
    result = .lpm_cpp(3, prob, x, bucketSize, method, eps);
  }

  return(result);
}

#' Sequential Pivotal Method (SPM)
#'
#' @describeIn lpm
#'
spm = function(
  prob,
  eps = 1e-12
) {
  if (length(prob) == 1)
    stop("'prob' must be a vector of probabilities");

  prob = as.numeric(prob);
  .eps_check(eps);

  result = .spm_cpp(prob, eps);
  return(result);
}

#' Random Pivotal Method (RPM)
#'
#' @describeIn lpm
#'
rpm = function(
  prob,
  eps = 1e-12
) {
  if (length(prob) == 1)
    stop("'prob' must be a vector of probabilities");

  prob = as.numeric(prob);
  .eps_check(eps);

  result = .rpm_cpp(prob, eps);
  return(result);
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/lpm.R
.pop_poisson = function(parents, meanChildren, distribution) {
  NP = nrow(parents);
  children = rpois(NP, meanChildren);
  ccs = cumsum(children);

  # Draw the population
  population = matrix(distribution(sum(children)), sum(children), ncol(parents), TRUE);

  # Center the children around their parents
  start = 1L;
  stop = 0L;

  for (i in seq_len(NP)) {
    stop = ccs[i];
    population[start:stop, ] = sweep(
      population[start:stop, ],
      2,
      parents[i, ],
      '+'
    );
    start = stop + 1L;
  }

  return(population);
}

.pop_mirror = function(population, minv, maxv) {
  N = nrow(population);
  P = ncol(population);

  if (length(minv) == 1)
    minv = rep(minv, P)
  if (length(maxv) == 1)
    maxv = rep(maxv, P)

  diff = maxv - minv;
  temp = 0.0;
  flor = 0;

  for (i in seq_len(N)) {
    for (j in seq_len(P)) {
      if (population[i, j] >= minv[j] && population[i, j] <= maxv[j]) {
        next;
      }

      temp = population[i, j] - minv[j];
      flor = floor(temp / diff[j]);

      if (flor %% 2 == 0) {
        population[i, j] = temp %% diff[j] + minv[j];
      } else {
        population[i, j] = diff[j] - (temp %% diff[j]) + minv[j];
      }
    }
  }

  return(population);
}

#' Generate populations
#'
#' @importFrom stats rnorm rpois runif
#' @description
#' Generate uniform and Poisson cluster process populations
#'
#' If `from` and `to` are used with `genpopPoisson` together with `mirror`, the
#' population will be bounded within these values.
#' For the `genpopUniform`, these numbers represent the minimum and maximum
#' values of the uniform distribution.
#'
#' @param size The size of the population
#' @param dims The number of auxiliary variables
#' @param from A number or a vector of size `dims` with the minimum values
#' @param to A number or a vector of size `dims` with the maximum values
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' x = genpopUniform(120, 2L);
#' N = nrow(x);
#' n = 60;
#' prob = rep(n / N, N);
#' s = lpm2(prob, x);
#' b = sb(prob, x, s);
#' }
#'
genpopUniform = function(size, dims = 2L, from = 0.0, to = 1.0) {
  population = matrix(
    runif(size * dims, from, to),
    size,
    dims,
    TRUE
  );

  return(population);
}

#' @describeIn genpopUniform Poisson cluster process
#' @param parents The number of parent locations
#' @param children A number or a vector of size `parents` with the mean number of
#' children to be spawned.
#' @param distribution A function taking a number as a variable, returning the
#' offset from the parent location.
#' @param mirror If `TRUE`, the population is mirrored to be inside `from` and `to`.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' x = genpopPoisson(70, 50, 2L);
#' N = nrow(x);
#' n = 60;
#' prob = rep(n / N, N);
#' s = lpm2(prob, x);
#' b = sb(prob, x, s);
#' }
#'
genpopPoisson = function(
  parents,
  children,
  dims = 2L,
  from = 0.0,
  to = 1.0,
  distribution = function(n) rnorm(n, 0.0, 0.02),
  mirror = TRUE
) {
  parpop = genpopUniform(parents, dims, from, to);
  population = .pop_poisson(parpop, children, distribution);

  if (mirror == TRUE) {
    population = .pop_mirror(population, from, to);
  }

  return(population);
}
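# Reflection sketch for the internal helper above (illustrative values,
# not from the package docs): coordinates outside [from, to] are folded
# back inside the bounds.
.pop_mirror(matrix(c(1.2, -0.1), 1), 0, 1); # 1.2 -> 0.8, -0.1 -> 0.1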
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/populations.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' Spatial balance
#'
#' @family measure
#'
#' @description
#' Calculates the spatial balance of a sample.
#'
#' @details
#' About voronoi and sumofsquares
#'
#' @templateVar xspread x
#' @templateVar integerprob TRUE
#' @template kdtrees_template
#' @template x_template
#' @template probs_template
#'
#' @param sample A vector of sample indices.
#'
#' @return The balance measure of the provided sample.
#'
#' @references
#' Friedman, J. H., Bentley, J. L., & Finkel, R. A. (1977).
#' An algorithm for finding best matches in logarithmic expected time.
#' ACM Transactions on Mathematical Software (TOMS), 3(3), 209-226.
#'
#' Maneewongvatana, S., & Mount, D. M. (1999, December).
#' It’s okay to be skinny, if your friends are fat.
#' In Center for geometric computing 4th annual workshop on computational geometry (Vol. 2, pp. 1-8).
#'
#' Stevens Jr, D. L., & Olsen, A. R. (2004).
#' Spatially balanced sampling of natural resources.
#' Journal of the American statistical Association, 99(465), 262-278.
#'
#' Grafström, A., Lundström, N.L.P. & Schelin, L. (2012).
#' Spatially balanced sampling through the Pivotal method.
#' Biometrics 68(2), 514-520.
#'
#' Prentius, W, & Grafström A. (2023).
#' Manuscript.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 500;
#' n = 70;
#' prob = rep(n / N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' s = lpm2(prob, x);
#' b = sb(prob, x, s);
#' }
sb = function(
  prob,
  x,
  sample,
  type = "kdtree2",
  bucketSize = 10
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  prob = .prob_expand(prob, N);

  if (N < length(sample))
    stop("'sample' must be a vector of unique indices");

  result = .sb_voronoi_cpp(prob, x, sample, bucketSize, method);
  return(result);
}

#' @family measure
#' @describeIn sb Spatial balance using local balance
sblb = function(
  prob,
  x,
  sample,
  type = "kdtree2",
  bucketSize = 10
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  prob = .prob_expand(prob, N);

  if (N < length(sample))
    stop("'sample' must be a vector of unique indices");

  result = .sb_localbalance_cpp(prob, x, sample, bucketSize, method);
  return(result);
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/sb.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' Spatially Correlated Poisson Sampling
#'
#' @description
#' Selects spatially balanced samples with prescribed inclusion probabilities
#' from a finite population using Spatially Correlated Poisson Sampling (SCPS).
#'
#' @details
#' If `prob` sums to an integer n, a fixed size sample (n) will be produced.
#' The implementation uses the maximal weight strategy, as specified in
#' Grafström (2012).
#'
#' ## Coordinated SCPS
#' If `rand` is supplied, coordinated SCPS will be performed.
#' The algorithm for coordinated SCPS differs from the SCPS algorithm, as
#' uncoordinated SCPS chooses a unit to update randomly, whereas coordinated SCPS
#' traverses the units in the supplied order.
#' This has a small impact on the efficiency of the algorithm for coordinated SCPS.
#'
#' ## Locally Correlated Poisson Sampling (LCPS)
#' The method differs from SCPS as LPM1 differs from LPM2. In each step of the
#' algorithm, the unit with the smallest updating distance is chosen as the
#' deciding unit.
#'
#' @templateVar xspread x
#' @templateVar integerprob TRUE
#' @template sampling_template
#' @template kdtrees_template
#' @template x_template
#' @template probs_template
#'
#' @param rand A vector of length N with random numbers.
#' If this is supplied, the decision of each unit is taken with the corresponding
#' random number. This makes it possible to coordinate the samples.
#'
#' @references
#' Friedman, J. H., Bentley, J. L., & Finkel, R. A. (1977).
#' An algorithm for finding best matches in logarithmic expected time.
#' ACM Transactions on Mathematical Software (TOMS), 3(3), 209-226.
#'
#' Maneewongvatana, S., & Mount, D. M. (1999, December).
#' It’s okay to be skinny, if your friends are fat.
#' In Center for geometric computing 4th annual workshop on computational geometry (Vol. 2, pp. 1-8).
#'
#' Grafström, A. (2012).
#' Spatially correlated Poisson sampling.
#' Journal of Statistical Planning and Inference, 142(1), 139-147.
#'
#' Prentius, W. (2023).
#' Locally correlated Poisson sampling.
#' Environmetrics, e2832.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' s = scps(prob, x);
#' plot(x[, 1], x[, 2]);
#' points(x[s, 1], x[s, 2], pch = 19);
#'
#' set.seed(12345);
#' prob = c(0.2, 0.25, 0.35, 0.4, 0.5, 0.5, 0.55, 0.65, 0.7, 0.9);
#' N = length(prob);
#' x = matrix(runif(N * 2), ncol = 2);
#' ep = rep(0L, N);
#' r = 10000L;
#' for (i in seq_len(r)) {
#'   s = scps(prob, x);
#'   ep[s] = ep[s] + 1L;
#' }
#' print(ep / r);
#'
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' scps(prob, x);
#' lcps(prob, x);
#' }
#'
scps = function(
  prob,
  x,
  rand = NULL,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);
  prob = .prob_expand(prob, N);

  if (!is.null(rand) && is.vector(rand)) {
    if (length(rand) != N)
      stop("the size of 'rand' and 'x' does not match");

    result = .cps_random_cpp(prob, x, rand, bucketSize, method, eps);
  } else {
    result = .cps_cpp(2L, prob, x, bucketSize, method, eps);
  }

  return(result);
}

#' Locally Correlated Poisson Sampling
#'
#' @describeIn scps
#'
lcps = function(
  prob,
  x,
  type = "kdtree2",
  bucketSize = 50,
  eps = 1e-12
) {
  if (!is.matrix(x)) {
    x = t(as.matrix(x));
  } else {
    x = t(x);
  }

  N = dim(x)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  .eps_check(eps);
  prob = .prob_expand(prob, N);

  result = .cps_cpp(1L, prob, x, bucketSize, method, eps);
  return(result);
}
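# Coordination sketch (illustrative, not from the package docs): reusing
# the same 'rand' vector on two sampling occasions with slightly shifted
# auxiliary data coordinates the samples, i.e. they overlap heavily.
set.seed(12345);
N = 100;
prob = rep(0.2, N);
x1 = matrix(runif(N * 2), ncol = 2);
x2 = x1 + matrix(rnorm(N * 2, 0, 0.01), ncol = 2);
rand = runif(N);
s1 = scps(prob, x1, rand);
s2 = scps(prob, x2, rand);
length(intersect(s1, s2)); # typically close to length(s1)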
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/scps.R
.eps_check = function(eps) {
  if (eps < 0.0 || 1e-4 < eps || !is.numeric(eps) || length(eps) != 1)
    stop("'eps' must be in [0.0, 1e-4]");
}

.kdtree_method_check = function(type, bucketSize) {
  if (type == "kdtree0") {
    method = 0;
  } else if (type == "kdtree1") {
    method = 1;
  } else if (type == "kdtree2") {
    method = 2;
  } else if (type == "notree") {
    method = 0;
  } else {
    stop("'type' must be 'kdtree0', 'kdtree1', 'kdtree2', or 'notree'");
  }

  return(method);
}

.kdtree_bucket_check = function(N, type, bucketSize) {
  if (type == "notree") {
    bucketSize = N;
  } else {
    bucketSize = as.numeric(bucketSize)[1L];
  }

  if (length(bucketSize) != 1 || bucketSize < 1 || bucketSize %% 1 != 0)
    stop("'bucketSize' must be integer > 0");

  return(bucketSize);
}

.prob_check = function(prob, N) {
  if (length(prob) != N)
    stop("the size of 'prob' and 'x' does not match");

  return(as.numeric(prob));
}

.prob_integer_test = function(prob, N) {
  if (length(prob) == 1 && is.numeric(prob)) {
    if (prob < 1 || prob > N || prob %% 1 != 0)
      stop("'prob' must be a vector of probabilities or a single integer in [1, N]");

    return(TRUE);
  }

  return(FALSE);
}

.prob_expand = function(prob, N) {
  if (.prob_integer_test(prob, N)) {
    prob = rep(prob / N, N);
    return(prob);
  }

  return(.prob_check(prob, N));
}

.strata_check = function(strata, N) {
  if (length(strata) != N)
    stop("the size of 'strata' and 'x' does not match");

  return(as.integer(strata))
}
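# Behaviour sketch for the internal checks above (illustrative values,
# not from the package source): .prob_expand() accepts either a full
# probability vector or a single integer sample size n, which it expands
# to the equal-probability vector rep(n/N, N).
stopifnot(identical(.prob_expand(10, 100), rep(0.1, 100)));
stopifnot(identical(.prob_expand(rep(0.1, 5), 5), rep(0.1, 5)));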
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/utils.R
# **********************************************
# Author: Wilmer Prentius
# Licence: GPL (>=2)
# **********************************************

#' Variance estimator for spatially balanced samples
#'
#' @family measure
#'
#' @description
#' Variance estimator of the HT estimator of a population total.
#'
#' @details
#' If `k = 0L`, the variance estimate is constructed by using all units that
#' have the minimum distance.
#'
#' If `k > 0L`, the variance estimate is constructed by using the `k` closest
#' units. If multiple units are located on the border, all are used.
#'
#' @template kdtrees_template
#'
#' @param probs A vector of length n with inclusion probabilities.
#' @param ys A vector of length n containing the target variable.
#' @param xs An n by p matrix of (standardized) auxiliary variables.
#' @param k The number of neighbours to construct the means around.
#'
#' @return The variance estimate.
#'
#' @references
#' Grafström, A., & Schelin, L. (2014).
#' How to select representative samples.
#' Scandinavian Journal of Statistics, 41(2), 277-290.
#'
#' @examples
#' \dontrun{
#' set.seed(12345);
#' N = 1000;
#' n = 100;
#' prob = rep(n/N, N);
#' x = matrix(runif(N * 2), ncol = 2);
#' y = runif(N);
#' s = lpm2(prob, x);
#' vsb(prob[s], y[s], x[s, ]);
#' vsb(prob[s], y[s], x[s, ], 0L);
#' }
#'
vsb = function(
  probs,
  ys,
  xs,
  k = 3L,
  type = "kdtree2",
  bucketSize = 40
) {
  if (!is.matrix(xs)) {
    xs = t(as.matrix(xs));
  } else {
    xs = t(xs);
  }

  N = dim(xs)[2L];
  method = .kdtree_method_check(type, bucketSize);
  bucketSize = .kdtree_bucket_check(N, type, bucketSize);
  probs = .prob_check(probs, N);

  if (length(ys) != N)
    stop("the size of 'ys' and 'xs' does not match");

  ys = as.numeric(ys);

  if (k == 0L) {
    result = .vsb0_cpp(probs, ys, xs, bucketSize, method);
  } else {
    result = .vsbn_cpp(probs, ys, xs, k, bucketSize, method);
  }

  return(result);
}
/scratch/gouwar.j/cran-all/cranData/BalancedSampling/R/vsb.R
#' @docType package
#' @author Xueqin Wang, Wenliang Pan, Heping Zhang, Hongtu Zhu, Yuan Tian, Weinan Xiao, Chengfeng Liu, Jin Zhu
#' @references Wenliang Pan, Yuan Tian, Xueqin Wang, Heping Zhang (2018). Ball Divergence: Nonparametric two sample test. Annals of Statistics, 46(3), 1109-1137. doi:10.1214/17-AOS1579. https://projecteuclid.org/euclid.aos/1525313077
#' @references Wenliang Pan, Xueqin Wang, Weinan Xiao & Hongtu Zhu (2018). A Generic Sure Independence Screening Procedure. Journal of the American Statistical Association. DOI: 10.1080/01621459.2018.1462709
#' @references Wenliang Pan, Xueqin Wang, Heping Zhang, Hongtu Zhu & Jin Zhu (2019). Ball Covariance: A Generic Measure of Dependence in Banach Space. Journal of the American Statistical Association. DOI: 10.1080/01621459.2018.1543600
#' @references Jin Zhu, Wenliang Pan, Wei Zheng, and Xueqin Wang (2021). Ball: An R Package for Detecting Distribution Difference and Association in Metric Spaces. Journal of Statistical Software, 97(6). doi: 10.18637/jss.v097.i06
#' @references Yue Hu, Haizhu Tan, Cai Li, Heping Zhang (2021). Identifying genetic risk variants associated with brain volumetric phenotypes via K-sample Ball Divergence method. Genetic Epidemiology, 1-11. https://doi.org/10.1002/gepi.22423
#' @rdname Ball-package
"_PACKAGE"
/scratch/gouwar.j/cran-all/cranData/Ball/R/Ball.R
#' @inheritParams bcov.test
#' @rdname bcov
#' @return
#' \item{\code{bcor }}{ Ball Correlation statistic.}
#' @export
#' @examples
#' ############# Ball Correlation #############
#' num <- 50
#' x <- 1:num
#' y <- 1:num
#' bcor(x, y)
#' bcor(x, y, weight = "prob")
#' bcor(x, y, weight = "chisq")
bcor <- function(x, y, distance = FALSE, weight = FALSE) {
  weight <- examine_weight_arguments(weight)
  x <- as.matrix(x)
  y <- as.matrix(y)
  x_y_info <- examine_x_y(x, y)
  p <- x_y_info[2]
  if (distance == FALSE) {
    if (p != 1) {
      # multivariate input: work on the pairwise distance vectors
      x <- as.double(as.vector(dist(x)))
      y <- as.double(as.vector(dist(y)))
      dst_y <- as.integer(1)
      dst_x <- as.integer(1)
    } else {
      x <- as.double(x)
      y <- as.double(y)
      dst_y <- as.integer(0)
      dst_x <- as.integer(0)
    }
  } else {
    # x and y are distance matrices: keep the lower triangles
    x <- x[lower.tri(x)]
    y <- y[lower.tri(y)]
    x <- as.double(x)
    y <- as.double(y)
    dst_y <- as.integer(1)
    dst_x <- as.integer(1)
  }
  bcor_stat <- as.double(numeric(3))
  x_number <- as.integer(1)
  f_number <- as.integer(1)
  num <- as.integer(x_y_info[1])
  p <- as.integer(1)
  k <- as.integer(1)
  nth <- as.integer(1)
  res <- .C("bcor_test", bcor_stat, y, x, x_number, f_number, num, p, k, dst_y, dst_x, nth)
  bcor_stat <- res[[1]]
  bcor_stat <- select_ball_stat(bcor_stat, weight, type = "bcor")
  return(bcor_stat)
}

#' @title Ball Correlation based Sure Independence Screening (BCor-SIS)
#' @author Wenliang Pan, Weinan Xiao, Xueqin Wang, Hongtu Zhu, Jin Zhu
#' @description Generic non-parametric sure independence screening (SIS) procedure based on Ball Correlation.
#' Ball correlation is a generic measure of dependence in Banach spaces.
#' @inheritParams bcov.test
#' @param x a numeric matrix or data.frame with \eqn{n} rows and \eqn{p} columns.
#' Each row is an observation vector and each column corresponds to an explanatory variable, generally \eqn{p >> n}.
#' @param d the hard cutoff rule suggests selecting \eqn{d} variables. Setting \code{d = "large"} or
#' \code{d = "small"} means \code{n - 1} or \code{floor(n/log(n))}
#' variables are selected. If \code{d} is an integer, \code{d} variables are selected. Default: \code{d = "small"}.
#' @param method specific method for the BCor-SIS procedure. It must be one of \code{"standard"},
#' \code{"lm"}, \code{"gam"}, \code{"interaction"}, or \code{"survival"}.
#' Setting \code{method = "standard"} means performing the standard SIS procedure,
#' while the options \code{"lm"} and \code{"gam"} mean carrying out an iterative SIS procedure with ordinary
#' linear regression and generalized additive models, respectively.
#' The options \code{"interaction"} and \code{"survival"} are designed for detecting variables
#' with potential linear interaction and associated with left censored responses, respectively.
#' Any unambiguous substring can be given. Default: \code{method = "standard"}.
#' @param distance if \code{distance = TRUE}, \code{y} will be considered as a distance matrix.
#' Arguments only available when \code{method = "standard"} and \code{method = "interaction"}. Default: \code{distance = FALSE}.
#' @param category a logical value or integer vector indicating columns to be selected as categorical variables.
#' If \code{category} is an integer vector, the positive/negative integers select/discard the corresponding columns;
#' If \code{category} is a logical value, \code{category = TRUE} selects all columns, \code{category = FALSE} selects no column.
#' Default: \code{category = FALSE}.
#' @param parms parameters list only available when \code{method = "lm"} or \code{"gam"}.
#' It contains three parameters: \code{d1}, \code{d2}, and \code{df}. \code{d1} is the
#' number of initially selected variables, \code{d2} is the number of variables added in each iteration.
#' \code{df} is the degrees of freedom of the basis in generalized additive models, playing a role only when \code{method = "gam"}.
#' Default: \code{parms = list(d1 = 5, d2 = 5, df = 3)}.
#'
#' @return
#' \item{\code{ix }}{ the indices vector corresponding to variables selected by BCor-SIS.}
#' \item{\code{method }}{ the method used.}
#' \item{\code{weight }}{ the weight used.}
#' \item{\code{complete.info }}{ a \code{list} mainly containing a \eqn{p \times 3} matrix,
#' where each row is a variable and each column is a weighted Ball Correlation statistic.
#' If \code{method = "gam"} or \code{method = "lm"}, \code{complete.info} is an empty list.}
#'
#' @details
#' \code{bcorsis} performs a model-free generic sure independence screening procedure,
#' BCor-SIS, to pick out variables from \code{x} which are potentially associated with \code{y}.
#' BCor-SIS relies on Ball correlation, a universal dependence measure in Banach spaces.
#' Ball correlation (BCor) ranges from 0 to 1. A larger BCor implies the variables are likely to be associated, while
#' a BCor equal to 0 implies they are unassociated. (See \code{\link{bcor}} for details.)
#' Consequently, BCor-SIS picks out variables with larger BCor values with \code{y}.
#'
#' Theory and numerical results indicate that BCor-SIS has the following advantages:
#' \itemize{
#' \item BCor-SIS can retain the efficient variables even when the dimensionality (i.e., \code{ncol(x)}) is
#' an exponential order of the sample size (i.e., \code{exp(nrow(x))});
#' \item It is distribution-free and model-free;
#' \item It is very robust;
#' \item It works well for complex data, such as shape and survival data;
#' }
#'
#' If \code{x} is a matrix, the sample sizes of \code{x} and \code{y} must agree.
#' If \code{x} is a \code{\link{list}} object, each element of this \code{list} must have the same sample size.
#' \code{x} and \code{y} must not contain missing or infinite values.
#'
#' When \code{method = "survival"}, the matrix or data.frame passed to \code{y} must have exactly two columns, where the first column is
#' event (failure) time while the second column is a dichotomous censored status.
#'
#' @note
#' \code{bcorsis} simultaneously computes Ball Correlation statistics with
#' \code{"constant"}, \code{"probability"}, and \code{"chisquare"} weights.
#' Users can get other Ball Correlation statistics with a different weight in the \code{complete.info} element of the output.
#' We give a quick example below to illustrate.
#'
#' @seealso
#' \code{\link{bcor}}
#'
#' @references Wenliang Pan, Xueqin Wang, Weinan Xiao & Hongtu Zhu (2018) A Generic Sure Independence Screening Procedure, Journal of the American Statistical Association, DOI: 10.1080/01621459.2018.1462709
#'
#' @export
#' @examples
#' \dontrun{
#'
#' ############### Quick Start for bcorsis function ###############
#' set.seed(1)
#' n <- 150
#' p <- 3000
#' x <- matrix(rnorm(n * p), nrow = n)
#' eps <- rnorm(n)
#' y <- 3 * x[, 1] + 5 * (x[, 3])^2 + eps
#' res <- bcorsis(y = y, x = x)
#' head(res[["ix"]])
#' head(res[["complete.info"]][["statistic"]])
#'
#' ############### BCor-SIS: Censored Data Example ###############
#' data("genlung")
#' result <- bcorsis(x = genlung[["covariate"]], y = genlung[["survival"]],
#'                   method = "survival")
#' index <- result[["ix"]]
#' top_gene <- colnames(genlung[["covariate"]])[index]
#' head(top_gene, n = 1)
#'
#'
#' ############### BCor-SIS: Interaction Pursuing ###############
#' set.seed(1)
#' n <- 150
#' p <- 3000
#' x <- matrix(rnorm(n * p), nrow = n)
#' eps <- rnorm(n)
#' y <- 3 * x[, 1] * x[, 5] * x[, 10] + eps
#' res <- bcorsis(y = y, x = x, method = "interaction")
#' head(res[["ix"]])
#'
#' ############### BCor-SIS: Iterative Method ###############
#' library(mvtnorm)
#' set.seed(1)
#' n <- 150
#' p <- 3000
#' sigma_mat <- matrix(0.5, nrow = p, ncol = p)
#' diag(sigma_mat) <- 1
#' x <- rmvnorm(n = n, sigma = sigma_mat)
#' eps <- rnorm(n)
#' rm(sigma_mat); gc(reset = TRUE)
#' y <- 3 * (x[, 1])^2 + 5 * (x[, 2])^2 + 5 * x[, 8] - 8 * x[, 16] + eps
#' res <- bcorsis(y = y, x = x, method = "lm", d = 15)
#' res <- bcorsis(y = y, x = x, method = "gam", d = 15)
#' res[["ix"]]
#'
#' ############### Weighted BCor-SIS: Probability weight ###############
#' set.seed(1)
#' n <- 150
#' p <- 3000
#' x <- matrix(rnorm(n * p), nrow = n)
#' eps <- rnorm(n)
#' y <- 3 * x[, 1] + 5 * (x[, 3])^2 + eps
#' res <- bcorsis(y = y, x = x, weight = "prob")
#' head(res[["ix"]])
#' # Alternative, chisq weight:
#' res <- bcorsis(y = y, x = x, weight = "chisq")
#' head(res[["ix"]])
#'
#' ############### BCor-SIS: GWAS data ###############
#' set.seed(1)
#' n <- 150
#' p <- 3000
#' x <- sapply(1:p, function(i) {
#'   sample(0:2, size = n, replace = TRUE)
#' })
#' eps <- rnorm(n)
#' y <- 6 * x[, 1] - 7 * x[, 2] + 5 * x[, 3] + eps
#' res <- bcorsis(x = x, y = y, category = TRUE)
#' head(res[["ix"]])
#' head(res[["complete.info"]][["statistic"]])
#'
#' x <- cbind(matrix(rnorm(n * 2), ncol = 2), x)
#' # remove the first two columns:
#' res <- bcorsis(x = x, y = y, category = c(-1, -2))
#' head(res[["ix"]])
#'
#' x <- cbind(x[, 3:5], matrix(rnorm(n * p), ncol = p))
#' res <- bcorsis(x = x, y = y, category = 1:3)
#' head(res[["ix"]], n = 10)
#' }
bcorsis <- function(x, y, d = "small",
                    weight = c("constant", "probability", "chisquare"),
                    method = "standard", distance = FALSE, category = FALSE,
                    parms = list(d1 = 5, d2 = 5, df = 3), num.threads = 0) {
  seed <- 1
  y <- as.matrix(y)
  x <- as.matrix(x)
  n <- examine_x_y(x, y)[1]
  p <- dim(x)[2]
  y_p <- dim(y)[2]
  colnames(x) <- paste0("x", 1:p)
  colnames(y) <- paste0("y", 1:y_p)
  ids <- 1:p
  complete_info <- list()
  # check weight
  weight <- examine_weight_arguments(weight)
  # decide candidate size
  final_d <- examine_candiate_size(n, d, p)
  # check category
  category_index <- examine_category(category, p)
  if (length(category_index) != 0 && (method %in% c("lm", "gam", "survival", "interaction"))) {
    stop("Handling category variables is only available when method = \"standard\".")
  }
  # get arguments:
  d1 <- parms$d1
parms$d2 df <- parms$df # examine method arguments method <- examine_method_arguments(method) # examine dst and method arguments examine_dst_method(dst = distance, method = method) if(method == "survival") { Xhavepickout <- bcorsis.surv(y = y, x = x, final_d = final_d, n = n, p = p, ids = ids) complete_info[[1]] <- Xhavepickout[[2]] Xhavepickout <- Xhavepickout[[1]] } if(method %in% c("standard", "pvalue", "interaction")) { if(method == "pvalue") { # examine_R_arguments(R) # seed <- examine_seed_arguments(seed = seed) # set.seed(seed = seed) stop("After version 1.2.0, 'pvalue' method is no longer supported.") } # data prepare for screening: if(distance == FALSE) { if(y_p != 1) { y <- as.vector(dist(y)) distance <- TRUE } } else { y <- y[lower.tri(y)] } # BCor-SIS: rcory_result <- apply_bcor_wrap(x = x, y = y, n = n, p = y_p, distance = distance, weight = weight, method = method, num.threads = num.threads, category = category_index) Xhavepickout <- get_screened_vars(ids, rcory_result[[2]], final_d) complete_info[[1]] <- rcory_result[[1]] # extra method for interaction: Xhavepickout2 <- c() if(method == "interaction") { rcory2_result <- apply_bcor_wrap(x = (x)^2, y = y, n = n, p = y_p, distance = distance, weight = weight, method = method, num.threads = num.threads, category = c()) Xhavepickout2 <- get_screened_vars(ids, rcory2_result[[2]], final_d) complete_info[[2]] <- rcory_result[[2]] } Xhavepickout <- unique(c(Xhavepickout, Xhavepickout2)) } if(method %in% c("gam", "lm")) { # data prepare for screening: y_copy <- preprocess_bcorsis_y(y, y_p) y_copy <- y_copy[[1]] distance <- y_copy[[2]] R <- 0 # Initial screening: rcory_result <- apply_bcor_wrap(x = x, y = y_copy, n = n, p = y_p, distance = distance, weight = weight, method = method, num.threads = num.threads, category = c()) # complete_info[[1]] <- rcory_result[[1]] # get d1 variables as initial variables set: Xhavepickout <- get_screened_vars(ids, rcory_result, d1) Xlastpickout <- Xhavepickout ids <- setdiff(ids, Xhavepickout) # Iterative: if (method == 'lm') { while(length(Xhavepickout) < final_d) { # lm fit for x Xnew <- stats::residuals(stats::lm(x[, ids] ~ x[, Xhavepickout])) # lm fit for y y <- stats::residuals(stats::lm(y ~ x[, Xlastpickout])) # BCor-screening y_copy <- preprocess_bcorsis_y(y, y_p)[[1]] rcory_result <- apply_bcor_wrap(x = Xnew, y = y_copy, n = n, p = y_p, distance = distance, weight = weight, method = method, num.threads = num.threads, category = c()) # get d2 variables for each iteration: Xlastpickout <- get_screened_vars(ids, rcory_result, d2) Xhavepickout <- c(Xhavepickout, Xlastpickout) ids <- setdiff(ids, Xlastpickout) } } if (method == 'gam') { while(length(Xhavepickout) < final_d) { # gam fit for x lastpickout_formula <- paste0(' + gam::s(',colnames(x)[Xlastpickout], collapse = paste0(", df = ", df, ")")) lastpickout_formula <- paste0(lastpickout_formula, paste0(", df = ", df, ")"), collapse = "") lastpickout_dat <- x[, Xlastpickout] Xnew <- sapply(ids, function(index){ formula_one <- paste0(colnames(x)[index], "~", lastpickout_formula) formula_one <- stats::as.formula(formula_one) dat <- as.data.frame(cbind(x[, index], lastpickout_dat)) colnames(dat)[1] <- colnames(x)[index] # colnames(dat) <- paste0("x",c(x,Xhavepickout)) suppressWarnings(residuals_value <- gam::gam(formula_one, data = dat)[["residuals"]]) residuals_value }) # gam fit for y dat <- data.frame("y" = y, lastpickout_dat) formula_Y <- as.formula(paste("y ~ ", lastpickout_formula)) suppressWarnings(y <- gam::gam(formula = formula_Y, data = 
dat)$residuals) # BCor-screening y_copy <- preprocess_bcorsis_y(y, y_p)[[1]] rcory_result <- apply_bcor_wrap(x = Xnew, y = y_copy, n = n, p = y_p, distance = distance, weight = weight, method = method, num.threads = num.threads, category = c()) # get d2 variables for each iteration: Xlastpickout <- get_screened_vars(ids, rcory_result, d2) Xhavepickout <- c(Xhavepickout, Xlastpickout) ids <- setdiff(ids, Xlastpickout) } } } # return: complete_info[[2]] <- n complete_info[[3]] <- p names(complete_info) <- c("statistic", "n", "p") list("ix" = Xhavepickout, "method" = method, "weight" = weight, "complete.info" = complete_info) } #' @title Ball Correlation Sure Independence Screening For Survival data #' @description Utilize extension of Ball Correlation in survival to select d variables related to survival status. #' @inheritParams bcorsis #' @param y a numeric matrix (first column should be event time, second column should be survival status) or Surv object #' @param standized allows the user to standardize the covariate #' @return the ids of selected variables #' @noRd #' bcorsis.surv <- function(y, x, final_d, n, p, ids, standized = TRUE){ # prepare for screening time <- y[, 1] delta <- y[, 2] ord.t <- sort(time) ix <- order(time) ord.delta <- delta[ix] x <- x[ix, ] if(standized) { x <- apply(x, 2, scale) } # BCor Screening(survival) fitc <- survival::survfit(survival::Surv(time, 1 - delta) ~ 1) Sc <- fitc[["surv"]] if(length(unique(ord.t)) != n) { rep_num <- as.data.frame(table(ord.t))[, "Freq"] Sc <- mapply(function(x, y) { rep(x, y) }, Sc, rep_num, SIMPLIFY = FALSE) Sc <- unlist(Sc) } t_rank <- rank(ord.t, ties.method = "max") - 1 rcory_result <- apply(x, 2, function(x){ bcor_surv(x = x, time_value = t_rank, delta = ord.delta, Sc = Sc, n = n) }) Xhavepickout <- get_screened_vars(ids, rcory_result, final_d) list(Xhavepickout, rcory_result) }
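# ---------------------------------------------------------------------------
# Usage sketch for the survival screening implemented above (illustration
# only; the simulated data-generating model below is an assumption, not taken
# from the package). The first column of `y` is the observed time and the
# second column is the censoring status, as required by method = "survival".
# ---------------------------------------------------------------------------
# set.seed(1)
# n <- 100
# p <- 1000
# x <- matrix(rnorm(n * p), nrow = n)
# event_time <- rexp(n, rate = exp(x[, 1]))         # hazard driven by x[, 1]
# censor_time <- rexp(n, rate = 0.25)
# y <- cbind(pmin(event_time, censor_time),         # observed time
#            as.integer(event_time <= censor_time)) # 1 = event, 0 = censored
# res <- bcorsis(x = x, y = y, method = "survival", d = 10)
# res[["ix"]]                                       # x[, 1] should rank highly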
/scratch/gouwar.j/cran-all/cranData/Ball/R/bcorsis.R
#' @title Ball Covariance Test
#' @author Wenliang Pan, Xueqin Wang, Heping Zhang, Hongtu Zhu, Jin Zhu
#' @description Ball Covariance test of independence.
#' Ball covariance is a generic dependence measure in Banach spaces.
#'
#' @inheritParams bd.test
#' @param x a numeric vector, matrix, data.frame, or a list containing at least two numeric vectors, matrices, or data.frames.
#' @param y a numeric vector, matrix, or data.frame.
#' @param num.permutations the number of permutation replications. When \code{num.permutations = 0}, the function just returns
#' the Ball Covariance statistic. Default: \code{num.permutations = 99}.
#' @param distance if \code{distance = TRUE}, the elements of \code{x} and \code{y} are considered as distance matrices.
#' @param weight a logical or character string used to choose the weight form of the Ball Covariance statistic.
#' If input is a character string, it must be one of \code{"constant"}, \code{"probability"}, or \code{"chisquare"}.
#' Any unambiguous substring can be given.
#' If input is a logical value, it is equivalent to \code{weight = "probability"} if \code{weight = TRUE} while
#' equivalent to \code{weight = "constant"} if \code{weight = FALSE}.
#' Default: \code{weight = FALSE}.
#'
#' @return If \code{num.permutations > 0}, \code{bcov.test} returns a \code{htest} class object containing the following components:
#' \item{\code{statistic}}{Ball Covariance statistic.}
#' \item{\code{p.value}}{the p-value for the test.}
#' \item{\code{replicates}}{permutation replications of the test statistic.}
#' \item{\code{size}}{sample size.}
#' \item{\code{complete.info}}{a \code{list} mainly containing two vectors, the first vector is the Ball Covariance statistics
#' with different weights, the second is the \eqn{p}-values of the weighted Ball Covariance tests.}
#' \item{\code{alternative}}{a character string describing the alternative hypothesis.}
#' \item{\code{method}}{a character string indicating what type of test was performed.}
#' \item{\code{data.name}}{description of data.}
#' If \code{num.permutations = 0}, \code{bcov.test} returns a statistic value.
#'
#' @details
#' \code{bcov.test} is a nonparametric test of independence in Banach spaces.
#' It can detect the dependence between two random objects (variables) and
#' the mutual dependence among at least three random objects (variables).
#'
#' If two samples are passed to arguments \code{x} and \code{y}, the sample sizes (i.e. number of rows or length of the vector)
#' of the two variables must agree. If a \code{\link{list}} object is passed to \code{x}, this \code{list} must contain at least
#' two numeric vectors, matrices, or data.frames, and each element of this \code{list}
#' must have the same sample size. Moreover, data passed to \code{x} or \code{y}
#' must not contain missing or infinite values.
#' If \code{distance = TRUE}, \code{x} is considered as a distance matrix or a list containing distance matrices,
#' and \code{y} is considered as a distance matrix; otherwise, these arguments are treated as data.
#'
#' \code{bcov.test} utilizes the Ball Covariance statistic (see \code{\link{bcov}}) to measure dependence and
#' derives a \eqn{p}-value by replicating the random permutation \code{num.permutations} times.
#'
#' See Pan et al. (2019) for theoretical properties of the test, including statistical consistency.
#'
#' @note Actually, \code{bcov.test} simultaneously computes Ball Covariance statistics with
#' \code{"constant"}, \code{"probability"}, and \code{"chisquare"} weights.
#' Users can get other Ball Covariance statistics with different weight and their corresponding \eqn{p}-values #' in the \code{complete.info} element of output. We give a quick example below to illustrate. #' #' @references Wenliang Pan, Xueqin Wang, Heping Zhang, Hongtu Zhu & Jin Zhu (2019) Ball Covariance: A Generic Measure of Dependence in Banach Space, Journal of the American Statistical Association, DOI: 10.1080/01621459.2018.1543600 #' @references Jin Zhu, Wenliang Pan, Wei Zheng, and Xueqin Wang (2021). Ball: An R Package for Detecting Distribution Difference and Association in Metric Spaces, Journal of Statistical Software, Vol.97(6), doi: 10.18637/jss.v097.i06. #' #' @rdname bcov.test #' #' @useDynLib Ball, .registration = TRUE #' @export #' @seealso \code{\link{bcov}}, \code{\link{bcor}} #' @examples #' set.seed(1) #' #' ################# Quick Start ################# #' noise <- runif(50, min = -0.3, max = 0.3) #' x <- runif(50, 0, 4*pi) #' y <- cos(x) + noise #' # plot(x, y) #' res <- bcov.test(x, y) #' res #' ## get all Ball Covariance statistics: #' res[["complete.info"]][["statistic"]] #' ## get all test result: #' res[["complete.info"]][["p.value"]] #' #' ################# Quick Start ################# #' x <- matrix(runif(50 * 2, -pi, pi), nrow = 50, ncol = 2) #' noise <- runif(50, min = -0.1, max = 0.1) #' y <- sin(x[,1] + x[,2]) + noise #' bcov.test(x = x, y = y, weight = "prob") #' #' ################# Ball Covariance Test for Non-Hilbert Data ################# #' # load data: #' data("ArcticLake") #' # Distance matrix between y: #' Dy <- nhdist(ArcticLake[["x"]], method = "compositional") #' # Distance matrix between x: #' Dx <- dist(ArcticLake[["depth"]]) #' # hypothesis test with BCov: #' bcov.test(x = Dx, y = Dy, distance = TRUE) #' #' ################ Weighted Ball Covariance Test ################# #' data("ArcticLake") #' Dy <- nhdist(ArcticLake[["x"]], method = "compositional") #' Dx <- dist(ArcticLake[["depth"]]) #' # hypothesis test with weighted BCov: #' bcov.test(x = Dx, y = Dy, distance = TRUE, weight = "prob") #' #' ################# Mutual Independence Test ################# #' x <- rnorm(50) #' y <- (x > 0) * x + rnorm(50) #' z <- (x <= 0) * x + rnorm(50) #' data_list <- list(x, y, z) #' bcov.test(data_list) #' data_list <- lapply(data_list, function(x) { #' as.matrix(dist(x)) #' }) #' bcov.test(data_list, distance = TRUE) #' bcov.test(data_list, distance = FALSE, weight = "chi") #' #' ################# Mutual Independence Test for Meteorology data ################# #' data("meteorology") #' bcov.test(meteorology) #' #' ################ Testing via approximate limit distribution ################# #' \dontrun{ #' set.seed(1) #' n <- 2000 #' x <- rnorm(n) #' y <- rnorm(n) #' bcov.test(x, y, method = "limit") #' bcov.test(x, y) #' } bcov.test <- function(x, ...) UseMethod("bcov.test") #' @rdname bcov.test #' @export #' @method bcov.test default bcov.test.default <- function(x, y = NULL, num.permutations = 99, method = c("permutation", "limit"), distance = FALSE, weight = FALSE, seed = 1, num.threads = 0, ...) 
{ method <- match.arg(method) data_name <- paste(deparse(substitute(x)), "and", deparse(substitute(y))) if (length(data_name) > 1) { data_name <- "" } type <- "bcov" # modify input information: if(class(x)[1] == "list") { data_name <- gsub(x = data_name, pattern = " and NULL", replacement = "") } weight <- examine_weight_arguments(weight) result <- bcov_test_internal_wrap(x = x, y = y, num.permutations = num.permutations, distance = distance, weight = weight, seed = seed, method = method, type = type, num.threads = num.threads) # return result of hypothesis test: if(num.permutations == 0) { return(result) } else { if (weight == "constant") { stat <- result[["statistic"]][1] pvalue <- result[["p.value"]][1] weight_name <- "constant" } else if (weight == "probability") { stat <- result[["statistic"]][2] pvalue <- result[["p.value"]][2] weight_name <- "probability" } else if (weight == "chisquare") { stat <- result[["statistic"]][3] pvalue <- result[["p.value"]][3] weight_name <- "chisquare" } data_name <- paste0(data_name,"\nnumber of observations = ", result[["info"]][["N"]]) if (method == "limit") { null_method <- "Limit Distribution" data_name <- paste0(data_name, "\nreplicates = ", 0) } else { null_method <- "Permutation" data_name <- paste0(data_name, "\nreplicates = ", num.permutations) } data_name <- paste0(data_name, ", weight: ", weight_name) test_method <- "Ball Covariance test of %sindependence (%s)" test_type <- ifelse(class(x)[1] == "list" && length(x) > 2, "mutual ", "") test_method <- sprintf(test_method, test_type, null_method) # if(type == "bcor") { # test_method <- gsub(pattern = "Covariance", replacement = "Correlation", x = test_method) # data_name <- gsub(pattern = "Covariance", replacement = "Correlation", x = data_name) # } alternative_message <- "random variables are dependent" e <- list( statistic = stat, p.value = pvalue, replicates = num.permutations, size = result[["info"]][["N"]], complete.info = result, alternative = alternative_message, method = test_method, data.name = data_name ) class(e) <- "htest" return(e) } } #' @rdname bcov.test #' #' @param formula a formula of the form \code{~ u + v}, where each of \code{u} and \code{v} are numeric variables giving the data values for one sample. The samples must be of the same length. #' @param data an optional matrix or data frame (or similar: see \code{model.frame}) containing the variables in the formula formula. By default the variables are taken from environment(formula). #' @param subset an optional vector specifying a subset of observations to be used. #' @param na.action a function which indicates what should happen when the data contain \code{NA}s. Defaults to \code{getOption("na.action")}. #' @param ... further arguments to be passed to or from methods. #' #' @export #' @method bcov.test formula #' @importFrom stats model.frame #' #' @examples #' #' ################ Formula interface ################ #' ## independence test: #' bcov.test(~ CONT + INTG, data = USJudgeRatings) #' ## independence test with chisquare weight: #' bcov.test(~ CONT + INTG, data = USJudgeRatings, weight = "chi") #' ## mutual independence test: #' bcov.test(~ CONT + INTG + DMNR, data = USJudgeRatings) bcov.test.formula <- function(formula, data, subset, na.action, ...) 
{ if(missing(formula) || !inherits(formula, "formula") || length(formula) != 2L) stop("'formula' missing or invalid") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m[[1L]] <- quote(stats::model.frame) m$... <- NULL mf <- eval(m, environment(formula)) if(length(mf) < 2L) stop("invalid formula") DNAME <- paste(names(mf), collapse = " and ") dat <- list() dat[["x"]] <- as.list(mf) y <- do.call("bcov.test", c(dat, list(...))) remind_info <- strsplit(y$data.name, split = "number of observations")[[1]][2] DNAME <- paste0(DNAME, "\nnumber of observations") y$data.name <- paste0(DNAME, remind_info) y } #' Ball covariance test internal function #' @inheritParams bcov.test #' @param type #' #' @noRd bcov_test_internal <- function(x, y, num.permutations = 99, distance = FALSE, weight = FALSE, seed = 4, method = "permute", num.threads) { if (class(x)[1] == "dist" && class(y)[1] == "dist") { distance <- TRUE } if (distance) { if (class(x)[1] == "dist" || class(y)[1] == "dist") { if (class(x)[1] != "dist") { x <- as.dist(x) } if (class(y)[1] != "dist") { y <- as.dist(y) } num <- attr(x, "Size") x <- as.vector(x) y <- as.vector(y) } else { num <- nrow(x) x <- x[lower.tri(x)] y <- y[lower.tri(y)] } } else { x <- as.matrix(x) y <- as.matrix(y) num <- nrow(x) if ((ncol(x) != 1 || ncol(y) != 1) || (method == "limit")) { x <- as.vector(dist(x)) y <- as.vector(dist(y)) distance <- TRUE } else { x <- as.vector(x) y <- as.vector(y) } } examine_x_y_bcov(x, y) ## memory protect step: # memoryAvailable(num, funs = 'BI.test') ## examine test type: # type <- examine_type_arguments(type) ## examine num.permutations arguments: if(method == "limit") { num.permutations <- 0 } else { examine_R_arguments(num.permutations) } # if(num.permutations == 0) { result <- bcov_test_wrap_c(x = x, y = y, n = num, num.permutations = 0, distance = distance, num.threads = num.threads) if(method == "limit") { eigenvalue <- bcov_limit_wrap_c(x, y, num, distance, num.threads) result[["p.value"]] <- 1 - hbe(eigenvalue, num * result[["statistic"]][1]) return(result) } else { if (weight == WEIGHT_TYPE[1]) { return(result[[1]][1]) } else if (weight == WEIGHT_TYPE[2]) { return(result[[1]][2]) } else if (weight == WEIGHT_TYPE[3]) { return(result[[1]][3]) } } } else { set.seed(seed = examine_seed_arguments(seed)) result <- bcov_test_wrap_c(x = x, y = y, n = num, num.permutations = num.permutations, distance = distance, num.threads = num.threads) set.seed(NULL) return(result) } } #' A internal function for carry out independence test for multiple random variables #' @inheritParams bcov.test #' @inherit return #' @noRd kbcov_test_internal <- function(x, num.permutations = 99, distance = FALSE, weight = FALSE, seed = 1, method = 'permute', num.threads) { ############################################################ #################### R Version (1.1.0) ##################### ############################################################ # x <- lapply(x, as.matrix) # size_list <- sapply(x, nrow) # num <- unique(size_list) # if(length(num) > 1) { # stop("sample sizes of variables are not match!") # } # if(distance) { # # } else { # x <- lapply(x, dist, diag = TRUE, upper = TRUE) # x <- lapply(x, as.matrix) # } # var_num <- length(x) # # compute statistic: # stat_value <- kbcov_stat(x = x, num = num, var_num = var_num, # weight = weight, type = type) # if(num.permutations == 0) { # names(stat_value) # return(stat_value) # } else { # seed <- examine_seed_arguments(seed) # set.seed(seed) # # 
permutation procedure: # permuted_stat <- matrix(nrow = 3, ncol = num.permutations) # for (r in 1:num.permutations) { # x_copy <- x # for (v in 1:var_num) { # index <- sample(1:num, size = num, replace = FALSE) # x_copy[[v]] <- x[[v]][index, index] # } # permuted_stat[, r] <- kbcov_stat(x = x_copy, num = num, # var_num = var_num, # weight = weight, type = type) # } # permuted_stat <- t(permuted_stat) # # calculate pvalue: # pvalue <- sapply(1:3, function(i) { # calculatePvalue(stat_value[i], permuted_stat[, i]) # }) # names(pvalue) <- paste0(names(stat_value), ".pvalue") # # pvalue <- calculatePvalue(stat_value, permuted_stat) # } # return result: # list("statistic" = stat_value, # "p.value" = pvalue, # "info" = list("N" = num, "num.permutations" = num.permutations)) ############################################################ #################### C Version (1.2.0) ##################### ############################################################ var_num <- length(x) if ((!distance) && all(sapply(x, function(xx) { class(xx)[1] }) == "dist")) { distance <- TRUE } if(distance) { if (class(x[[1]])[1] != "dist") { if (nrow(x[[1]]) != ncol(x[[1]])) { stop("The elements of input list is not a distance matrix.") } } } else { x <- lapply(x, dist) } distance <- TRUE num <- ifelse(class(x[[1]])[1] == "dist", attr(x[[1]], "Size"), nrow(x[[1]])) if (class(x[[1]])[1] == "dist") { x <- lapply(x, as.vector) } else { x <- lapply(x, function(xx) { xx[lower.tri(xx)] }) } if (length(unique(sapply(x, length))) != 1) { stop("sample size of variables are not match!") } else { old_x <- x x <- unlist(x) } # if(method == "limit") { num.permutations <- 0 } else { examine_R_arguments(num.permutations) } # if(num.permutations == 0) { result <- kbcov_test_wrap_c(x = x, K = var_num, n = num, num.permutations = 0, distance = distance, num.threads = num.threads) if(method == "limit") { eigenvalue <- bcov_limit_wrap_c(old_x, NULL, num, distance, num.threads) result[["p.value"]] <- 1 - hbe(eigenvalue, num * result[["statistic"]]) return(result) } else { if (weight == WEIGHT_TYPE[1]) { return(result[[1]][1]) } else if (weight == WEIGHT_TYPE[2]) { return(result[[1]][2]) } else { return(result[[1]][3]) } } } else { set.seed(seed = examine_seed_arguments(seed)) result <- kbcov_test_wrap_c(x = x, K = var_num, n = num, num.permutations = num.permutations, distance = distance, num.threads = num.threads) set.seed(NULL) return(result) } } #' compute extension of BCov for independence of multiple random variables #' #' @param x list containing distance matrix, each element is distance matrix #' @param num sample size #' @param var_num random variables number #' @param weight whether used weight #' @param type Ball Correlation or Ball Covariance. now, only Ball Covariance is considered. 
#' #' @return Ball Covariance statistic #' @noRd kbcov_stat <- function(x, num, var_num, weight, type) { compare_list <- list() prop_in_ball_vec <- c() value_diff <- numeric(1) value_diff_prob <- numeric(1) value_diff_hhg <- numeric(1) hhg_ball_num <- numeric(1) # stat_value <- numeric(1) stat_value_prob <- numeric(1) stat_value_hhg <- numeric(1) # stat_value_name <- c("bcov", "bcov.prob", "bcov.hhg") # weight_name <- c("none", "prob", "hhg") # names(stat_value) <- stat_value_name[which(weight_name == weight)] # if(type == "bcor") { # names(stat_value) <- gsub(x = names(stat_value), # pattern = "bcov", replacement = "bcor") # } # compute extention of BCov: for (i in 1:num) { for (j in 1:num) { value_diff_hhg <- 0 value_diff_prob <- 0 value_diff <- 0 # compute ball statistic for ball with sample i as center and radius is d(x_{i}, x_{j}) # d(x_{i}, x_{j}) are stored in x[[v]][i, j], x = 1, ..., var_num all_in_ball_vec <- rep(1, num) for (v in 1:var_num) { compare_list[[v]] <- (x[[v]][, i] <= x[[v]][i, j]) all_in_ball_vec <- all_in_ball_vec * compare_list[[v]] } prop_in_ball_vec <- sapply(compare_list, mean) value_diff <- (mean(all_in_ball_vec) - prod(prop_in_ball_vec))^2 value_diff_prob <- value_diff / prod(prop_in_ball_vec) if (!any(prop_in_ball_vec %in% c(0, 1))) { value_diff_hhg <- value_diff / (prod(prop_in_ball_vec)*(prod(1 - prop_in_ball_vec))) hhg_ball_num <- hhg_ball_num + 1 } # aggrate statistic value: stat_value <- stat_value + value_diff stat_value_prob <- stat_value_prob + value_diff_prob stat_value_hhg <- stat_value_hhg + value_diff_hhg } } stat_value <- stat_value / (num)^2 stat_value_prob <- stat_value_prob / (num)^2 stat_value_hhg <- stat_value_hhg / (hhg_ball_num)^2 # c("bcov.constant" = stat_value, "bcov.probability" = stat_value_prob, "bcov.chisquare" = stat_value_hhg) } #' Wrap kbcov_test_internal and bcov_test_internal #' @inheritParams bcov.test #' @noRd bcov_test_internal_wrap <- function(x = x, y = y, num.permutations, distance, seed, weight, method, type, num.threads) { if(class(x)[1] == "list") { if (length(x) > 2) { result <- kbcov_test_internal(x = x, num.permutations = num.permutations, distance = distance, weight = weight, seed = seed, method = method, num.threads = num.threads) } else { y <- x[[2]] x <- x[[1]] result <- bcov_test_internal(x = x, y = y, num.permutations = num.permutations, distance = distance, weight = weight, seed = seed, method = method, num.threads = num.threads) } } else { result <- bcov_test_internal(x = x, y = y, num.permutations = num.permutations, distance = distance, weight = weight, seed = seed, method = method, num.threads = num.threads) } result } #' @title Ball Covariance and Correlation Statistics #' @description Computes Ball Covariance and Ball Correlation statistics, #' which are generic dependence measures in Banach spaces. #' @inheritParams bcov.test #' @rdname bcov #' #' @details #' The sample sizes of the two variables must agree, and samples must not contain missing and infinite values. #' If we set \code{distance = TRUE}, arguments \code{x}, \code{y} can be a \code{dist} object or a #' symmetric numeric matrix recording distance between samples; otherwise, these arguments are treated as data. #' #' \code{bcov} and \code{bcor} compute Ball Covariance and Ball Correlation statistics. #' #' Ball Covariance statistics is a generic dependence measure in Banach spaces. 
It enjoys the following properties:
#' \itemize{
#' \item It is nonnegative and it is equal to zero if and only if variables are unassociated;
#' \item It is highly robust;
#' \item It is distribution-free and model-free;
#' \item Interestingly, the HHG statistic is a special case of the Ball Covariance statistic.
#' }
#' The Ball Correlation statistic, a normalized version of the Ball Covariance statistic, generalizes the Pearson correlation in two fundamental ways:
#' \itemize{
#' \item It is well-defined for random variables in arbitrary dimension in Banach spaces;
#' \item A BCor equal to zero implies that the random variables are unassociated.
#' }
#'
#' The definitions of the Ball Covariance and Ball Correlation statistics between two random variables are as follows.
#' Suppose we are given pairs of independent observations
#' \eqn{\{(x_1, y_1),...,(x_n,y_n)\}}, where \eqn{x_i} and \eqn{y_i} can be of any dimension
#' and the dimensionality of \eqn{x_i} and \eqn{y_i} need not be the same.
#' Then, we define the sample version of Ball Covariance as:
#' \deqn{\mathbf{BCov}_{\omega, n}^{2}(X, Y)=\frac{1}{n^{2}}\sum_{i,j=1}^{n}{(\Delta_{ij,n}^{X,Y}-\Delta_{ij,n}^{X}\Delta_{ij,n}^{Y})^{2}} }
#' where:
#' \deqn{ \Delta_{ij,n}^{X,Y}=\frac{1}{n}\sum_{k=1}^{n}{\delta_{ij,k}^{X} \delta_{ij,k}^{Y}},
#' \Delta_{ij,n}^{X}=\frac{1}{n}\sum_{k=1}^{n}{\delta_{ij,k}^{X}},
#' \Delta_{ij,n}^{Y}=\frac{1}{n}\sum_{k=1}^{n}{\delta_{ij,k}^{Y}} }
#' \deqn{\delta_{ij,k}^{X} = I(x_{k} \in \bar{B}(x_{i}, \rho(x_{i}, x_{j}))),
#' \delta_{ij,k}^{Y} = I(y_{k} \in \bar{B}(y_{i}, \rho(y_{i}, y_{j})))}
#' Among them, \eqn{\bar{B}(x_{i}, \rho(x_{i}, x_{j}))} is a closed ball
#' with center \eqn{x_{i}} and radius \eqn{\rho(x_{i}, x_{j})}.
#' Similarly, we can define \eqn{ \mathbf{BCov}_{\omega,n}^2(\mathbf{X},\mathbf{X}) }
#' and \eqn{ \mathbf{BCov}_{\omega,n}^2(\mathbf{Y},\mathbf{Y}) }.
#' We define the Ball Correlation statistic as follows.
#' \deqn{\mathbf{BCor}_{\omega,n}^2(\mathbf{X},\mathbf{Y})=
#' \mathbf{BCov}_{\omega,n}^2(\mathbf{X},\mathbf{Y})/\sqrt{\mathbf{BCov}_{\omega,n}^2(\mathbf{X},\mathbf{X})\mathbf{BCov}_{\omega,n}^2(\mathbf{Y},\mathbf{Y})}
#' }
#'
#' We can extend \eqn{\mathbf{BCov}_{\omega,n}} to measure the mutual independence between \eqn{K} random variables:
#' \deqn{\frac{1}{n^{2}}\sum_{i,j=1}^{n}{\left[ (\Delta_{ij,n}^{X_{1}, ..., X_{K}}-\prod_{k=1}^{K}\Delta_{ij,n}^{X_{k}})^{2}\prod_{k=1}^{K}{\hat{\omega}_{k}(X_{ki},X_{kj})} \right]}}
#' where \eqn{X_{k}(k=1,\ldots,K)} are random variables and \eqn{X_{ki}} is the \eqn{i}-th observation of \eqn{X_{k}}.
#'
#' See \code{\link{bcov.test}} for a test of independence based on the Ball Covariance statistic.
#'
#' @return
#' \item{\code{bcov }}{ Ball Covariance statistic.}
#' @seealso
#' \code{\link{bcov.test}}, \code{\link{bcorsis}}
#' @export
#'
#' @references Wenliang Pan, Xueqin Wang, Heping Zhang, Hongtu Zhu & Jin Zhu (2019) Ball Covariance: A Generic Measure of Dependence in Banach Space, Journal of the American Statistical Association, DOI: 10.1080/01621459.2018.1543600
#' @references Wenliang Pan, Xueqin Wang, Weinan Xiao & Hongtu Zhu (2018) A Generic Sure Independence Screening Procedure, Journal of the American Statistical Association, DOI: 10.1080/01621459.2018.1462709
#' @references Jin Zhu, Wenliang Pan, Wei Zheng, and Xueqin Wang (2021). Ball: An R Package for Detecting Distribution Difference and Association in Metric Spaces, Journal of Statistical Software, Vol.97(6), doi: 10.18637/jss.v097.i06.
#' #' @examples #' ############# Ball Covariance ############# #' num <- 50 #' x <- rnorm(num) #' y <- rnorm(num) #' bcov(x, y) #' bcov(x, y, weight = "prob") #' bcov(x, y, weight = "chisq") bcov <- function(x, y, distance = FALSE, weight = FALSE) { weight <- examine_weight_arguments(weight) res <- bcov_test_internal_wrap(x = x, y = y, num.permutations = 0, distance = distance, seed = 1, weight = weight, method = "permute", num.threads = 0) res }
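# ---------------------------------------------------------------------------
# Reference sketch (not part of the package API): a naive O(n^3) computation
# of the constant-weight sample Ball Covariance, written directly from the
# definition in the documentation above. The helper name `bcov_naive` is ours;
# tie handling may differ slightly from the optimized C routine behind bcov().
# ---------------------------------------------------------------------------
bcov_naive <- function(x, y) {
  n <- NROW(x)
  dx <- as.matrix(dist(x))   # d(x_i, x_k)
  dy <- as.matrix(dist(y))   # d(y_i, y_k)
  total <- 0
  for (i in seq_len(n)) {
    for (j in seq_len(n)) {
      delta_x <- dx[i, ] <= dx[i, j]   # delta^X_{ij,k}: is x_k inside B(x_i, d(x_i, x_j))?
      delta_y <- dy[i, ] <= dy[i, j]   # delta^Y_{ij,k}
      # (Delta^{XY}_{ij,n} - Delta^X_{ij,n} * Delta^Y_{ij,n})^2
      total <- total + (mean(delta_x & delta_y) - mean(delta_x) * mean(delta_y))^2
    }
  }
  total / n^2
}
# set.seed(1); x <- rnorm(20); y <- x + rnorm(20)
# bcov_naive(x, y)   # expected to be close to bcov(x, y)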
/scratch/gouwar.j/cran-all/cranData/Ball/R/bcov.R
#' @title Ball Divergence based Equality of Distributions Test
#'
#' @description Performs the nonparametric two-sample or \eqn{K}-sample Ball Divergence test for
#' equality of multivariate distributions.
#'
#' @aliases bd.test
#'
#' @author Wenliang Pan, Yuan Tian, Xueqin Wang, Heping Zhang, Jin Zhu
#'
#' @param x a numeric vector, matrix, data.frame, or a list containing at least two numeric vectors, matrices, or data.frames.
#' @param y a numeric vector, matrix, or data.frame.
#' @param num.permutations the number of permutation replications. When \code{num.permutations = 0}, the function just returns
#' the Ball Divergence statistic. Default: \code{num.permutations = 99}.
#' @param distance if \code{distance = TRUE}, the elements of \code{x} will be considered as a distance matrix. Default: \code{distance = FALSE}.
#' @param size a vector recording the sample size of each group.
#' @param seed the random seed. Default \code{seed = 1}.
#' @param num.threads number of threads. If \code{num.threads = 0}, then all available cores will be used. Default \code{num.threads = 0}.
#' @param kbd.type a character string specifying the \eqn{K}-sample Ball Divergence test statistic,
#' must be one of \code{"sum"}, \code{"maxsum"}, or \code{"max"}. Any unambiguous substring can be given.
#' Default \code{kbd.type = "sum"}.
#' @param method if \code{method = "permutation"}, a permutation procedure is carried out to compute the \eqn{p}-value;
#' if \code{method = "limit"}, an approximate null distribution is used when \code{weight = "constant"}.
#' Any unambiguous substring can be given. Default \code{method = "permutation"}.
#' @param weight a character string specifying the weight form of the Ball Divergence statistic.
#' It must be one of \code{"constant"} or \code{"variance"}.
#' Any unambiguous substring can be given. Default: \code{weight = "constant"}.
#' @param ... further arguments to be passed to or from methods.
#'
#' @return If \code{num.permutations > 0}, \code{bd.test} returns a \code{htest} class object containing the following components:
#' \item{\code{statistic}}{Ball Divergence statistic.}
#' \item{\code{p.value}}{the \eqn{p}-value for the test.}
#' \item{\code{replicates}}{permutation replications of the test statistic.}
#' \item{\code{size}}{sample sizes.}
#' \item{\code{complete.info}}{a \code{list} mainly containing two vectors, the first vector is the Ball Divergence statistics
#' with different aggregation strategies and weights, the second vector is the \eqn{p}-values of the tests.}
#' \item{\code{alternative}}{a character string describing the alternative hypothesis.}
#' \item{\code{method}}{a character string indicating what type of test was performed.}
#' \item{\code{data.name}}{description of data.}
#' If \code{num.permutations = 0}, \code{bd.test} returns a statistic value.
#'
#' @rdname bd.test
#'
#' @details
#' \code{bd.test} is a nonparametric test for the two-sample or \eqn{K}-sample problem.
#' It can detect the distribution difference between \eqn{K (K \geq 2)} samples even when the sample sizes are imbalanced.
#' This test copes well with multivariate or complex datasets.
#'
#' If only \code{x} is given, the statistic is
#' computed from the original pooled samples, stacked in a
#' matrix where each row is a multivariate observation, or from the distance matrix
#' when \code{distance = TRUE}. The first \code{size[1]} rows of \code{x} are the first sample, the next
#' \code{size[2]} rows of \code{x} are the second sample, etc.
#' If \code{x} is a \code{list}, its elements are taken as the samples to be compared,
#' and hence, this \code{list} must contain at least two numeric data vectors, matrices or data.frames.
#'
#' \code{bd.test} utilizes the Ball Divergence statistics (see \code{\link{bd}}) to measure the difference between distributions and
#' derives a \eqn{p}-value by replicating the random permutation \code{num.permutations} times.
#' The function simply returns the test statistic
#' when \code{num.permutations = 0}.
#'
#' The time complexity of \code{bd.test} is around \eqn{O(R \times n^2)},
#' where \eqn{R} = \code{num.permutations} and \eqn{n} is the sample size.
#'
#' @note Actually, \code{bd.test} simultaneously computes the \code{"sum"}, \code{"maxsum"}, and \code{"max"} Ball Divergence statistics
#' when \eqn{K \geq 3}.
#' Users can get other Ball Divergence statistics and their corresponding \eqn{p}-values
#' in the \code{complete.info} element of output. We give a quick example below to illustrate.
#'
#' @seealso
#' \code{\link{bd}}
#'
#' @references Wenliang Pan, Yuan Tian, Xueqin Wang, Heping Zhang. Ball Divergence: Nonparametric two sample test. Annals of Statistics. 46 (2018), no. 3, 1109--1137. doi:10.1214/17-AOS1579. https://projecteuclid.org/euclid.aos/1525313077
#' @references Jin Zhu, Wenliang Pan, Wei Zheng, and Xueqin Wang (2021). Ball: An R Package for Detecting Distribution Difference and Association in Metric Spaces, Journal of Statistical Software, Vol.97(6), doi: 10.18637/jss.v097.i06.
#'
#' @export
#' @examples
#' ################# Quick Start #################
#' set.seed(1)
#' x <- rnorm(50)
#' y <- rnorm(50, mean = 1)
#' # plot(density(x))
#' # lines(density(y), col = "red")
#' bd.test(x = x, y = y)
#'
#' ################# Quick Start #################
#' x <- matrix(rnorm(100), nrow = 50, ncol = 2)
#' y <- matrix(rnorm(100, mean = 3), nrow = 50, ncol = 2)
#' # Hypothesis test with Standard Ball Divergence:
#' bd.test(x = x, y = y)
#'
#' ################# Simulated Non-Hilbert data #################
#' data("bdvmf")
#' \dontrun{
#' library(scatterplot3d)
#' scatterplot3d(bdvmf[["x"]], color = bdvmf[["group"]],
#' xlab = "X1", ylab = "X2", zlab = "X3")
#' }
#' # calculate geodesic distance between samples:
#' Dmat <- nhdist(bdvmf[["x"]], method = "geodesic")
#' # hypothesis test with BD:
#' bd.test(x = Dmat, size = c(150, 150), num.permutations = 99, distance = TRUE)
#'
#' ################# Non-Hilbert Real Data #################
#' # load data:
#' data("macaques")
#' # number of female and male Macaca fascicularis:
#' table(macaques[["group"]])
#' # calculate Riemannian shape distance matrix:
#' Dmat <- nhdist(macaques[["x"]], method = "riemann")
#' # hypothesis test with BD:
#' bd.test(x = Dmat, num.permutations = 99, size = c(9, 9), distance = TRUE)
#'
#' ################ K-sample Test #################
#' n <- 150
#' bd.test(rnorm(n), size = c(40, 50, 60))
#' # alternative input method:
#' x <- lapply(c(40, 50, 60), rnorm)
#' res <- bd.test(x)
#' res
#' ## get all Ball Divergence statistics:
#' res[["complete.info"]][["statistic"]]
#' ## get all test result:
#' res[["complete.info"]][["p.value"]]
#'
#' ################ Testing via approximate limit distribution #################
#' \dontrun{
#' set.seed(1)
#' n <- 1000
#' x <- rnorm(n)
#' y <- rnorm(n)
#' res <- bd.test(x, y, method = "limit")
#' bd.test(x, y)
#' }
bd.test <- function(x, ...)
UseMethod("bd.test") #' @rdname bd.test #' @export #' @method bd.test default bd.test.default <- function(x, y = NULL, num.permutations = 99, method = c("permutation", "limit"), distance = FALSE, size = NULL, seed = 1, num.threads = 0, kbd.type = c("sum", "maxsum", "max"), weight = c("constant", "variance"), ...) { weight <- match.arg(weight) data_name <- paste(deparse(substitute(x)), "and", deparse(substitute(y))) kbd.type <- match.arg(kbd.type) method <- match.arg(method) if (length(data_name) > 1) { data_name <- "" } if(is.null(x) || is.null(y)) { if(is.null(x) & is.null(y)) { stop("x and y are all null!") } # modify input information: data_name <- gsub(x = data_name, pattern = " and NULL", replacement = "") if (class(x)[1] == "dist") { distance <- TRUE } if(distance) { examine_size_arguments(size) if (length(size) >= 2) { if (class(x)[1] == "dist") { if (attr(x, "Size") != sum(size)) { stop("size arguments is error!") } xy <- as.vector(x) } else { if (nrow(x) != sum(size)) { stop("size arguments is error!") } xy <- x[lower.tri(x)] } } else if (length(size) < 2) { stop("size arguments is error!") } } else { if (is.list(x)) { x <- lapply(x, as.matrix) if (length(unique(sapply(x, ncol))) != 1) { stop("data with different dimension!") } size <- sapply(x, nrow) x <- do.call("rbind", x) p <- ncol(x) } else if (is.vector(x)) { p <- 1 } else { p <- ncol(x) if (p == 1) { x <- as.vector(x) } } if (p > 1) { xy <- dist(x) if (attr(xy, "Size") != sum(size)) { stop("size arguments is error!") } xy <- as.vector(xy) distance <- TRUE } else { xy <- x distance <- FALSE } } } else { x <- as.matrix(x) y <- as.matrix(y) p <- examine_dimension(x, y) # if(p > 1 || method == "limit") { xy <- get_vectorized_distance_matrix(x, y) distance <- TRUE size <- c(xy[[2]], xy[[3]]) xy <- xy[[1]] } else { xy <- c(x, y) distance <- FALSE size <- c(dim(x)[1], dim(y)[1]) } } ## memory protect step: # memoryAvailable(n = sum(size), funs = 'BD.test') ## examine num.permutations arguments: if(method == "limit") { num.permutations <- 0 } else { examine_R_arguments(num.permutations) } ## examine num.thread arguments: examine_threads_arguments(num.threads) ## main: if(num.permutations == 0) { result <- bd_test_wrap_c(xy, size, num.permutations = 0, weight, distance, num.threads) # approximately method: if(method == "limit") { if(result[["info"]][["K"]] == 2) { eigenvalue <- bd_limit_wrap_c(xy, size, distance, num.threads) result[["p.value"]] <- 1 - hbe(eigenvalue, prod(size) * result[["statistic"]] / sum(size)) } else { return(result[["statistic"]]) } } # return statistic when num.permutations = 0: else { if (result[["info"]][["K"]] == 2) { return_stat <- result[["statistic"]][1] } else { if (kbd.type == "sum") { return_stat <- result[["statistic"]][1] } else if (kbd.type == "max") { return_stat <- result[["statistic"]][2] } else if (kbd.type == "maxsum") { return_stat <- result[["statistic"]][3] } } return(return_stat) } } else { # permutation method: ## examine seed arguments: set.seed(examine_seed_arguments(seed)) ## hypothesis test: result <- bd_test_wrap_c(xy, size, num.permutations, weight, distance, num.threads) # pvalue <- calculatePvalue(result[["statistic"]], result[["permuted_stat"]]) set.seed(NULL) } # output information: if (result[["info"]][["K"]] == 2) { stat <- result[["statistic"]][1] pvalue <- result[["p.value"]][1] } else if (kbd.type == "sum") { stat <- result[["statistic"]][1] pvalue <- result[["p.value"]][1] stat_message <- "sum" } else if (kbd.type == "max") { stat <- result[["statistic"]][2] pvalue <- 
result[["p.value"]][2] stat_message <- "max" } else { stat <- result[["statistic"]][3] pvalue <- result[["p.value"]][3] stat_message <- "maxsum" } data_name <- paste(data_name, sprintf("\nnumber of observations = %s,", result[["info"]][["N"]])) data_name <- paste(data_name, "group sizes:", paste0(result[["info"]][["size"]], collapse = " ")) if (method == "limit") { null_method <- "Limit Distribution" data_name <- paste0(data_name, "\nreplicates = ", 0) } else { null_method <- "Permutation" data_name <- paste0(data_name, "\nreplicates = ", num.permutations) } data_name <- paste0(data_name, ", weight: ", weight) if (result[["info"]][["K"]] == 3) { data_name <- paste0(data_name, ", kbd.type: ", stat_message) } # data_name <- paste0(data_name, ", Weighted Ball Divergence = ", result[["info"]][["weight"]]) alternative_message <- "distributions of samples are distinct" # return: e <- list( statistic = stat, p.value = pvalue, replicates = num.permutations, size = result[["info"]][["size"]], complete.info = result[["info"]], alternative = alternative_message, method = sprintf("%s-sample Ball Divergence Test (%s)", result[["info"]][["K"]], null_method), data.name = data_name ) class(e) <- "htest" return(e) } #' @rdname bd.test #' #' @param formula a formula of the form \code{response ~ group} where \code{response} gives the data values and \code{group} a vector or factor of the corresponding groups. #' @param data an optional matrix or data frame (or similar: see \code{model.frame}) containing the variables in the formula \code{formula}. By default the variables are taken from \code{environment(formula)}. #' @param subset an optional vector specifying a subset of observations to be used. #' @param na.action a function which indicates what should happen when the data contain \code{NA}s. Defaults to \code{getOption("na.action")}. #' @method bd.test formula #' @export #' @examples #' #' ################ Formula interface ################ #' ## Two-sample test #' bd.test(extra ~ group, data = sleep) #' ## K-sample test #' bd.test(Sepal.Width ~ Species, data = iris) #' bd.test(Sepal.Width ~ Species, data = iris, kbd.type = "max") bd.test.formula <- function(formula, data, subset, na.action, ...) { if(missing(formula) || (length(formula) != 3L) || (length(attr(terms(formula[-2L]), "term.labels")) != 1L)) stop("'formula' missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) ## need stats:: for non-standard evaluation m[[1L]] <- quote(stats::model.frame) m$... <- NULL mf <- eval(m, parent.frame()) DNAME <- paste(names(mf), collapse = " by ") names(mf) <- NULL response <- attr(attr(mf, "terms"), "response") g <- factor(mf[[-response]]) if(nlevels(g) < 2L) stop("grouping factor must contain at least two levels") DATA <- list() DATA[["x"]] <- split(mf[[response]], g) y <- do.call("bd.test", c(DATA, list(...))) remind_info <- strsplit(y$data.name, split = "number of observations")[[1]][2] DNAME <- paste0(DNAME, "\nnumber of observations") y$data.name <- paste0(DNAME, remind_info) y } #' @title Ball Divergence statistic #' @description Compute Ball Divergence statistic, which is a generic dispersion measure in Banach spaces. #' @author Wenliang Pan, Yuan Tian, Xueqin Wang, Heping Zhang #' @inheritParams bd.test #' @rdname bd #' @return #' \item{\code{bd }}{ Ball Divergence statistic} #' #' @details #' Given the samples not containing missing values, \code{bd} returns Ball Divergence statistics. 
#' If we set \code{distance = TRUE}, arguments \code{x}, \code{y} can be a \code{dist} object or a
#' symmetric numeric matrix recording distance between samples;
#' otherwise, these arguments are treated as data.
#'
#' The Ball Divergence statistic measures the distribution difference between two datasets in Banach spaces.
#' The Ball Divergence statistic is proven to be zero if and only if the two underlying distributions are identical.
#'
#' The definition of the Ball Divergence statistic is as follows.
#' Given two independent samples \eqn{ \{x_{1}, \ldots, x_{n}\} } with the associated probability measure \eqn{\mu} and
#' \eqn{ \{y_{1}, \ldots, y_{m}\} } with \eqn{\nu}, where the observations in each sample are \emph{i.i.d}.
#' Let \eqn{\delta(x,y,z)=I(z\in \bar{B}(x, \rho(x,y)))},
#' where \eqn{\delta(x,y,z)} indicates whether \eqn{z} is located in the closed ball \eqn{\bar{B}(x, \rho(x,y))}
#' with center \eqn{x} and radius \eqn{\rho(x, y)}.
#' We denote:
#' \deqn{
#' A_{ij}^{X}=\frac{1}{n}\sum_{u=1}^{n}{\delta(X_i,X_j,X_u)}, \quad A_{ij}^{Y}=\frac{1}{m}\sum_{v=1}^{m}{\delta(X_i,X_j,Y_v)},
#' }
#' \deqn{
#' C_{kl}^{X}=\frac{1}{n}\sum_{u=1}^{n}{\delta(Y_k,Y_l,X_u)}, \quad C_{kl}^{Y}=\frac{1}{m}\sum_{v=1}^{m}{\delta(Y_k,Y_l,Y_v)}.
#' }
#' \eqn{A_{ij}^X} represents the proportion of samples \eqn{ \{x_{1}, \ldots, x_{n}\} } located in the
#' ball \eqn{\bar{B}(X_i,\rho(X_i,X_j))} and \eqn{A_{ij}^Y} represents the proportion of samples \eqn{ \{y_{1}, \ldots, y_{m}\} }
#' located in the ball \eqn{\bar{B}(X_i,\rho(X_i,X_j))}.
#' Meanwhile, \eqn{C_{kl}^X} and \eqn{C_{kl}^Y} represent the corresponding proportions located in the ball \eqn{\bar{B}(Y_k,\rho(Y_k,Y_l))}.
#' Aggregating the squared differences of these proportions over the two samples gives
#' \deqn{A_{n,m}=\frac{1}{n^{2}}\sum_{i,j=1}^{n}{(A_{ij}^{X}-A_{ij}^{Y})^{2}}, \quad
#' C_{n,m}=\frac{1}{m^{2}}\sum_{k,l=1}^{m}{(C_{kl}^{X}-C_{kl}^{Y})^{2}}.}
#' The Ball Divergence statistic is defined as:
#' \deqn{D_{n,m}=A_{n,m}+C_{n,m}}
#'
#' Ball Divergence can be generalized to the \emph{K}-sample test problem. Suppose we
#' have \eqn{K} groups of samples, where the \eqn{k}-th group includes \eqn{n_{k}} samples.
#' The \eqn{K}-sample Ball Divergence statistic can be defined
#' by directly summing up the two-sample Ball Divergence statistics of all sample pairs (\code{kbd.type = "sum"})
#' \deqn{\sum_{1 \leq k < l \leq K}{D_{n_{k},n_{l}}},}
#' by finding the one sample with the largest difference from the others (\code{kbd.type = "maxsum"})
#' \deqn{\max_{t}{\sum_{s=1, s \neq t}^{K}{D_{n_{s}, n_{t}}},}}
#' or by aggregating the \eqn{K-1} most significant two-sample Ball Divergence statistics (\code{kbd.type = "max"})
#' \deqn{\sum_{k=1}^{K-1}{D_{(k)}},}
#' where \eqn{D_{(1)}, \ldots, D_{(K-1)}} are the largest \eqn{K-1} two-sample Ball Divergence statistics among
#' \eqn{\{D_{n_s, n_t}| 1 \leq s < t \leq K\}}. When \eqn{K=2},
#' the three types of Ball Divergence statistics degenerate into the two-sample Ball Divergence statistic.
#'
#' See \code{\link{bd.test}} for a test of distribution equality based on the Ball Divergence.
#'
#' @seealso
#' \code{\link{bd.test}}
#' @export
#'
#' @references Wenliang Pan, Yuan Tian, Xueqin Wang, Heping Zhang. Ball Divergence: Nonparametric two sample test. Ann. Statist. 46 (2018), no. 3, 1109--1137. doi:10.1214/17-AOS1579. https://projecteuclid.org/euclid.aos/1525313077
#'
#' @examples
#' ############# Ball Divergence #############
#' x <- rnorm(50)
#' y <- rnorm(50)
#' bd(x, y)
bd <- function(x, y = NULL, distance = FALSE, size = NULL, num.threads = 1,
               kbd.type = c("sum", "maxsum", "max")) {
  # pass num.threads through so the argument is not silently ignored:
  res <- bd.test(x = x, y = y, distance = distance, size = size,
                 num.permutations = 0, num.threads = num.threads, kbd.type = kbd.type)
  res
}
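# ---------------------------------------------------------------------------
# Reference sketch (not part of the package API): a naive computation of the
# two-sample Ball Divergence for univariate samples, following the definition
# in the documentation above. The helper name `bd_naive` is ours; ties may be
# resolved differently by the optimized C routine behind bd().
# ---------------------------------------------------------------------------
bd_naive <- function(x, y) {
  n <- length(x)
  m <- length(y)
  dxx <- abs(outer(x, x, "-"))   # d(X_i, X_j)
  dyy <- abs(outer(y, y, "-"))   # d(Y_k, Y_l)
  dxy <- abs(outer(x, y, "-"))   # d(X_i, Y_v)
  A <- 0
  for (i in seq_len(n)) for (j in seq_len(n)) {
    A_x <- mean(dxx[i, ] <= dxx[i, j])   # A^X_{ij}: x's inside B(X_i, d(X_i, X_j))
    A_y <- mean(dxy[i, ] <= dxx[i, j])   # A^Y_{ij}: y's inside the same ball
    A <- A + (A_x - A_y)^2
  }
  C <- 0
  for (k in seq_len(m)) for (l in seq_len(m)) {
    C_x <- mean(dxy[, k] <= dyy[k, l])   # C^X_{kl}: x's inside B(Y_k, d(Y_k, Y_l))
    C_y <- mean(dyy[k, ] <= dyy[k, l])   # C^Y_{kl}: y's inside the same ball
    C <- C + (C_x - C_y)^2
  }
  A / n^2 + C / m^2                      # D_{n,m} = A_{n,m} + C_{n,m}
}
# set.seed(1); x <- rnorm(15); y <- rnorm(15, mean = 1)
# bd_naive(x, y)   # expected to be close to bd(x, y)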
/scratch/gouwar.j/cran-all/cranData/Ball/R/bd.R
#' @title Fast K-sample Ball Divergence Test for GWAS Data
#' @inheritParams bd.test
#' @param x a numeric vector, matrix, data.frame, or dist object.
#' @param snp a numeric matrix recording the values of single nucleotide polymorphisms (SNPs). Each column must be an integer vector.
#' @param refine a logical value. If \code{refine = TRUE}, a \eqn{p}-value refining process is applied to
#' the SNPs that pass the pre-screening process. Default: \code{refine = TRUE} (at present, \code{refine = FALSE} is not available).
#' @param num.permutations the number of permutation replications. When \code{num.permutations = 0},
#' the function just returns the Ball Divergence statistic. Default: \code{num.permutations = 100 * ncol(snp)}.
#' @param screening.method if \code{screening.method = "spectrum"}, the spectrum method is applied to
#' screen the candidate SNPs; otherwise, the permutation method is applied. Default: \code{screening.method = "permute"}.
#' @param screening.result an object returned by \code{bd.gwas.test} that
#' preserves the pre-screening result.
#' It works only if the pre-screening is available.
#' Default: \code{screening.result = NULL}.
#' @param alpha the significance level. Default: \code{0.05 / ncol(snp)}.
#' @param verbose Show computation status and estimated runtimes. Default: \code{verbose = TRUE}.
#'
#' @return bd.gwas.test returns a list containing the following components:
#' \item{\code{statistic}}{Ball Divergence statistics vector.}
#' \item{\code{permuted.statistic}}{a data.frame containing permuted Ball Divergence statistics for pre-screening SNPs.
#' If \code{refine = FALSE}, it takes value \code{NULL}.}
#' \item{\code{eigenvalue}}{the eigenvalues of the spectrum decomposition. If \code{refine = TRUE}, it takes value \code{NULL}.}
#' \item{\code{p.value}}{the p-values of the Ball Divergence tests.}
#' \item{\code{refined.snp}}{the SNPs that have been refined.}
#' \item{\code{refined.p.value}}{the refined \eqn{p}-values of significant SNPs.}
#' \item{\code{refined.permuted.statistic}}{a data.frame containing permuted Ball Divergence statistics for refining \eqn{p}-values.}
#' \item{\code{screening.result}}{a list containing the result of screening.}
#'
#' @export
#'
#' @references Yue Hu, Haizhu Tan, Cai Li, and Heping Zhang. (2021). Identifying genetic risk variants associated with brain volumetric phenotypes via K-sample Ball Divergence method. Genetic Epidemiology, 1–11.
https://doi.org/10.1002/gepi.22423 #' #' @author Jin Zhu #' #' @seealso #' \code{\link{bd}}, \code{\link{bd.test}} #' #' @examples #' \donttest{ #' library(Ball) #' set.seed(1234) #' num <- 200 #' snp_num <- 500 #' p <- 5 #' x <- matrix(rnorm(num * p), nrow = num) #' snp <- sapply(1:snp_num, function(i) { #' sample(0:2, size = num, replace = TRUE) #' }) #' snp1 <- sapply(1:snp_num, function(i) { #' sample(1:2, size = num, replace = TRUE) #' }) #' snp <- cbind(snp, snp1) #' res <- Ball::bd.gwas.test(x = x, snp = snp) #' mean(res[["p.value"]] < 0.05) #' mean(res[["p.value"]] < 0.005) #' #' ## only return the test statistics; #' res <- Ball::bd.gwas.test(x = x, snp = snp, num.permutation = 0) #' #' ## save pre-screening process results: #' x <- matrix(rnorm(num * p), nrow = num) #' snp <- sapply(1:snp_num, function(i) { #' sample(0:2, size = num, replace = TRUE, prob = c(1/2, 1/4, 1/4)) #' }) #' snp_screening <- Ball::bd.gwas.test(x = x, snp = snp, #' alpha = 5*10^-4, #' num.permutations = 19999) #' mean(res[["p.value"]] < 0.05) #' mean(res[["p.value"]] < 0.005) #' mean(res[["p.value"]] < 0.0005) #' ## refine p-value according to the pre-screening process result: #' res <- Ball::bd.gwas.test(x = x, snp = snp, alpha = 5*10^-4, #' num.permutations = 19999, #' screening.result = snp_screening[["screening.result"]]) #' } bd.gwas.test <- function(x, snp, screening.method = c("permute", "spectrum"), refine = TRUE, num.permutations, distance = FALSE, alpha, screening.result = NULL, verbose = TRUE, seed = 1, num.threads = 0, ...) { snp <- as.matrix(snp) num <- nrow(snp) snp_num <- ncol(snp) distance <- ifelse(class(x)[1] == "dist", TRUE, FALSE) if (distance) { if (class(x)[1] == "dist") { x <- as.vector(x) } else { x <- x[lower.tri(x)] } } else { x <- as.vector(stats::dist(x)) } if ((0.5 * num * (num - 1)) != length(x)) { stop("sample size of x and snp are not match!") } x <- stats::na.fail(x) snp <- stats::na.fail(snp) snp_class_num <- apply(snp, 2, function(x) { # dplyr::n_distinct(x) length(unique(x)) }) if (any(snp_class_num == 1)) { stop("there are some snps only contain 1 group!") } # unique_class_num <- dplyr::n_distinct(snp_class_num) unique_class_num <- length(unique(snp_class_num)) each_class_num <- as.vector(table(snp_class_num)) if (length(screening.method) > 1) { screening.method <- "permute" } else { screening.method <- match.arg(screening.method) } if (screening.method == "permute") { if (missing(num.permutations)) { num.permutations <- 100 * snp_num } r <- num.permutations } else { r <- 0 } if (r == 0) { verbose <- FALSE } eigenvalue <- NULL p_value <- NULL if (is.null(screening.result)) { set.seed(seed) statistic <- as.double(numeric(snp_num * 2)) permuted_statistic <- as.double(numeric(r * 2 * unique_class_num)) p_value <- as.double(numeric(snp_num * 2) + 1) x_index <- as.integer(numeric(num * num)) ties <- integer(1) x <- as.double(x) snp_vec <- as.integer(snp) num <- as.integer(num) snp_num <- as.integer(snp_num) unique_class_num <- as.integer(unique_class_num) each_class_num <- as.integer(each_class_num) r <- as.integer(r) nth <- as.integer(num.threads) verbose_out <- as.integer(verbose) screen_res <- .C( "bd_gwas_screening", statistic, permuted_statistic, p_value, x_index, ties, x, snp_vec, num, snp_num, unique_class_num, each_class_num, r, nth, verbose_out ) statistic <- screen_res[[1]][1:snp_num] permuted_statistic <- data.frame() x_index <- screen_res[[4]] ties <- screen_res[[5]] if (screening.method == "permute") { if (num.permutations > 0) { permuted_statistic <- 
data.frame(matrix(screen_res[[2]], nrow = r)) permuted_statistic <- permuted_statistic[, seq(1, 2 * unique_class_num, by = 2), drop = FALSE] colnames(permuted_statistic) <- paste0("g", sort(unique(snp_class_num))) p_value <- screen_res[[3]][1:snp_num] } } else { # TODO: save the eigenvalues of spectrum method eigenvalue <- NULL p_value <- NULL } screening_result <- list(statistic, permuted_statistic, p_value, x_index, ties) } else { statistic <- screening.result[["statistic"]] permuted_statistic <- screening.result[["permuted_statistic"]] p_value <- screening.result[["p_value"]] x_index <- screening.result[["x_index"]] ties <- screening.result[["ties"]] screening_result <- screening.result } significant_snp <- NULL significant_snp_p_value <- NULL refine_permuted_statistic <- data.frame() refine_snp_index <- NULL refine_p_value_vector <- NULL refine_permuted_statistic_matirx <- NULL if (refine) { if (!is.null(p_value)) { if (missing(alpha)) { alpha <- 0.05 / snp_num } refine_snp_index <- which(p_value < alpha) refine_snp_num <- length(refine_snp_index) if (refine_snp_num == 0) { if (verbose) { print("None of SNP pass the pre-screening process!") } } else { set.seed(seed) refine_permuted_statistic_matirx <- matrix(nrow = r, ncol = refine_snp_num) refine_p_value_vector <- numeric(refine_snp_num) x_index <- as.integer(x_index) ties <- as.integer(ties) x <- as.double(x) num <- as.integer(num) refine_snp_num <- as.integer(refine_snp_num) r <- as.integer(r) nth <- as.integer(num.threads) verbose_out <- as.integer(verbose) for (i in 1:length(refine_snp_index)) { refine_snp_statistic <- as.double(c(statistic[refine_snp_index[i]], statistic[refine_snp_index[i]])) refine_permuted_statistic <- as.double(numeric(r * 2)) refine_p_value <- as.double(numeric(2)) refine_snp_size_vec <- as.integer(table(snp[, refine_snp_index[i]])) refine_i_th <- as.integer(i) refine_k_num <- as.integer(snp_class_num[refine_snp_index[i]]) refine_res <- .C( "bd_gwas_refining_single", refine_snp_statistic, refine_permuted_statistic, refine_p_value, x_index, ties, x, num, refine_snp_size_vec, refine_i_th, refine_k_num, refine_snp_num, r, nth, verbose_out ) refine_permuted_statistic_matirx[, i] <- refine_res[[2]][seq(1, 2 * r, by = 2)] refine_p_value_vector[i] <- refine_res[[3]][1] } colnames(refine_permuted_statistic_matirx) <- paste0("SNP", refine_snp_index) p_value[refine_snp_index] <- refine_p_value_vector if (any(refine_p_value_vector < alpha)) { significant_snp <- which(p_value < alpha) significant_snp_p_value <- p_value[p_value < alpha] } } } } return( list( "statistic" = statistic, "permuted.statistic" = permuted_statistic, "eigenvalue" = eigenvalue, "p.value" = p_value, "refined.snp" = refine_snp_index, "refined.p.value" = refine_p_value_vector, "refined.permuted.statistic" = refine_permuted_statistic_matirx, "screening.result" = screening_result ) ) }
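# ---------------------------------------------------------------------------
# Usage sketch (illustration only; the toy data below are an assumption):
# bd.gwas.test also accepts a precomputed dist object for the phenotype, which
# avoids recomputing pairwise distances across repeated calls.
# ---------------------------------------------------------------------------
# set.seed(1)
# num <- 100
# pheno <- matrix(rnorm(num * 3), nrow = num)
# snp <- sapply(1:50, function(i) sample(0:2, size = num, replace = TRUE))
# pheno_dist <- dist(pheno)
# res <- bd.gwas.test(x = pheno_dist, snp = snp, num.permutations = 1000)
# head(res[["p.value"]])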
/scratch/gouwar.j/cran-all/cranData/Ball/R/bd.gaws.R
# Various imports
#' @importFrom stats pgamma
#' @importFrom stats dist
#' @importFrom stats runif
#' @importFrom stats rnorm
#' @importFrom stats as.formula
#' @importFrom stats model.frame
#' @importFrom stats setNames
#' @importFrom stats terms
#' @importFrom stats as.dist
#' @importFrom stats lm
#' @importFrom stats residuals
#' @importFrom utils data
#' @importFrom utils head
#' @importFrom utils memory.limit
#' @importFrom survival survfit
#' @importFrom survival Surv
#' @importFrom gam gam
#' @importFrom gam s
#' @importFrom mvtnorm rmvnorm
# @importFrom dplyr n_distinct
#' @importFrom stats na.fail
NULL

#' @title Meteorological data
#' @name meteorology
#' @docType data
#' @description
#' A meteorological dataset containing 46 records about air, soil, humidity, wind, and evaporation.
#'
#' @details
#' This meteorological dataset contains 46 observations on five groups of variables: air temperature, soil temperature,
#' relative humidity, wind speed, and evaporation.
#' Among them, the maximum, minimum, and average values of air temperature, soil temperature, and
#' relative humidity are recorded. Wind speed and evaporation
#' are univariate numerical variables. We desire to test the independence of these five
#' groups of variables.
#'
#' @format
#' \itemize{
#' \code{meteorology$air}: A data.frame containing 3 variables: maximum, minimum and average daily air temperature
#'
#' \code{meteorology$soil}: A data.frame containing 3 covariates: maximum, minimum and average daily soil temperature
#'
#' \code{meteorology$humidity}: A data.frame containing 3 covariates: maximum, minimum and average daily humidity
#'
#' \code{meteorology$wind}: a vector object recording total wind, measured in miles per day
#'
#' \code{meteorology$evaporation}: a vector object recording evaporation
#' }
#'
NULL

#' @title Lung cancer genomic data
#' @name genlung
#' @docType data
#' @description Publicly available lung cancer genomic data from the Chemores Cohort Study, containing
#' the expression levels of mRNA, miRNA, artificial noise variables as well as clinical variables and response.
#'
#' @details
#' Tissue samples were analysed from a cohort of 123 patients, who underwent complete surgical resection at the Institut Mutualiste
#' Montsouris (Paris, France) between 30 January 2002 and 26 June 2006. The studied outcome was the "Disease-Free Survival Time".
#' Patients were followed until the first relapse occurred or administrative censoring. In this genomic dataset,
#' the expression levels of Agilent miRNA probes (\eqn{p=939}) were included from the \eqn{n=123} cohort samples.
#' The miRNA data contains normalized expression levels. See the paper by Lazar et al. (2013) below and the Array Express
#' data repository for the complete description of the samples, tissue preparation, Agilent array technology, and data normalization.
#' In addition to the genomic data, five clinical variables, also evaluated on the cohort samples, are included as
#' a continuous variable ('Age') and nominal variables ('Type','KRAS.status','EGFR.status','P53.status').
#' See Lazar et al. (2013) for more details. Moreover, we add 1056 standard Gaussian variables,
#' which are independent of the censored response, as noise covariates. This dataset represents a situation where the number of
#' covariates dominates the number of complete observations, i.e., the \eqn{p >> n} case.
#'
#' @format
#' \itemize{
#' \code{genlung$survival}: A data.frame containing \eqn{n=123} complete observations.
#' The first column is the disease-free survival time and the
#' second column is the censoring status.
#'
#' \code{genlung$covariate}: A data.frame containing \eqn{p=2000} covariates.
#' }
#'
#' @references Lazar V. et al. (2013). Integrated molecular portrait of non-small cell lung cancers. BMC Medical Genomics 6:53-65.
NULL

#' @title Arctic lake sediment samples of different water depth
#' @name ArcticLake
#' @docType data
#' @description Sand, silt and clay compositions of 39 sediment samples of different water
#' depth in an Arctic lake.
#'
#' @details Sand, silt and clay compositions of 39 sediment samples at different water
#' depth (in meters) in an Arctic lake. The additional feature is a concomitant variable or
#' covariate, water depth, which may account for some of the variation in the compositions.
#' In statistical terminology, we have a multivariate regression problem with sediment
#' composition as predictors and water depth as a response. All row percentages sum to 100,
#' except for rounding errors.
#'
#' @format
#' \itemize{
#' \code{ArcticLake$depth}: water depth (in meters).
#'
#' \code{ArcticLake$x}: compositions of three covariates: sand, silt, and clay.
#' }
#'
#' @references Aitchison: The Statistical Analysis of Compositional Data, 1986, Data 5, pp5.
#' @source Aitchison: CODA microcomputer statistical package, 1986, the file name ARCTIC.DAT, here included under the GNU Public Library Licence Version 2 or newer.
#' @note Courtesy of J. Aitchison
NULL

#' @title Male and female macaque data
#' @name macaques
#' @docType data
#' @description Male and female macaque skull data: 7 landmarks in 3 dimensions,
#' 18 individuals (9 males, 9 females).
#'
#' @details In an investigation into sex differences in the crania of a species of
#' Macaca fascicularis (a type of monkey), random samples of 9 male and 9 female
#' skulls were obtained by Paul O’Higgins (Hull-York Medical School) (Dryden and Mardia 1993).
#' A subset of seven anatomical landmarks was located on each cranium and the three-dimensional (3D)
#' coordinates of each point were recorded.
#'
#' @format
#' \itemize{
#' \code{macaques$x}: An array of dimension \eqn{7 \times 3 \times 18}
#'
#' \code{macaques$group}: A factor indicating the sex ('m' for male and 'f' for female)
#' }
#'
#' @note Dryden, I.L. and Mardia, K.V. (1998). Statistical Shape Analysis, Wiley, Chichester.
#' @references Dryden, I. L. and Mardia, K. V. (1993). Multivariate shape analysis. Sankhya Series A, 55, 460-480.
NULL

#' @title Simulated von Mises-Fisher Data
#' @name bdvmf
#' @docType data
#'
#' @description Simulated random vectors following the von Mises-Fisher distribution
#' with mean directions \eqn{\mu_{x}=(1, 0, 0)} and \eqn{\mu_{y}=(1, 1, 1)},
#' and concentration parameter \eqn{\kappa = 3}.
#'
#' @details In directional statistics, the von Mises–Fisher distribution
#' (named after Ronald Fisher and Richard von Mises) is a probability distribution
#' on the \eqn{(p-1)}-dimensional sphere in \eqn{R^{p}}.
#'
#' The parameters \eqn{\mu} and \eqn{\kappa} are called the mean direction and concentration
#' parameter, respectively. The greater the value of \eqn{\kappa},
#' the higher the concentration of the distribution around the mean
#' direction \eqn{\mu}. The distribution is unimodal for \eqn{\kappa > 0},
#' and is uniform on the sphere for \eqn{\kappa=0}.
#'
#' @format
#' \itemize{
#' \code{bdvmf$x}: A \eqn{300 \times 3} numeric matrix containing simulated von Mises-Fisher data.
#'
#' \code{bdvmf$group}: A group index vector.
#' }
#'
#' @references Fisher, N. I., Lewis, T. and Embleton, B. J. J. (1993). Statistical analysis of spherical data (1st pbk. ed.). Cambridge: Cambridge University Press. pp. 115–116. ISBN 0-521-45699-1.
#'
NULL

#' @title Distribution of BD when the null hypothesis is correct.
#' @name BDTestNullDistribution
#' @description Distribution of BD when the null hypothesis is correct.
#' @noRd
NULL

#' @title Distribution of BCor when the null hypothesis is correct.
#' @name BITestNullDistribution
#' @description Distribution of BCor when the null hypothesis is correct.
#' @noRd
NULL

#' @title Generate the example data demonstrated in the vignettes
#' @param n The desired sample size.
#' @param type This argument is used to select a specific dataset.
#' @description Three examples (bd-type1error, bd-unvariate, bd-multivariate) generate data suitable for demonstrating the 2-sample test for univariate or multivariate variables.
#' The remaining examples (bcov-unvariate, bcor-multivariate) are simple univariate and multivariate dependence structures used to demonstrate the tests of independence.
#'
#' @details Choose an example type and set the size of the example.
#' \code{ball.example} will return an example list to demonstrate the
#' two-sample test or the test of independence. Given data from the examples (bd-type1error, bd-unvariate, bd-multivariate), we would like to test whether the two variables are identically distributed using \code{bd.test}. For data from the examples (bcov-unvariate, bcor-multivariate), we would like to test whether the two variables are statistically independent using \code{bcov.test}. Note that in these examples the two variables are indeed dependent.
#'
#' @return A list containing two elements \code{x} and \code{y} is returned.
#' @noRd
ball.example <- function(n = 50, type) {
  set.seed(1)
  error <- runif(n, min = -0.3, max = 0.3)
  if(type %in% c('bd-type1error', 'bcov-type1error', 'bcor-type1error')) {
    x <- runif(n)
    y <- runif(n)
  }
  if(type %in% c("bcov-unvariate", "bcor-unvariate")) {
    x <- runif(n, 0, 4*pi)
    y <- cos(x) + error
  }
  if(type %in% c("bcor-multivariate", "bcov-multivariate")) {
    p <- 2
    x <- matrix(runif(n * p, -pi, pi), nrow = n, ncol = p)
    y <- (sin((x[,1])^2 + x[,2])) + error
  }
  if(type %in% c("bd-unvariate")) {
    x <- rnorm(n/2)
    y <- rnorm(n/2, mean = 1)
  }
  if(type %in% c("bd-multivariate")) {
    p <- 2
    x <- matrix(rnorm(n/2*p), nrow = n/2, ncol = p)
    y <- matrix(rnorm(n/2*p, mean = 3), nrow = n/2, ncol = p)
  }
  # if(type %in% c("bd-vmf")) {
  #   data("bdvmf", package = 'Ball', envir = environment(), verbose = FALSE)
  #   return(bdvmf)
  # }
  # if(type %in% c("bd-macaques")) {
  #   data("macaques", package = 'Ball', envir = environment(), verbose = FALSE)
  #   return(macaques)
  # }
  # if(type %in% c("bcov-arcticlake")) {
  #   data("ArcticLake", package = 'Ball', envir = environment(), verbose = FALSE)
  #   return(ArcticLake)
  # }
  list('y' = y, 'x' = x)
}
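# A minimal usage sketch (illustrative only; not part of the original package
# code). Once the package is loaded, the internal helper above can feed the
# exported tests directly:
# example_dat <- ball.example(n = 50, type = "bcov-unvariate")
# bcov.test(x = example_dat[["x"]], y = example_dat[["y"]], num.permutations = 99)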
/scratch/gouwar.j/cran-all/cranData/Ball/R/data.R
#' @title Distance Matrix Computation for Non-Hilbert Data
#'
#' @description This function computes and returns the numeric distance matrix
#' computed by using the specified distance measure to compute
#' the distances between the rows of a data matrix.
#'
#' @param x a numeric matrix, data frame or numeric array of dimension \eqn{k \times m \times n}
#' containing \eqn{n} samples in \eqn{k \times m} dimension.
#' @param method the distance measure to be used. This must be one of \code{"geodesic"},
#' \code{"compositional"} (equivalently, \code{"bhattacharyya"}), \code{"angular"}, or \code{"riemann"}.
#' Any unambiguous substring can be given.
#'
#' @details Available distance measures are geodesic, compositional, angular, and riemann.
#' Denoting any two samples in the dataset as \eqn{x} and \eqn{y},
#' we give the definition of these distance measures as follows.
#'
#' geodesic:
#'
#' The shortest route between two points on the Earth's surface, namely, a segment of a great circle.
#' \deqn{\arccos(x^{T}y), \|x\|_{2} = \|y\|_{2} = 1}
#'
#' compositional:
#'
#' First, we apply a scale transformation to the data, i.e., \eqn{(x_{i1}/t_{i}, ..., x_{ip}/t_{i}), t_{i} = \sum_{d=1}^{p}{x_{id}}}.
#' Then, we apply the square root transformation to the data and calculate the geodesic distance between samples.
#'
#' riemann:
#'
#' \eqn{k \times m \times n} array where \eqn{k} = number of landmarks, \eqn{m} = number of dimensions and \eqn{n} = sample size. Details about the
#' Riemannian shape distance are given in Kendall, D. G. (1984).
#'
#' @return \eqn{n \times n} numeric distance matrix
#' @references Kendall, D. G. (1984). Shape manifolds, Procrustean metrics and complex projective spaces, Bulletin of the London Mathematical Society, 16, 81-121.
#' @export
#' @examples
#' data('bdvmf')
#' Dmat <- nhdist(bdvmf[['x']], method = "geodesic")
#'
#' data("ArcticLake")
#' Dmat <- nhdist(ArcticLake[['x']], method = "compositional")
#'
#' data("macaques")
#' Dmat <- nhdist(macaques[["x"]], method = "riemann")
#'
#' # unambiguous substring also available:
#' Dmat <- nhdist(macaques[["x"]], method = "rie")
#'
nhdist <- function(x, method = 'geodesic') {
  METHODS <- c("geodesic", "compositional", "riemann", "bhattacharyya", "angular")
  methodIndex <- pmatch(method, METHODS)
  # pmatch() returns NA both for no match and for an ambiguous partial match:
  if (is.na(methodIndex))
    stop("invalid or ambiguous distance method")
  method <- METHODS[methodIndex]
  #
  if(method %in% c('geodesic')) {
    return(distsurface(x))
  } else if(method %in% c('compositional')) {
    return(dist_bhattacharyya(x))
  } else if(method == "bhattacharyya") {
    return(dist_bhattacharyya(x))
  } else if(method == "angular") {
    return(dist_angular(x))
  } else if(method %in% c('riemann')) {
    return(distrieman(x))
  }
}

#' Distance for compositional data (Bhattacharyya distance)
#'
#' @param x Matrix object.
#'
#' @return Distance matrix
#' @noRd
#' @examples
#' data("ArcticLake")
#' head(ArcticLake[["x"]])
#' dist_bhattacharyya(ArcticLake[["x"]])
dist_bhattacharyya <- function(x) {
  xRowSum <- rowSums(x)
  x <- apply(x, 2, function(z) {
    z/xRowSum
  })
  x <- sqrt(x)
  #
  distsurface(x)
}

#' Distance for compositional data (Angular distance)
#'
#' @param x Matrix object.
#'
#' @return Distance matrix
#' @noRd
#' @examples
#' data("ArcticLake")
#' head(ArcticLake[["x"]])
#' dist_angular(ArcticLake[["x"]])
dist_angular <- function(x) {
  x <- x^2
  xRowSum <- rowSums(x)
  x <- apply(x, 2, function(z) {
    z/xRowSum
  })
  x <- sqrt(x)
  #
  distsurface(x)
}

#' @title Geodesic Distance in Unit Ball
#'
#' @param x Matrix object.
#' #' @return Distance matrix #' @noRd #' @examples #' data("bdvmf") #' Dmat <- distsurface(bdvmf[['x']]) #' distsurface <- function(x) { x <- as.matrix(x) Dmat <- x %*% t(x) diag(Dmat) <- 1 suppressWarnings(Dmat <- acos(Dmat)) Dmat[is.na(Dmat)] <- 1 Dmat <- (Dmat + t(Dmat)) / 2 Dmat <- round(Dmat, digits = 13) Dmat } #' @title Riemannian shape distance #' #' @param x \eqn{ k \times m \times n } array. #' @description Calculates the Riemannian shape distance rho between two configurations #' @return Distance matrix #' @noRd #' @examples #' data("macaques") #' Dmat <- distrieman(macaques[["x"]]) #' distrieman <- function(x) { n <- dim(x)[3] dist_mat <- matrix(0, nrow = n, ncol = n) for (i in 1:n) { for (j in i:n) { if (i != j) { dist_mat[i, j] <- riemdist(x[, , i], x[, , j]) if (dist_mat[i, j] < 10 * .Machine$double.eps) { dist_mat[i, j] <- 0 } dist_mat[j, i] <- dist_mat[i, j] } } } dist_mat # sapply(1:n, function(i) { # sapply(1:n, function(j) { # riemdist(x[,,i], x[,,j]) # }) # }) } realtocomplex<-function(x) { #input k × 2 matrix - return complex k-vector k <- nrow(x) zstar <- x[, 1] + (1i) * x[, 2] zstar } defh<-function(nrow) { #Defines and returns an nrow × (nrow+1) Helmert sub-matrix k <- nrow h <- matrix(0, k, k + 1) j <- 1 while(j <= k) { jj <- 1 while(jj <= j) { h[j, jj] <- -1/sqrt(j * (j + 1)) jj <- jj + 1 } h[j, j + 1] <- j/sqrt(j * (j + 1)) j <- j + 1 } h } st<-function(zstar) { #input complex matrix #output transpose of the complex conjugate st <- t(Conj(zstar)) st } centroid.size<-function(x) { #returns the centroid size of a configuration (or configurations) #input: k × m matrix/or a complex k-vector # or input a real k × m × n array to get a vector of sizes for a sample if ((is.vector(x)==FALSE) && is.complex(x)){ k <- nrow(x) n <- ncol(x) tem <- array(0,c(k,2,n)) tem[ ,1, ] <- Re(x) tem[ ,2, ] <- Im(x) x <- tem } { if (length(dim(x))==3){ n <- dim(x)[3] sz <- rep(0,times=n) k <- dim(x)[1] h <- defh(k - 1) for (i in 1:n){ xh <- h %*% x[ , ,i] sz[i] <- sqrt(sum(diag(t(xh) %*% xh))) } sz } else { if (is.vector(x) && is.complex(x)) { x <- cbind(Re(x), Im(x)) } k <- nrow(x) h <- defh(k - 1) xh <- h %*% x size <- sqrt(sum(diag(t(xh) %*% xh))) size } } } preshape<-function(x) { #input k × m matrix / complex k-vector #output k-1 × m matrix / k-1 × 1 complex matrix if(is.complex(x)) { k <- nrow(as.matrix(x)) h <- defh(k - 1) zstar <- x ztem <- h %*% zstar size <- sqrt(diag(Re(st(ztem) %*% ztem))) if(is.vector(zstar)) z <- ztem/size if(is.matrix(zstar)) z <- ztem %*% diag(1/size) } else { if(length(dim(x)) == 3) { k <- dim(x)[1] h <- defh(k - 1) n <- dim(x)[3] m <- dim(x)[2] z <- array(0, c(k - 1, m, n)) for(i in 1:n) { z[, , i] <- h %*% x[, , i] size <- centroid.size(x[, , i]) z[, , i] <- z[, , i]/size } } else { k <- nrow(as.matrix(x)) h <- defh(k - 1) ztem <- h %*% x size <- centroid.size(x) z <- ztem/size } } z } riemdist<-function(x, y, reflect=FALSE) { #input two k × m matrices x, y or complex k-vectors #output Riemannian distance rho between them if (sum((x-y)**2)==0){ riem <- 0 } if (sum((x-y)**2)!=0){ if (reflect==FALSE) { if(ncol(as.matrix(x)) < 3) { if (is.complex(x)==FALSE){x<-realtocomplex(x)} if (is.complex(y)==FALSE){y<-realtocomplex(y)} #riem <- c(acos(Mod(st(preshape(x)) %*% preshape(y)))) riem<-c(acos(min(1,(Mod(st(preshape(x)) %*% preshape(y)))))) } else { m <- ncol(x) z <- preshape(x) w <- preshape(y) Q <- t(z) %*% w %*% t(w) %*% z ev <- eigen(t(z) %*% w)$values check <- 1 for(i in 1:m) { check <- check * ev[i] } ev <- sqrt(abs(eigen(Q, symmetric = TRUE)$values)) 
if(Re(check) < 0) ev[m] <- - ev[m] riem <- acos(min(sum(ev),1)) } } if (reflect==TRUE){ m <- ncol(x) z <- preshape(x) w <- preshape(y) Q <- t(z) %*% w %*% t(w) %*% z ev <- sqrt(abs(eigen(Q, symmetric = TRUE)$values)) riem <- acos(min(sum(ev),1)) } } riem }
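# A small self-check sketch (illustrative only; not original package code):
# for rows normalised to the unit sphere, distsurface() should agree with a
# direct acos of the inner product between two observations.
# x <- matrix(rnorm(6), nrow = 2)
# x <- x / sqrt(rowSums(x^2))  # project each row onto the unit sphere
# all.equal(distsurface(x)[1, 2], acos(sum(x[1, ] * x[2, ])), tolerance = 1e-8)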
/scratch/gouwar.j/cran-all/cranData/Ball/R/nhdist.R
.onUnload <- function (libpath) {
  library.dynam.unload("Ball", libpath)
}

WEIGHT_TYPE <- c("constant", "probability", "chisquare")
BD_WEIGHT_TYPE <- c("constant", "variance")
BCOR_WEIGHT_STATS <- c("bcor.constant", "bcor.probability", "bcor.chisquare")
BCOV_WEIGHT_STATS <- c("bcov.constant", "bcov.probability", "bcov.chisquare")
BD_WEIGHT_STATS <- c("bd.constant", "bd.variance")
KBD_WEIGHT_STATS <- c("kbd.sum.constant", "kbd.sum.variance",
                      "kbd.max.constant", "kbd.max.variance",
                      "kbd.maxsum.constant", "kbd.maxsum.variance")

center_bdd_matrix <- function(bdd) {
  num <- dim(bdd)[1]
  # double centering: subtract column and row means, then add back the grand mean
  bdd <- sweep(bdd, 2, colMeans(bdd)) - rowMeans(bdd) + mean(bdd)
  bdd
}

#' Hall-Buckley-Eagleson method
#'
#' Computes the cdf of a positively-weighted sum of chi-squared random variables with the Hall-Buckley-Eagleson (HBE) method.
#' @keywords distribution
#' @references
#' \itemize{
#' \item P. Hall. Chi squared approximations to the distribution of a sum of independent random variables. \emph{The Annals of Probability}, 11(4):1028-1036, 1983.
#' \item M. J. Buckley and G. K. Eagleson. An approximation to the distribution of quadratic forms in normal random variables. \emph{Australian Journal of Statistics}, 30(1):150-159, 1988.
#' }
#' @examples
#' hbe(c(1.5, 1.5, 0.5, 0.5), 10.203)  # should give a value close to 0.95
#' @noRd
hbe <- function(coeff, x){
  # compute cumulants and nu
  K_1 <- sum(coeff)
  K_2 <- 2 * sum(coeff^2)
  K_3 <- 8 * sum(coeff^3)
  nu <- 8 * (K_2^3) / (K_3^2)

  # gamma parameters for chi-square
  gamma_k <- nu/2
  gamma_theta <- 2

  # need to transform the actual x value to x_chisqnu ~ chi^2(nu)
  # This transformation is used to match the first three moments
  # First x is normalised and then scaled to be x_chisqnu
  x_chisqnu_vec <- sqrt(2 * nu / K_2) * (x - K_1) + nu

  # now this is a chi_sq(nu) variable
  p_chisqnu_vec <- pgamma(x_chisqnu_vec, shape = gamma_k, scale = gamma_theta)
  p_chisqnu_vec
}

#' Calculate the permutation p-value
#'
#' @param statValue Statistic value
#' @param NullDistribution Ball statistic distribution when the null hypothesis is true
#' @noRd
#' @return p-value
#'
calculatePvalue <- function(statValue, NullDistribution) {
  surpass_number <- sum(statValue < NullDistribution)
  if (surpass_number == 0) {
    # the (s + 1) / (R + 1) correction avoids a p-value of exactly zero
    p.value <- (surpass_number + 1) / (length(NullDistribution) + 1)
  } else {
    p.value <- surpass_number / length(NullDistribution)
  }
  p.value
}

#' Estimate memory consumption
#'
#' @param n Sample size
#' @param funs Function name
#' @noRd
#' @return the available flag
#'
memoryAvailable <- function(n, funs) {
  sysname <- Sys.info()[1]
  # memory check is only available on the Windows platform
  # (note: memory.limit() is Windows-only and no longer supported in recent R versions)
  if(sysname == "Windows") {
    MemoryAvailable <- TRUE
    sizeLevel <- (n/1000)^2
    if( funs == 'UBI.test' ) {
      queryMemory <- 0.05 * sizeLevel
    } else if( funs == 'BI.test' ) {
      queryMemory <- 0.16 * sizeLevel
    } else if( funs == 'UBD.test' ) {
      queryMemory <- 0.02 * sizeLevel
    } else if( funs == 'BD.test' ) {
      queryMemory <- 0.08 * sizeLevel
    }
    sysMemory <- memory.limit()/1024
    if(queryMemory > sysMemory) {
      # MemoryAvailable <- FALSE
      stop("Sample size too large and system memory is not available!")
    }
  } else {
    if(n > 8000) {
      warning("You may suffer from insufficient memory!")
    }
  }
}

#' Examine the x, y arguments in bcov.test, bcov
#' @inheritParams bcov.test
#' @noRd
#'
examine_x_y <- function(x, y) {
  dim_x <- dim(x)
  dim_y <- dim(y)
  if(is.null(dim_x) | is.null(dim_y)) {
    stop("x or y is NULL!")
  }
  n <- dim_x[1]
  if(n != dim_y[1]) {
    stop("x and y have different sample sizes!")
  }
  if(any(apply(y, 2, anyNA))) {
    stop("Missing data in y!")
  }
  if(any(apply(x, 2, anyNA))) {
stop("Missing data in x!") } if((dim_x[2] == 1) & (dim_y[2] == 1)) { p <- 1 } else { p <- -1 } c(n, p) } #' Examine x, y arguments in bcov.test, bcov #' @inheritParams bcov.test #' @noRd #' examine_x_y_bcov <- function(x, y) { if (anyNA(x) || anyNA(y)) { stop("Missing value exist!") } if (length(x) != length(y)) { stop("x and y have different sample sizes!") } } #' Examine seed arguments in bcov.test, bd.test #' @param seed A integer number #' @return A integer number #' @noRd #' examine_seed_arguments <- function(seed) { if(is.null(seed)) { seed <- runif(1 , 0, .Machine$integer.max) } seed } #' Examine weight arguments in bcov.test, bd.test #' @param weight A bool or character value #' @param fun "bd.test" or "bcov.test" #' @return A integer number #' @noRd #' examine_weight_arguments <- function(weight) { if (is.logical(weight) || is.character(weight)) { if (is.logical(weight)) { weight <- ifelse(weight, "probability", "constant") } else { weight <- match.arg(arg = weight, choices = WEIGHT_TYPE) } return(weight) } else { stop("The weight arguments is invalid!") } } #' Title #' #' @param category #' @param p #' #' @noRd #' examine_category <- function(category, p) { if (is.logical(category)) { if (category) { category_index <- 1:p } else { category_index <- c() } } else { stopifnot(all(category < 0) || all(category > 0)) if (any(category > 0)) { category_index <- category } else { category_index <- setdiff(1:p, category) } } category_index } select_ball_stat <- function(ball_stat, weight, type = "bcov", fun_name = "bcov") { if (fun_name == "bcorsis") { if (weight == "constant") { ball_stat <- ball_stat[, 1] } else if (weight == "probability") { ball_stat <- ball_stat[, 2] } else if (weight == "chisquare") { ball_stat <- ball_stat[, 3] } } else { if (weight == "constant") { ball_stat <- ball_stat[1] names(ball_stat) <- ifelse(type == "bcov", BCOV_WEIGHT_STATS[1], BCOR_WEIGHT_STATS[1]) } else if (weight == "probability") { ball_stat <- ball_stat[2] names(ball_stat) <- ifelse(type == "bcov", BCOV_WEIGHT_STATS[2], BCOR_WEIGHT_STATS[2]) } else if (weight == "chisquare") { ball_stat <- ball_stat[3] names(ball_stat) <- ifelse(type == "bcov", BCOV_WEIGHT_STATS[3], BCOR_WEIGHT_STATS[3]) } } return(ball_stat) } #' Examine size arguments in bcov.test, bd.test #' @param size A integer vector #' @noRd #' examine_size_arguments <- function(size) { # self examine: if(is.null(size)) { stop("size arguments is needed") } size <- as.integer(size) if(any(is.na(size)) | any(size <= 0) | (length(size)==1)) { stop("size arguments is invalid!") } } #' Examine R arguments in bcov.test, bd.test #' @param R A integer number #' @noRd #' examine_R_arguments <- function(R) { if(is.null(R) | (R < 0)) { stop("R arguments is invalid!") } } #' Examine num.threads arguments in bcov.test, bd.test #' @param R A integer number #' @noRd #' examine_threads_arguments <- function(num.threads) { if(is.null(num.threads) | (num.threads < 1)) { num.threads <<- 0; # stop("num.threads arguments is invalid!") } } #' Examine type arguments in bcov.test, bd.test #' @param type "bcor" or "bcov" #' @noRd #' examine_type_arguments <- function(type) { if(all(!(type %in% c("bcov", "bcor")))) { type <- "bcov" } type } #' Examine dimension equality of input arguments x and y in bd.test #' #' @param x numeric matrix #' @param y numeric matrix #' @return dimension #' @noRd #' examine_dimension <- function(x, y) { dim_x <- dim(x) dim_y <- dim(y) p1 <- dim_x[2] p2 <- dim_y[2] if(p1 != p2) { stop("x and y with different dimension!") } p1 } #' get 
#' Get the vectorized distance matrix
#' @inheritParams bd.test
#' @return vectorized distance matrix
#' @noRd
get_vectorized_distance_matrix <- function(x, y) {
  n1 <- dim(x)[1]
  n2 <- dim(y)[1]
  n <- n1 + n2
  xy <- rbind(x, y)
  dxy <- as.vector(dist(xy))
  list(dxy, n1, n2)
}

#' @inheritParams bd.test
#' @return matrix
#' @noRd
#'
get_matrixed_x <- function(x, y) {
  if(is.null(x)) {
    x <- y
  }
  as.matrix(x)
}

#' Return the candidate set size in the bcorsis function
#' @inheritParams bcorsis
#' @param n sample size
#' @return size of the candidate set
#' @noRd
#'
examine_candiate_size <- function(n, candidate, p) {
  if(p > n) {
    if(is.numeric(candidate)) {
      if(candidate <= 0) {
        stop("The candidate argument is invalid!")
      }
      final_d <- as.integer(candidate)
    } else {
      if(candidate == "small"){
        final_d <- floor(n/log(n))
      } else if(candidate == "large") {
        final_d <- n - 1
      } else {
        stop("The candidate argument is invalid!")
      }
    }
  } else {
    final_d <- p
    message("The number of covariates is not larger than the sample size, so the SIS procedure is not essential")
  }
  final_d
}

get_screened_vars <- function(ids, rcory_result, final_d) {
  max_ids <- order(rcory_result, decreasing = TRUE)
  chooseids <- max_ids[1:final_d]
  ids[chooseids]
}

preprocess_bcorsis_y <- function(y, y_p) {
  if(y_p != 1) {
    y_copy <- as.vector(dist(y))
    dst <- TRUE
  } else {
    y_copy <- as.vector(y)
    dst <- FALSE
  }
  list(y_copy, dst)
}

#' Examine the method argument in bcorsis
#' @inheritParams bcorsis
#' @noRd
#'
examine_method_arguments <- function(method) {
  method <- head(unlist(strsplit(method, "-")), n = 1)
  if(!(method %in% c("standard", "pvalue", "interaction", "survival", "gam", "lm"))) {
    stop("The method argument is invalid!")
  }
  method
}

examine_dst_method <- function(dst, method) {
  if(method %in% c("survival", "lm", "gam")) {
    if(dst) {
      messages <- " method is not available when distance = TRUE"
      messages <- paste0(method, messages)
      stop(messages)
    }
  }
}
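# Illustrative sketch (not original package code): calculatePvalue() turns a
# vector of permuted statistics into a permutation p-value. When none of the
# 199 permuted values exceeds the observed statistic, the (s + 1) / (R + 1)
# correction yields 1 / 200 rather than an exact zero.
# null_dist <- rnorm(199)
# calculatePvalue(statValue = 10, NullDistribution = null_dist)  # 0.005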
/scratch/gouwar.j/cran-all/cranData/Ball/R/utilize.R
#' @title Compute eigenvalues of the Ball Covariance limit distribution
#' @description Wrapper for the C function which computes the spectrum used by the Ball Covariance limit-distribution test
#' @inheritParams bd.test
#' @noRd
bcov_limit_wrap_c <- function(x, y, num, distance, num.threads) {
  if (!is.null(y)) {
    x <- list(x, y)
  }
  N <- as.integer(num)
  distance <- as.integer(distance)
  num.threads <- as.integer(num.threads)
  bdd_xy_eigen <- matrix(data = 1, ncol = num, nrow = num)
  for (i in 1:length(x)) {
    xy <- as.double(x[[i]])
    bdd_xy <- double((num + 1) * num / 2)
    res <- .C("bdd_matrix_bias", bdd_xy, xy, N, num.threads)
    rm(bdd_xy); gc(reset = TRUE, verbose = FALSE)
    bdd_xy <- matrix(0, nrow = num, ncol = num)
    bdd_xy[lower.tri(bdd_xy, diag = TRUE)] <- res[[1]]
    bdd_xy <- bdd_xy + t(bdd_xy)
    diag(bdd_xy) <- diag(bdd_xy) / 2
    bdd_xy_eigen <- bdd_xy_eigen * center_bdd_matrix(bdd_xy)
  }
  eigenvalue <- eigen(bdd_xy_eigen, only.values = TRUE, symmetric = TRUE)$values
  eigenvalue <- eigenvalue[eigenvalue > 0] / num
  eigenvalue
}

#' @title Compute eigenvalues of the Ball Divergence limit distribution
#' @description Wrapper for the C function which computes the spectrum used by the Ball Divergence limit-distribution test
#' @inheritParams bd.test
#' @noRd
bd_limit_wrap_c <- function(xy, size, distance, num.threads) {
  xy <- as.double(xy)
  n1 <- as.integer(size[1])
  n2 <- as.integer(size[2])
  distance <- as.integer(distance)
  num.threads <- as.integer(num.threads)
  num <- as.integer(sum(size))
  bdd_xy <- double((num + 1) * num / 2)
  res <- .C("bdd_matrix_bias_two_group", bdd_xy, xy, n1, n2, num.threads)
  bdd_xy <- matrix(0, nrow = num, ncol = num)
  bdd_xy[lower.tri(bdd_xy, diag = TRUE)] <- res[[1]]
  bdd_xy <- bdd_xy + t(bdd_xy)
  diag(bdd_xy) <- diag(bdd_xy) / 2
  bdd_xy <- center_bdd_matrix(bdd_xy)
  eigenvalue <- eigen(bdd_xy, only.values = TRUE, symmetric = TRUE)$values
  eigenvalue <- eigenvalue[eigenvalue > 0] / num
  2 * eigenvalue
}

#' @title Compute the Ball Divergence statistic
#' @description Wrapper for the C function which computes the Ball Divergence statistic
#' @inheritParams bd.test
#' @param xy numeric vector
#'
#' @return Ball Divergence statistic
#' @useDynLib Ball, .registration = TRUE
#' @noRd
bd_value_wrap_c <- function(xy, size, weight, distance, num.threads) {
  xy <- as.double(xy)
  bd <- as.double(numeric(1))
  weight <- as.integer(weight)
  distance <- as.integer(distance)
  num.threads <- as.integer(num.threads)
  K <- as.integer(length(size))
  size <- as.integer(size)
  N <- as.integer(sum(size))
  res <- .C("bd_stat", bd, xy, size, N, K, weight, distance, num.threads)
  #
  bd <- res[[1]]
  names(bd) <- ifelse(weight, "wbd", "bd")
  list("statistic" = bd, "permuted_stat" = NULL,
       "info" = list("N" = N, "K" = K, "size" = size, "weight" = as.logical(weight)))
}

#' @title Compute the Ball Divergence statistic and its permuted replicates
#' @description Wrapper for the C function which computes the Ball Divergence statistic and its values after permutation
#' @inheritParams bd.test
#' @param xy numeric vector
#'
#' @return Ball Divergence statistic
#' @useDynLib Ball, .registration = TRUE
#' @noRd
bd_test_wrap_c <- function(xy, size, num.permutations, weight, distance, num.threads) {
  xy <- as.double(xy)
  distance <- as.integer(distance)
  r <- as.integer(num.permutations)
  num.threads <- as.integer(num.threads)
  #
  K <- as.integer(length(size))
  stat_num <- ifelse(K == 2, 2, 6)
  bd <- as.double(numeric(stat_num))
  p_value <- as.double(numeric(stat_num))
  size <- as.integer(size)
  N <- as.integer(sum(size))
  res <- .C("bd_test", bd, p_value, xy, size, N, K, distance, r, num.threads)
  bd <- res[[1]]
  p_value <- res[[2]]
  if (K == 2) {
    names(bd) <- BD_WEIGHT_STATS
  } else {
    names(bd) <-
KBD_WEIGHT_STATS } names(p_value) <- paste0(names(bd), ".pvalue") if (weight == BD_WEIGHT_TYPE[1]) { if (K == 2) { index <- 1 } else { index <- c(1, 3, 5) } } else { if (K == 2) { index <- 2 } else { index <- c(2, 4, 6) } } return_bd <- bd[index] return_p_value <- p_value[index] list('statistic' = return_bd, 'p.value' = return_p_value, 'info' = list('statistic' = bd, "p.value" = p_value, 'N' = N, 'K' = K, 'size' = size, 'weight' = weight, 'num.permutations' = num.permutations)) } #' #' compute Ball Covariance statistic #' #' @inheritParams bcov.test #' #' @param x numeric vector. #' #' @param y numeric vector. #' #' @param n sample size. it must be integer value. #' #' @param type if type == 1, function return bcov, otherwise, bcor insteaded. #' #' #' #' @return A list contain: Ball Covariance statistic, sample size, weight #' #' @useDynLib Ball, .registration = TRUE #' #' @noRd #' #' #' bcov_value_wrap_c <- function(x, y, n, weight, distance, type, num.threads) { #' bcov <- as.double(numeric(1)) #' distance <- as.integer(distance) #' x <- as.double(x) #' y <- as.double(y) #' n <- as.integer(n) #' weight_cp <- weight #' weight <- as.integer(weight) #' num.threads <- as.integer(num.threads) #' type <- ifelse(type == "bcov", 1, 2) #' type <- as.integer(type) #' if (type == 2) { #' res <- .C("bcov_stat", bcov, x, y, n, weight, distance, type, num.threads) #' bcov <- res[[1]] #' } else { #' bcov <- as.double(numeric(3)) #' p_value <- as.double(numeric(3)) #' r <- as.integer(numeric(1)) #' weight <- as.integer(numeric(1)) #' weight_cp <- examine_weight_arguments(weight_cp, "bcov.test") #' res <- .C("bcov_test", bcov, p_value, x, y, n, r, distance, num.threads) #' bcov <- res[[1]] #' } #' if (type == 1) { #' if (weight_cp == "none") { #' bcov <- bcov[1] #' names(bcov) <- "bcov" #' } else if(weight_cp == "prob") { #' bcov <- bcov[2] #' names(bcov) <- "bcov.prob" #' } else { #' bcov <- bcov[3] #' names(bcov) <- "bcov.chisq" #' } #' } else { #' names(bcov) <- ifelse(weight, "wbcor", "bcor") #' } #' list('statistic' = bcov, "permuted_stat" = NULL, #' "info" = list("N" = res[[4]], "weight" = as.logical(weight))) #' } #' compute Ball Covariance statistic and Ball Covariance statistic after permutation #' @inheritParams bcov.test #' @param x numeric vector. #' @param y numeric vector. #' @param n sample size. it must be integer value. #' #' @return A list contain: Ball Covariance statistic, Ball Covariance statistic after permutation, #' sample size, replication times, weight #' @useDynLib Ball, .registration = TRUE #' @noRd #' bcov_test_wrap_c <- function(x, y, n, num.permutations, distance, num.threads) { x <- as.double(x) y <- as.double(y) n <- as.integer(n) distance <- as.integer(distance) num.threads <- as.integer(num.threads) r <- as.integer(num.permutations) # bcov <- as.double(numeric(3)) p_value <- as.double(numeric(3)) res <- .C("bcov_test", bcov, p_value, x, y, n, r, distance, num.threads) bcov <- res[[1]] p_value <- res[[2]] names(bcov) <- BCOV_WEIGHT_STATS names(p_value) <- paste0(names(bcov), ".pvalue") list('statistic' = bcov, 'p.value' = p_value, 'info' = list("N" = res[[5]], "num.permutations" = res[[6]])) } #' compute K Ball Covariance statistic and Ball Covariance statistic after permutation #' @inheritParams bcov.test #' @param x numeric vector. #' @param n sample size. it must be integer value. 
#' #' @return A list contain: Ball Covariance statistic, Ball Covariance statistic after permutation, #' sample size, replication times, weight #' @useDynLib Ball, .registration = TRUE #' @noRd #' kbcov_test_wrap_c <- function(x, K, n, num.permutations, distance, num.threads) { x <- as.double(x) K <- as.integer(K) n <- as.integer(n) r <- as.integer(num.permutations) distance <- as.integer(distance) num.threads <- as.integer(num.threads) # kbcov <- as.double(numeric(3)) p_value <- as.double(numeric(3)) res <- .C("kbcov_test", kbcov, p_value, x, K, n, r, distance, num.threads) bcov <- res[[1]] p_value <- res[[2]] names(bcov) <- BCOV_WEIGHT_STATS names(p_value) <- paste0(names(bcov), ".pvalue") list('statistic' = bcov, 'p.value' = p_value, 'info' = list("N" = res[[5]], "num.permutations" = res[[6]])) } #' compute Ball Covariance statistic for each variable #' @inheritParams bcov.test #' @param x numeric vector. #' @param y numeric vector. #' @param n sample size. it must be integer value. #' #' @return A list contain: Ball Covariance statistic, sample size, weight #' @useDynLib Ball, .registration = TRUE #' @noRd #' apply_bcor_wrap <- function(x, y, n, p, distance, weight, method, num.threads, category) { p_all <- ncol(x) if (length(category) != 0) { x_category <- x[, category, drop = FALSE] x <- x[, -category, drop = FALSE] } else { x_category <- matrix(0, nrow = 0, ncol = 0) } p_continuous <- ncol(x) p_category <- ncol(x_category) if (distance) { p <- as.integer(0) } else { p <- as.integer(p) } y <- as.double(y) num <- as.integer(n) dst_x <- as.integer(0) nth <- as.integer(num.threads) dst_y <- as.integer(distance) if (p_continuous != 0) { bcor_stat1 <- as.double(numeric(3 * p_continuous)) x <- as.double(as.vector(x)) x_number <- as.integer(rep(1, p_continuous)) f_number <- as.integer(p_continuous) k <- as.integer(1) # res <- .C("bcor_test", bcor_stat1, y, x, x_number, f_number, num, p, k, dst_y, dst_x, nth)[[1]] bcor_stat1 <- matrix(res, ncol = 3, byrow = TRUE) } if (p_category != 0) { bcor_stat2 <- as.double(numeric(3 * p_category)) x <- as.double(as.vector(x_category)) x_number <- as.integer(rep(1, p_category)) f_number <- as.integer(p_category) k <- as.integer(2) # res <- .C("bcor_test", bcor_stat2, y, x, x_number, f_number, num, p, k, dst_y, dst_x, nth)[[1]] bcor_stat2 <- matrix(res, ncol = 3, byrow = TRUE) if (p_continuous == 0) { bcor_stat <- bcor_stat2 } else { bcor_stat <- matrix(nrow = p_all, ncol = 3) bcor_stat[-category, ] <- bcor_stat1 bcor_stat[category, ] <- bcor_stat2 } } else { bcor_stat <- bcor_stat1 } colnames(bcor_stat) <- BCOR_WEIGHT_STATS screening_bcor_stat <- select_ball_stat(bcor_stat, weight = weight, fun_name = "bcorsis") if (method %in% c("interaction", "standard")) { return(list(bcor_stat, screening_bcor_stat)) } else { return(screening_bcor_stat) } } #' @title Ball Correlation in survival. #' @param x ordered covariate #' @param t ordered survival event time #' @param delta ordered survival event status #' @param Sc Survfit object #' @param n Sample size #' @useDynLib Ball, .registration = TRUE #' @noRd #' #' bcor_surv <- function(x, time_value, delta, Sc, n){ # R function name should not the same as C(C++) function names RCT <- numeric(1) RCT <- .C("SRCT_new", as.double(t(x)), as.integer(t(time_value)), as.integer(t(delta)), as.double(t(Sc)), as.integer(n), RC = as.double(RCT)) RCT[["RC"]] }
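# Illustrative sketch (not original package code): the eigenvalues returned by
# bd_limit_wrap_c() characterise a weighted sum of chi-squared variables, so an
# approximate limit-null p-value could be formed with the Hall-Buckley-Eagleson
# helper hbe() from utilize.R. Here `xy`, `n1`, `n2` and `scaled_stat` are
# hypothetical placeholders; in particular, `scaled_stat` must be the observed
# statistic on the same scale as the eigenvalues, which is an assumption about
# the C-side convention rather than a documented fact.
# eigenvalue <- bd_limit_wrap_c(xy, size = c(n1, n2), distance = TRUE,
#                               num.threads = 1)
# p_value <- 1 - hbe(coeff = eigenvalue, x = scaled_stat)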
/scratch/gouwar.j/cran-all/cranData/Ball/R/wrap_c.R
## ---- echo=FALSE, message=FALSE, warning=FALSE--------------------------------
knitr::opts_chunk$set(comment = "#", warning = FALSE, eval = TRUE, message = FALSE)
set.seed(1)
library(Ball)

## ----eval=FALSE---------------------------------------------------------------
#  install.packages("Ball")

## ---- eval=FALSE--------------------------------------------------------------
#  library(devtools)
#  install_github("Mamba413/Ball", build_vignettes = TRUE)

## ---- echo=FALSE--------------------------------------------------------------
library(Ball)

## -----------------------------------------------------------------------------
x <- rnorm(50)
y <- rnorm(50, mean = 1)
# plot(density(x), xlim = c(-5, 5))
# lines(density(y), col = 'red')

## -----------------------------------------------------------------------------
bd.test(x = x, y = y)

## -----------------------------------------------------------------------------
x <- matrix(rnorm(100), nrow = 50, ncol = 2)
y <- matrix(rnorm(100, mean = 3), nrow = 50, ncol = 2)

## -----------------------------------------------------------------------------
bd.test(x = x, y = y)

## -----------------------------------------------------------------------------
# generate random perturbation:
noise <- runif(50, min = -0.3, max = 0.3)
x <- runif(50, 0, 4*pi)
y <- cos(x) + noise
# plot(x, y)

## -----------------------------------------------------------------------------
bcov.test(x = x, y = y)

## -----------------------------------------------------------------------------
x <- matrix(runif(50 * 2, -pi, pi), nrow = 50, ncol = 2)
noise <- runif(50, min = -0.1, max = 0.1)
y <- 2 * sin(x[,1] + x[,2]) + noise

## -----------------------------------------------------------------------------
bcov.test(x = x, y = y, weight = "prob")

## -----------------------------------------------------------------------------
# load data:
data("bdvmf")

## ---- eval=FALSE, echo=FALSE--------------------------------------------------
#  library(scatterplot3d)
#  scatterplot3d(bdvmf[["x"]], color = bdvmf[["group"]],
#                xlab = "X1", ylab = "X2", zlab = "X3")

## -----------------------------------------------------------------------------
# calculate geodesic distance between samples:
dx <- nhdist(bdvmf[["x"]], method = "geodesic")
# sample sizes in each group: 150, 150
# Two-Sample Test based on BD :
bd.test(x = dx, size = c(150, 150), num.permutations = 99, distance = TRUE)

## -----------------------------------------------------------------------------
# load data:
data("macaques")
# number of female and male Macaca fascicularis:
# table(macaques[["group"]])  # f: 9; m: 9
# calculate Riemannian shape distance matrix:
dx <- nhdist(macaques[["x"]], method = "riemann")
# hypothesis test with BD:
bd.test(x = dx, num.permutations = 99, size = c(9, 9), distance = TRUE)

## -----------------------------------------------------------------------------
data("ArcticLake")
# Distance matrix between y:
dy <- nhdist(ArcticLake[["x"]], method = "compositional")
# Distance matrix between x:
dx <- dist(ArcticLake[["depth"]])
# hypothesis test with BCov:
bcov.test(x = dx, y = dy, num.permutations = 99, distance = TRUE)

## -----------------------------------------------------------------------------
n <- 150
bd.test(rnorm(n), size = rep(50, 3))

## -----------------------------------------------------------------------------
data("ArcticLake")
Dy <- nhdist(ArcticLake[["x"]], method = "compositional")
Dx <- dist(ArcticLake[["depth"]])
# hypothesis test with weighted BCov:
bcov.test(x = Dx, y = Dy,
num.permutations = 99, distance = TRUE, weight = "constant") ## ----------------------------------------------------------------------------- x <- rnorm(50) y <- (x > 0) * x + rnorm(50) z <- (x <= 0) * x + rnorm(50) example1 <- list(x, y, z) ## ----------------------------------------------------------------------------- h <- rnorm(50) w <- (h)^2 x <- abs(h) y <- h * (h < 0) z1 <- h * (h < 0.5) z2 <- h * (h > -0.5) z <- cbind(z1, z2) example2 <- list(w, x, y, z) ## ----------------------------------------------------------------------------- bcov.test(x = example1, num.permutations = 199) bcov.test(x = example2, num.permutations = 199) ## ----------------------------------------------------------------------------- set.seed(1) n <- 150 p <- 3000 x <- matrix(rnorm(n * p), nrow = n) noise <- rnorm(n) y <- 3*x[, 1] + 5*(x[, 3])^2 + noise ## ----------------------------------------------------------------------------- res <- bcorsis(y = y, x = x) head(res[[1]], n = 5) ## ----------------------------------------------------------------------------- result <- bcorsis(x = genlung[["covariate"]], y = genlung[["survival"]], d = "small", method = "survival") top_gene <- colnames(genlung[["covariate"]])[result[["ix"]]] head(top_gene, n = 1)
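## ---- eval=FALSE--------------------------------------------------------------
#  # Illustrative follow-up (not part of the generated vignette script): the
#  # quick-start screening above should rank the truly active variables 1 and 3
#  # near the top, so the intersection below is expected to recover both.
#  intersect(head(res[[1]], n = 5), c(1, 3))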
/scratch/gouwar.j/cran-all/cranData/Ball/inst/doc/Ball.R
---
title: "Ball: Statistical Inference and Sure Independence Screening via Ball Statistics"
author: "Jin Zhu, [email protected]"
date: "December 18, 2017"
# bibliography: reference.bib
output:
  rmarkdown::html_vignette:
    toc: true
vignette: >
  %\VignetteIndexEntry{Ball: Statistical Inference and Sure Independence Screening via Ball Statistics}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, echo=FALSE, message=FALSE, warning=FALSE}
knitr::opts_chunk$set(comment = "#", warning = FALSE, eval = TRUE, message = FALSE)
set.seed(1)
library(Ball)
```

# Quick Start

The fundamental problems for data mining and statistical analysis are:

1. Are the distributions of two samples distinct?

2. Are two random variables dependent?

The two-sample test, which addresses the first problem, is very important in medicine, psychology, biology, and many other fields. For instance, suppose we want to know whether lifespan differs between males and females. We collect lifetime data and try to figure out whether the ages in the two samples are identically distributed. As the following images show, if the distributions of lifespan in the two groups look like the left panel, we conclude that lifespans are not identically distributed; the right panel, in contrast, indicates that they are most likely identically distributed.

![Figure 1](classificationDemo.png)

The test of independence, which addresses the second problem, is equally essential. As the following images show, there is a strong linear relation between Y and X1, while X2 seems to have nothing to do with Y. So X1 should be taken into account and added into the regression model for Y, or should be studied carefully in order to confirm the correlation mechanism with Y.

![Figure 2](regressionDemo.png)

The **Ball** package provides solutions for the independence test and the two-sample or even $K$-sample test. Moreover, a generic non-parametric sure independence screening procedure is also implemented to deal with ultra high-dimensional data. The three core functions are:

- **bd.test**: examine whether $K (K \geq 2)$ univariate or multivariate distributions are identical.

- **bcov.test**: test whether univariate or multivariate variables are related to each other.

- **bcorsis**: carry out a sure independence screening procedure to pick out the variables potentially related to the response.

### Installation

#### CRAN version

To install the Ball R package from CRAN, just run:

```{r,eval=FALSE}
install.packages("Ball")
```

#### Github version

To install the development version from GitHub, run:

```{r, eval=FALSE}
library(devtools)
install_github("Mamba413/Ball", build_vignettes = TRUE)
```

*Windows* users will need to install [Rtools](https://cran.r-project.org/bin/windows/Rtools/) first.

```{r, echo=FALSE}
library(Ball)
```

### Quick Start: Univariate Two-sample Test

In this example, we generate two normal random variables with different location parameters:

$$X \sim N(0,1), Y \sim N(1, 1)$$

<!-- In the mean time, we use the non-parametric kernel density estimation to plot the kernel density of two distribution: -->

```{r}
x <- rnorm(50)
y <- rnorm(50, mean = 1)
# plot(density(x), xlim = c(-5, 5))
# lines(density(y), col = 'red')
```

<!-- ![](QuickStartUBD.png) -->

We use **bd.test** to perform the two-sample test to determine whether the two samples come from the same distribution.

```{r}
bd.test(x = x, y = y)
```

The *p*-value of **bd.test** is below 0.05, which means that we reject the null hypothesis and conclude that the two samples come from different distributions.
Consequently, the hypothesis test result is concordant with the data generation mechanism.

### Quick Start: Multivariate Two-sample Test

In this example, we will demonstrate how to perform a test of whether two multivariate distributions are identical. We generate two random samples of size 50, which are sampled from two different multivariate normal distributions:

$$X \sim N(\mu_{X},I_{2 \times 2}), Y \sim N(\mu_{Y}, I_{2 \times 2})$$

$$\mu_{X} = (0,0), \mu_{Y} = (3,3)$$

<!-- Then we will show the difference between these two samples in the way of kernel density estimation: -->

```{r}
x <- matrix(rnorm(100), nrow = 50, ncol = 2)
y <- matrix(rnorm(100, mean = 3), nrow = 50, ncol = 2)
```

<!-- ![](./QuickStartBD.png) -->

We use **bd.test** to test whether the two multivariate random samples are identically distributed.

```{r}
bd.test(x = x, y = y)
```

The *p*-value of **bd.test** is below 0.05, so we conclude that the two samples are not identically distributed.

### Quick Start: Univariate Test of Independence

In this example, we will use the "W-shape" data from [WIKI](https://en.wikipedia.org/wiki/Correlation_and_dependence) to demonstrate how to perform the univariate test of independence with **bcov.test**. We generate a dataset containing 50 samples.

```{r}
# generate random perturbation:
noise <- runif(50, min = -0.3, max = 0.3)
x <- runif(50, 0, 4*pi)
y <- cos(x) + noise
# plot(x, y)
```

<!-- From the plot we can see that $X$ has complex non-linear relation with Y. -->

Obviously, $X$ is related to $Y$, but the relationship is non-linear. We use **bcov.test** to perform the test of independence between $X$ and $Y$.

```{r}
bcov.test(x = x, y = y)
```

The *p*-value of **bcov.test** is below 0.05, so we conclude that $X$ and $Y$ are not independent, which means there is some kind of association between $X$ and $Y$.

### Quick Start: Multivariate Test of Independence

For the multivariate independence test, we will demonstrate the usage of **bcov.test** with the following example: $X=(x_{1}, x_{2})$ is sampled from the uniform distribution on $(-\pi, \pi)^{2}$, and the relation between $Y$ and $X$ is:

$$Y=2\sin(x_{1} + x_{2})+ \epsilon, \quad \epsilon \sim U(-0.1, 0.1)$$

```{r}
x <- matrix(runif(50 * 2, -pi, pi), nrow = 50, ncol = 2)
noise <- runif(50, min = -0.1, max = 0.1)
y <- 2 * sin(x[,1] + x[,2]) + noise
```

<!-- The following image shows the distribution of the data: -->

<!-- ![](QuickStartBCov.png) -->

We use **bcov.test** to perform the multivariate independence test:

```{r}
bcov.test(x = x, y = y, weight = "prob")
```

The *p*-value of **bcov.test** is below 0.05, so we conclude that the multivariate random variables $X$ and $Y$ are associated.

<!-- *** -->

<!-- # Introduction -->

<!-- ### Ball Divergence -->

<!-- We want to determine whether distributions of two samples $\mathcal{X} = \lbrace x_{1},...,x_{n} \rbrace$ and $\mathcal{Y} = \lbrace y_{1},...,y_{m} \rbrace$ are distinct.
--> <!-- Intuitively, if $\mathcal{X}$ and $\mathcal{Y}$ come from identical distribution and we use any two points $x_{i}, x_{j} \in \mathcal{X}$ to construct a ball, then the ratio that elements of $\mathcal{X}$ and $\mathcal{Y}$ in the ball will be close to each other, which means: --> <!-- $$A^{X}_{ij} \approx A^{Y}_{ij}$$ --> <!-- $$A^{X}_{ij} = \frac{1}{n}\sum_{u=1}^{n}{I(x_{u} \in \bar{B}(x_{i}, \rho(x_{i}, x_{j}))}$$ --> <!-- $$A^{Y}_{ij} = \frac{1}{m}\sum_{v=1}^{m}{I(y_{v} \in \bar{B}(x_{i}, \rho(x_{i}, x_{j}))}$$ --> <!-- where $\bar{B}(x_{i}, \rho(x_{i}, x_{j}))$ is a closed ball with center $x_{i}$, --> <!-- and radius $\rho(x_{i}, x_{j})$ and $I$ is indicator function. --> <!-- ![](BDPlot.jpeg) --> <!-- In a similar way, for any two points $y_{i}, y_{j} \in \mathcal{Y}$, --> <!-- they should also have the property: --> <!-- $$C^{X}_{ij} \approx C^{Y}_{ij}$$ --> <!-- $$C^{X}_{ij} = \frac{1}{n}\sum_{u=1}^{n}{I(x_{u} \in \bar{B}(y_{i}, \rho(y_{i}, y_{j}))}$$ --> <!-- $$C^{Y}_{ij}=\frac{1}{m}\sum_{v=1}^{m}{I(y_{v} \in \bar{B}(y_{i}, \rho(y_{i}, y_{j}))}$$ --> <!-- We combine the difference between $A^{X}_{ij}, A^{Y}_{ij}$ and the difference between --> <!-- $C^{X}_{ij}, C^{Y}_{ij}$ together in the following way: --> <!-- $$D_{n,m} = A_{n,m} + C_{n,m}$$ --> <!-- where: --> <!-- $$A_{n,m} = \frac{1}{n^{2}}\sum_{i,j=1}^{n}{(A_{ij}^{X}-A_{ij}^{Y})^{2}}$$ $$C_{n,m}=\frac{1}{m^{2}}\sum_{k,l=1}^{m}{(C_{kl}^{X}-C_{kl}^{Y})^{2}}$$ --> <!-- $D_{n,m}$ is the sample version of Ball Divergence, defined by Pan et.al(2017). Techinical proofs provided by Pan et.al ensure $D_{n, m}$ converges to $D(\mu, \nu)$ when $n, m$ increase to infinity so long as: --> <!-- $$\frac{n}{m+n} \to \tau, \tau \in [0, 1].$$ --> <!-- [Pan et al. (2017)](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&ved=0ahUKEwjBvbb7gdTWAhUEoZQKHYvODxkQFggmMAA&url=https%3A%2F%2Fwww.e-publications.org%2Fims%2Fsubmission%2FAOS%2Fuser%2FsubmissionFile%2F24632%3Fconfirm%3D9219c1d0&usg=AOvVaw3I3Tad92DvETqhJEnJ7FyN) had proven that sample version of ball divergence $D(\mu, \nu) \geq 0$ where the equality holds if and only if $\mu=\nu$ where $\mu, \nu$ are induced measure corresponding to distribution of sample $\mathcal{X}$ and $\mathcal{Y}$. Theory and numerical result guarantee Two-Sample Test based on ball divergence have following advantages: --> <!-- - It is applicable to the univariate or multivariate data in Banach Space. --> <!-- - Robust to heavy-tail data or outliers. --> <!-- - Cope well for imbalanced data. --> <!-- - Works fine for most problems without tuning a variety of parameters. --> <!-- ### Ball Covariance --> <!-- Investigate the dependence between variables is a fundamental step in statistical inference and data mining. Suppose, We are given pairs of independent observations $\{(x_1, y_1),\ldots,(x_n,y_n)\}$, where $x_i$ and $y_i$ can be of any dimension and the dimensionality of $x_i$ and $y_i$ need not be the same. Let $X = (x_1,\ldots,x_n)$ be the $X$ vector, and $Y=(y_1,\ldots,y_n)$ the --> <!-- $Y$ vector and we want to determine whether $X$ and $Y$ are dependent. --> <!-- <!-- , which means whether $F_{XY}=F_{X}F_{Y}$ is valid or not. Where $F_{XY}$ is the joint distribution function of $X, Y$. --> <!-- To achieve the goal, we come up with **Ball Covariance** ($\mathbf{BCor}_{\omega, n}^{2}$), a generic measure of dependence in banach space. Moreover, the $\mathbf{BCor}_{\omega, n}^{2}$ based independence test utilized permutation technique to calculate *p*-value is also developed. 
Sample version of $\mathbf{BCor}_{\omega, n}^{2}$ is defined as follows: -->

<!-- $$\mathbf{BCor}_{\omega, n}^{2}(X, Y)=\frac{1}{n^{2}}\sum_{i,j=1}^{n}{(\Delta_{ij,n}^{X,Y}-\Delta_{ij,n}^{X}\Delta_{ij,n}^{Y})^{2}}\hat{\omega}_1(X_i,X_j)\hat{\omega}_2(Y_i,Y_j)$$ -->

<!-- where: -->

<!-- $$ \Delta_{ij,n}^{X,Y}=\frac{1}{n}\sum_{k=1}^{n}{\delta_{ij,k}^{X} \delta_{ij,k}^{Y}}$$ -->

<!-- $$\Delta_{ij,n}^{X}=\frac{1}{n}\sum_{k=1}^{n}{\delta_{ij,k}^{X}},
\Delta_{ij,n}^{Y}=\frac{1}{n}\sum_{k=1}^{n}{\delta_{ij,k}^{Y}} $$ -->

<!-- $$ \delta_{ij,k}^{X} = I(x_{k} \in \bar{B}(x_{i}, \rho(x_{i}, x_{j})))$$ -->

<!-- $$\delta_{ij,k}^{Y} = I(y_{k} \in \bar{B}(y_{i}, \rho(y_{i}, y_{j})))$$ -->

<!-- Generally, we define $\hat{\omega}_1(X_i,X_j) = \hat{\omega}_2(Y_i,Y_j) = 1$, and simplify the notation $\mathbf{BCor}^{2}_{\omega, n}$ as $\mathbf{BCor}^{2}_{n}$ -->

<!-- ![](./BCovPlot.jpeg) -->

<!-- As the image above shown, the joint probability that $X, Y$ are both in the ball are intuitively closed to the product of marginal probability that $X$ and $Y$ are in the ball when $X$ and $Y$ are independent, i.e.: -->

<!-- $$\Delta_{ij,n}^{X,Y} \approx \Delta_{ij,n}^{X}\Delta_{ij,n}^{Y}$$ -->

<!-- Consequently, if $\mathbf{BCor}_{\omega, n}^{2}$ is significantly larger than 0, then it indicates that $X$ and $Y$ are not independent. -->

<!-- As Pan's paper proved theoretically and demonstrated numerically, the independence test based on $\mathbf{BCor}_{\omega, n}^{2}$ has several advantages: -->

<!-- - It is applicable to the univariate and multivariate data in banach space. -->

<!-- - Robust to heavy-tail data or outliers -->

<!-- - Works fine for most problems without tuning a variety of parameters. -->

<!-- *** -->

# Advanced Features

The features below have been implemented to help you analyse diverse and complicated real data.

## Non-Hilbert Space Data

In scientific research, we often have to deal with non-Hilbert space data. Traditional statistical inference methods usually depend on assumptions that prevent them from being applied to this kind of data directly. Ball divergence, in contrast, does not rely on these assumptions and is able to perform the two-sample test for data from non-Hilbert spaces. We will demonstrate how to use the **Ball** package to perform statistical inference for data from non-Hilbert spaces with three examples:

#### Example 1: Simulated von Mises-Fisher distribution data

```{r}
# load data:
data("bdvmf")
```

The distribution of the data is shown in the following image:

```{r, eval=FALSE, echo=FALSE}
library(scatterplot3d)
scatterplot3d(bdvmf[["x"]], color = bdvmf[["group"]],
              xlab = "X1", ylab = "X2", zlab = "X3")
```

![](./BDVmf.png)

In the image, the black dots ($X$) and red dots ($Y$) respectively represent two groups of simulated data with different distributions. The distributions are denoted by:

$$X \sim M(\mu_{X}, \kappa), Y \sim M(\mu_{Y}, \kappa)$$

where $M$ denotes the [von Mises-Fisher distribution](https://en.wikipedia.org/wiki/Von_Mises%E2%80%93Fisher_distribution), $\mu_{X} = (1, 0, 0), \mu_{Y} = (1, 1, 1)$ are the mean direction parameters of the von Mises-Fisher distribution, and $\kappa = 3$ denotes the concentration parameter.

We can tell from the image that the red dots and black dots are not identically distributed. However, distinguishing the two distributions is a tough task for traditional statistical methods because the data do not lie in a conventional Hilbert space.
Fortunately, since the computation of the sample version of ball divergence (or ball covariance) only involves calculating a distance matrix and counting the number of samples located in a ball, we can obtain the empirical ball divergence as long as we can define a distance metric between observations. Therefore, ball divergence still works for this example.

We apply ball divergence to these data by carrying out the following steps. First, we calculate the geodesic distance matrix of the data, which has been implemented in the function \code{nhdist}. Then, we pass the distance matrix to the argument \code{x} and set \code{distance = TRUE}, \code{num.permutations = 99}, and \code{size = c(150, 150)}. The detailed solution is demonstrated below:

```{r}
# calculate geodesic distance between samples:
dx <- nhdist(bdvmf[["x"]], method = "geodesic")
# sample sizes in each group: 150, 150
# Two-Sample Test based on BD :
bd.test(x = dx, size = c(150, 150), num.permutations = 99, distance = TRUE)
```

In this example, we first calculate the geodesic distance matrix using the **nhdist** function in the *Ball* package. Then, we pass *dx* to the argument *x* and set *distance = TRUE* to indicate that the *x* parameter is a distance matrix. Meanwhile, we set the size of each sample with *size = c(150, 150)* and the replication times with *num.permutations = 99*. The resulting *p*-value < 0.05, which means that the red dots and black dots are not identically distributed.

#### Example 2: Macaques Data

Based on the macaques data provided by Dryden, scientists want to figure out whether there are differences in skull shape between macaques of different sexes. In a similar way, we can calculate the distance matrix of the data and transform this problem into a two-sample test that can be solved by BD. The Riemannian shape distance is commonly used to describe the distance between shape data. By setting *method = "riemann"* in the **nhdist** function, we are able to calculate the Riemannian shape distance between shape data. The detailed procedure is demonstrated below:

```{r}
# load data:
data("macaques")
# number of female and male Macaca fascicularis:
# table(macaques[["group"]])  # f: 9; m: 9
# calculate Riemannian shape distance matrix:
dx <- nhdist(macaques[["x"]], method = "riemann")
# hypothesis test with BD:
bd.test(x = dx, num.permutations = 99, size = c(9, 9), distance = TRUE)
```

The *p*-value is under 0.05, which means the skull shape differs between male and female macaques.

#### Example 3: ArcticLake Data

**bcov.test** relies on the distances between samples of two multivariate random variables. Therefore, we can examine the independence assumption by employing **bcov.test** on non-Hilbert space real data as long as we obtain the distance matrices of the samples. We take a dataset from the book **The Statistical Analysis of Compositional Data** as an example to demonstrate how to use **bcov.test** to determine the dependence of non-Hilbert space data.

Scientists collected sand, silt and clay compositions of 39 sediment samples of different water depth in an Arctic lake. They want to figure out whether the compositions of the sediment samples are associated with water depth. To achieve the goal, we use **bcov.test** to perform the test of independence.
The detailed procedure is demonstrated below:

```{r}
data("ArcticLake")
# Distance matrix between y:
dy <- nhdist(ArcticLake[["x"]], method = "compositional")
# Distance matrix between x:
dx <- dist(ArcticLake[["depth"]])
# hypothesis test with BCov:
bcov.test(x = dx, y = dy, num.permutations = 99, distance = TRUE)
```

We first calculate the distance matrices *dy* and *dx*. Then, we pass *dx* to the argument *x* and *dy* to the argument *y*, set the replication times with *num.permutations = 99*, and set *distance = TRUE* to indicate that the *x* and *y* parameters are distance matrices. The result shows that the *p*-value is less than 0.05, a usual significance level, so we conclude that the composition of the sediment is associated with the water depth.

In the example above, we use the square-root transformed data to calculate the geodesic distance as a measurement of the difference between the compositions of the sediment samples (*dy*). Meanwhile, we use the Euclidean distance to measure the difference between water depths (*dx*). For different data, we can use different measurements to cope with the different features of the data.

## K-Sample Test

**bd.test** is also applicable to testing multiple samples. We generate three random normal samples of size 50, which are sampled from the same normal distribution. As an example, we use **bd.test** to test whether these samples are identically distributed.

```{r}
n <- 150
bd.test(rnorm(n), size = rep(50, 3))
```

As the result shows, the *p*-value > 0.05, which means we cannot reject the null hypothesis. We can also utilize **bd.test** to deal with the $K$-sample problem in non-Hilbert spaces following the aforementioned procedure. Remember, at the same time, to assign the size vector to the *size* argument and set *distance = TRUE*.

<!-- Independent test based on ball correlation, which is a normalized coefficient of ball covariance also available now. Ball correlation statistic will be used when setting -->
<!-- *type = "Bcor"* in **bcov.test**. -->

## Weighted Ball Covariance Test

<!-- Moreover, we can extend defintion of $\hat{\omega}_1(X_i,X_j), \hat{\omega}_2(Y_i,Y_j)$. For example, we let: -->

<!-- $$\hat{\omega}_1(X_i,X_j) = \frac{1}{\rho(X_{i}, X_{j})}, \hat{\omega}_2(Y_i,Y_j) = \frac{1}{\rho(Y_{i}, Y_{j})}$$ -->

<!-- and calculate the weighted ball covariance: -->

<!-- $$\mathbf{BCov}^2_{\omega,n}(\mathbf{X},\mathbf{Y}):=\frac{1}{n^2}\sum_{i,j=1}^{n}{(\Delta_{ij,n}^{X,Y}-\Delta_{ij,n}^{X}\Delta_{ij,n}^{Y})^2\hat{\omega}_1(X_i,X_j)\hat{\omega}_2(Y_i,Y_j)}$$ -->

Pan et al. (2017) show that the weighted ball covariance based independence test is statistically consistent against all dependence alternatives without any moment conditions, and is sometimes superior to the standard version of ball covariance. The weighted ball covariance test has been implemented in the **Ball** package, and we can employ it in data analysis by simply specifying the *weight* argument of **bcov.test**. Take the *ArcticLake* data as an example:

```{r}
data("ArcticLake")
Dy <- nhdist(ArcticLake[["x"]], method = "compositional")
Dx <- dist(ArcticLake[["depth"]])
# hypothesis test with weighted BCov:
bcov.test(x = Dx, y = Dy,
          num.permutations = 99, distance = TRUE, weight = "constant")
```

## Ball Covariance Mutual Independence Test

Apart from the relationships between two random variables, another important dependence concept for a set of variables is mutual (or joint) independence, which says that any two disjoint subsets of variables are independent of each other.
## Weighted Ball Covariance Test

<!-- Moreover, we can extend defintion of $\hat{\omega}_1(X_i,X_j), \hat{\omega}_2(Y_i,Y_j)$. For example, we let: -->
<!-- $$\hat{\omega}_1(X_i,X_j) = \frac{1}{\rho(X_{i}, X_{j})}, \hat{\omega}_2(Y_i,Y_j) = \frac{1}{\rho(Y_{i}, Y_{j})}$$ -->
<!-- and calculate the weighted ball covariance: -->
<!-- $$\mathbf{BCov}^2_{\omega,n}(\mathbf{X},\mathbf{Y}):=\frac{1}{n^2}\sum_{i,j=1}^{n}{(\Delta_{ij,n}^{X,Y}-\Delta_{ij,n}^{X}\Delta_{ij,n}^{Y})^2\hat{\omega}_1(X_i,X_j)\hat{\omega}_2(Y_i,Y_j)}$$ -->

Pan et al. (2017) show that the weighted ball covariance based independence test is statistically consistent against all dependence alternatives without any moment conditions, and is sometimes superior to the standard version of ball covariance. The weighted ball covariance test has been implemented in the **Ball** package, and we can employ it in data analysis by specifying the *weight* argument of **bcov.test**. Take the *ArcticLake* data as an example:

```{r}
data("ArcticLake")
Dy <- nhdist(ArcticLake[["x"]], method = "compositional")
Dx <- dist(ArcticLake[["depth"]])
# hypothesis test with weighted BCov:
bcov.test(x = Dx, y = Dy, num.permutations = 99, distance = TRUE,
          weight = "constant")
```
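Besides *weight = "constant"*, the *weight* argument also accepts other weight forms, such as the probability weight used in the multivariate quick-start example earlier in this vignette. The following sketch is left unevaluated because it assumes that the probability weight also applies when distance matrices are supplied:

```{r, eval=FALSE}
# probability weight, as in the earlier quick-start call with weight = "prob";
# whether it combines with distance = TRUE is an assumption -- see ?bcov.test.
bcov.test(x = Dx, y = Dy, num.permutations = 99, distance = TRUE, weight = "prob")
```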
## Ball Covariance Mutual Independence Test

Apart from the relationship between two random variables, another important dependence concept for a set of variables is mutual (or joint) independence, which says that any two disjoint subsets of the variables are independent of each other. For instance, we may want to investigate whether air temperature, soil temperature, humidity, wind, and evaporation are correlated. It is natural to extend ball covariance to measure mutual independence between $K$ random variables.

<!-- as follows: -->
<!-- $$\mathbf{BCor}_{\omega, n}^{2}(R_{1}, ..., R_{K})=\frac{1}{n^{2}}\sum_{i,j=1}^{n}{(\Delta_{ij,n}^{R_{1}, ..., R_{K}}-\prod_{k=1}^{K}\Delta_{ij,n}^{R_{k}})^{2}\prod_{k=1}^{K}{\hat{\omega}_{k}(R_{ki},R_{kj})}}$$ -->
<!-- where $R_{k}, k=1,...K$ indicate random variables and $R_{ki}, i=1,...,n$ denote $n$ random samples of $R_{k}$. -->

More importantly, the mutual independence test based on ball covariance has been implemented in the **Ball** package. We give two simple examples below to demonstrate its usage. In the first example, $X, \epsilon_{1}, \epsilon_{2}$ are independently drawn from the standard normal distribution $N(0,1)$, and
$$Y = \max(X, 0) + \epsilon_{1}, \; Z = \min(X, 0) + \epsilon_{2}$$

```{r}
x <- rnorm(50)
y <- (x > 0) * x + rnorm(50)
z <- (x <= 0) * x + rnorm(50)
example1 <- list(x, y, z)
```

In the second example, $W, X, Y, Z$ are connected by a latent random variable $H \sim N(0,1)$:
$$W = H^{2}, \; X = |H|, \; Y = \min(H, 0),$$
$$Z = (Z_{1}, Z_{2}), \; Z_{1}=I(H<0.5)H, \; Z_{2}=I(H>-0.5)H$$

```{r}
h <- rnorm(50)
w <- (h)^2
x <- abs(h)
y <- h * (h < 0)
z1 <- h * (h < 0.5)
z2 <- h * (h > -0.5)
z <- cbind(z1, z2)
example2 <- list(w, x, y, z)
```

We bind these data into the lists *example1* and *example2* and pass them to the argument *x* of **bcov.test** to carry out the ball covariance mutual independence test.

```{r}
bcov.test(x = example1, num.permutations = 199)
bcov.test(x = example2, num.permutations = 199)
```

The hypothesis test results for the two examples show that the *p*-values < 0.05, coinciding with the simulation settings.

## Ball Correlation Based Sure Independence Screening

Recent technological advances have made it possible to collect ultra high-dimensional data. A common feature of these data is that the number of variables $p$ is generally much larger than the sample size $n$. For instance, the number of gene expression profiles is in the order of tens of thousands, while the number of patient samples is in the order of tens or hundreds. In this regime, traditional variable selection algorithms such as LASSO and SCAD may not perform well due to statistical inaccuracy and algorithmic instability. A new framework, sure independence screening (SIS), was proposed to tackle the challenges above. SIS filters out the features that lack marginal correlation with the response, hence effectively reducing the dimensionality $p$ to a moderate scale so that performing a statistical algorithm is feasible.

BCor-SIS, a generic non-parametric sure independence screening procedure based on ball correlation, is able to pick out explanatory variables related to the response. Linear, non-linear, or interaction effect relationships can be captured by BCor-SIS even when the data are heavy-tailed or contain outliers (an interaction-screening sketch is given at the end of this section). More importantly, BCor-SIS retains all of the important features in the model with probability tending to 1 under mild conditions.

### BCor-SIS: Quick Start Example

In this example, we utilize the **bcorsis** function to carry out the BCor-SIS procedure. We generate 150 high-dimensional instances with 3000 independent standard Gaussian explanatory variables $X$ and a univariate response variable $Y$. The relation between $Y$ and $X$ is:
$$Y=3 X_{1} + 5 X_{3}^{2} + \epsilon, \quad \epsilon \sim N(0, 1)$$

```{r}
set.seed(1)
n <- 150
p <- 3000
x <- matrix(rnorm(n * p), nrow = n)
noise <- rnorm(n)
y <- 3*x[, 1] + 5*(x[, 3])^2 + noise
```

We perform the BCor-SIS procedure and display the indices of the top 5 variables selected by BCor-SIS.

```{r}
res <- bcorsis(y = y, x = x)
head(res[[1]], n = 5)
```

The **bcorsis** result shows that the first and the third variables are the two most important among the 3000 explanatory variables, which is consistent with the simulation settings.

### Extension of BCor-SIS: A Censored Survival Data

Survival analysis is a commonly used method for the analysis of time-to-event data, such as times to biological death or mechanical failure, which are usually subject to censoring. The main goal of survival analysis is to study the dependence of the survival time $T$ on covariates $X, X \in R^{p}$. With the remarkable development of modern technology, a huge amount of covariate information such as microarray and SNP data is collected, so a SIS procedure designed for censored survival data is in need. Pan et al. (2017) proposed an extended BCor-SIS procedure which is able to select the significant variables for censored data.

We implement the BCor-SIS procedure for survival data in the **Ball** package and use a publicly available lung cancer genomic dataset from the Chemores Cohort Study to demonstrate its usage. The outcome was the disease-free survival time; patients were followed until the first relapse occurred or until administrative censoring. This genomic dataset includes the expression levels of mRNA and miRNA as well as clinical variables from 123 samples. Moreover, it contains 944 biological covariates and 1056 artificial standard Gaussian variables which are independent of the response. We employ the extension of BCor-SIS on these data to hunt for relevant covariates; the detailed procedure is demonstrated below.

```{r}
result <- bcorsis(x = genlung[["covariate"]], y = genlung[["survival"]],
                  d = "small", method = "survival")
top_gene <- colnames(genlung[["covariate"]])[result[["ix"]]]
head(top_gene, n = 1)
```

We first pass the covariates and the censoring information to the arguments *x* and *y*, and set *method = "survival"* to indicate that *y* should be treated as a survival outcome containing the event time and censoring status. BCor-SIS asserts that *hsa.miR.564*, corresponding to gene *MIR564*, is strongly relevant to the disease-free survival status. This conclusion is highly consistent with statements in the published literature.

<!-- ### Reference -->
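Finally, returning to the simulated data from the quick-start example above, the sketch below illustrates the interaction-screening claim made at the beginning of this section. The *method = "interaction"* option is listed in the **bcorsis** documentation of recent package versions, but treat this particular call as an assumption and verify it with `?bcorsis`; the chunk is therefore not evaluated:

```{r, eval=FALSE}
# sketch: screening for variables with potential interaction effects;
# `method = "interaction"` is an assumption -- verify it with ?bcorsis.
res_int <- bcorsis(y = y, x = x, method = "interaction")
head(res_int[[1]], n = 5)
```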
/scratch/gouwar.j/cran-all/cranData/Ball/inst/doc/Ball.Rmd
---
title: '`bd.gwas.test`: Fast Ball Divergence Test for Multiple Hypothesis Tests'
author: "Yue Hu, Jin Zhu"
date: "2021/8/25"
output:
  html_document:
    toc: yes
  pdf_document:
    toc: yes
vignette: |
  %\VignetteIndexEntry{Fast Ball Divergence Test for Multiple Hypothesis Tests}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

## Introduction

The $K$-sample Ball Divergence (KBD) is a nonparametric method to test the differences between $K$ probability distributions. It is specially designed for metric-valued and imbalanced data, which is consistent with the characteristics of GWAS data. It is computationally intensive for a large GWAS dataset because of the ultra-high dimensionality of the data. Therefore, a fast KBD test for GWAS data, implemented in the function `bd.gwas.test`, is developed to accelerate the computation.

## Faster implementation: quick start

We use a synthetic dataset to demonstrate the usage of `bd.gwas.test`. In this example, phenotype data are generated from three multivariate normal distributions with the same dimension but heterogeneous means and covariance matrices. The three multivariate normal distributions are: (i) $N(\mu, \Sigma^{(1)})$, (ii) $N(\mu + 0.1 \times d, \Sigma^{(2)})$, and (iii) $N(\mu - 0.1 \times d, \Sigma^{(3)})$. Here, the mean $\mu$ is set to $\textbf{0}$ and the covariance matrices follow the auto-regressive structure with some perturbations:
$$\Sigma_{ij}^{(1)}=\rho^{|i-j|}, ~~ \Sigma^{(2)}_{ij}=(\rho-0.1 \times d)^{|i-j|}, ~~ \Sigma^{(3)}_{ij}=(\rho+0.1 \times d)^{|i-j|}.$$
The dimension of the phenotype $k$ is fixed at 100.

```{r, message=FALSE, warning=FALSE}
library(mvtnorm)
num <- 100
snp_num <- 200
k <- 100
rho <- 0.5
freq0 <- 0.75
d <- 3
set.seed(2021)
ar1 <- function (p, rho = 0.5) {
  Sigma <- matrix(0, p, p)
  for (i in 1:p) {
    for (j in 1:p) {
      Sigma[i, j] <- rho^(abs(i - j))
    }
  }
  return(Sigma)
}
mean0 <- rep(0, k)
mean1 <- rep(0.1 * d, k)
mean2 <- rep(-0.1 * d, k)
cov0 <- ar1(p = k, rho = rho)
cov1 <- ar1(p = k, rho = rho - 0.1 * d)
cov2 <- ar1(p = k, rho = rho + 0.1 * d)
p1 <- freq0 ^ 2
p2 <- 2 * freq0 * (1 - freq0)
n1 <- round(num * p1)
n2 <- round(num * p2)
n3 <- num - n1 - n2
x0 <- rmvnorm(n1, mean = mean0, sigma = cov0)
x1 <- rmvnorm(n2, mean = mean1, sigma = cov1)
x2 <- rmvnorm(n3, mean = mean2, sigma = cov2)
x <- rbind(x0, x1, x2)
head(x[, 1:6])
```

The number of SNPs is fixed at $200$ and the sample size is set to $100$. The sample sizes of the three groups follow the transmission ratio:
$$n_1:n_2:n_3 \approx p^2:2pq:q^2, \quad (p+q=1, \; n_1+n_2+n_3=100).$$
Here, $p$ is set to $0.75$, representing a scenario that is close to real data. $d$ is a user-specified positive integer indicating the difference between the three probability distributions. Here we use $d=3$, aiming to show that the SNP matched with the distributional difference can be identified even when the difference between the distributions is small.

```{r}
effect_snp <- c(rep(0, n1), rep(1, n2), rep(2, n3))
noise_snp <- sapply(2:snp_num, function(j) {
  sample(
    0:2,
    size = num,
    replace = TRUE,
    prob = c(p1, p2, 1 - p1 - p2)
  )
})
snp <- cbind(effect_snp, noise_snp)
head(snp[, 1:6])
```

Given the synthetic dataset `x` and `snp`, the multiple KBD tests are conducted by:

```{r}
library(Ball)
res <- bd.gwas.test(x = x, snp = snp)
```

We then inspect the structure of the returned object, which records the significant SNPs:

```{r}
str(res)
```
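As a purely hypothetical post-processing sketch, one might flag the SNPs whose $p$-values fall below a Bonferroni-corrected threshold. The element name `p.value` used below is our assumption, not a documented guarantee; inspect `str(res)` above for the actual names in your installed version, which is why the chunk is not evaluated:

```{r, eval=FALSE}
# assumed element name `p.value`; check str(res) for the real structure.
sig_snp <- which(res[["p.value"]] < 0.05 / ncol(snp))  # Bonferroni threshold
sig_snp
```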
## Why `bd.gwas.test` is faster?

Our faster implementation for multiple testing significantly speeds up the KBD test in two aspects.

### Two-step algorithm

First, it uses a two-step algorithm for KBD. The algorithm initially computes an empirical $p$-value for each SNP using a modest number of permutations, which gives sufficiently precise estimates of the $p$-values above a given threshold. Then, the SNPs with first-stage $p$-values below the threshold are moved to the second stage, where a far greater number of permutations is used.

### Recycling permutation results

Another key technique in `bd.gwas.test` is reusing the empirical distribution of KBD under the null hypothesis. This technique is particularly helpful for decreasing the computational burden when the number of factors $p$ is very large and $K$ is a single digit. A typical case is a GWAS study, in which $p \approx 10^4$ or $10^5$ but $K = 3$.

## Power evaluation

According to the simulations:

- the empirical type I errors of KBD are reasonably controlled around $10^{-5}$;
- the power of KBD increases as either the sample size or the difference between the means or covariance matrices increases. The empirical power is close to $1$ when the difference between distributions is large enough. Furthermore, correlated responses may slightly decrease the power of the test compared to the case of independent responses.

Moreover, KBD performs better when the data are not extremely imbalanced, and it maintains reasonable power in the imbalanced setting. Compared to other methods, KBD performs better in most scenarios, especially when the simulation setting is close to the real data. Moreover, KBD is more computationally efficient in identifying significant variants.

From Figures 1 and 3, we can see that the power curves are similar beyond a sample size of 500 when the minor allele frequency is not small. On the other hand, Figures 2 and 4 show that when the minor allele is rare, a larger sample size leads to higher power. The four figures show how the sample size affects the power of the KBD method, indicating an inverse relationship between the minor allele frequency and the sample size required for sufficient power.
<!-- ![](./kbd_gwas.png) --> <p align="center"> <img src="./kbd_gwas.png" width="500" height="500"> </p> <!-- ### Summary plots of simulation results of heterogeneous settings of the KBD method --> ```{r, echo=FALSE, fig.align='center', eval=FALSE} library(ggplot2) library(ggpubr) size_15 <- 9 size_20 <- 12 df_1_75<-data.frame(d=c(1:5,1:5,1:5), power=c(0,0.98,1,1,1,0,1,1,1,1,0.04,1,1,1,1), group=c(rep("n=500",5),rep("n=750",5),rep("n=1000",5))) df_1_75$group<-factor(df_1_75$group, levels = c("n=500", "n=750", "n=1000")) p1 <- ggplot(data=df_1_75, aes(x=d, y=power, colour=group,linetype=group)) + geom_point()+ geom_line(size=1) + scale_linetype_manual(values = c('dotdash', 'dotted', 'dashed'))+ labs(x="d",y="power",title = "power curve \n rho=0.5,p=0.75", caption = "Figure 1")+ labs(colour="sample size",linetype="sample size")+ theme_classic()+ theme(plot.title = element_text(hjust = 0.5,size=size_20), plot.caption = element_text(hjust = 0.5,size=size_15), axis.title.x =element_text(size=size_15), axis.title.y=element_text(size=size_15), axis.text.x = element_text(size=size_15), axis.text.y = element_text(size=size_15), legend.title = element_text(size=size_15), legend.text = element_text(size=size_15), legend.position = "bottom") df_1_95<-data.frame(d=c(1:5,1:5,1:5), power=c(0.01,0.03,0.5,1,1,0,0.15,0.81,1,1,0.01,0.21,0.99,1,1), group=c(rep("n=500",5),rep("n=750",5),rep("n=1000",5))) df_1_95$group<-factor(df_1_95$group, levels = c("n=500", "n=750", "n=1000")) p2 <- ggplot(data=df_1_95, aes(x=d, y=power, colour=group,linetype=group)) + geom_point()+ geom_line(size=1) + scale_linetype_manual(values = c('dotdash', 'dotted', 'dashed'))+ labs(x="d",y="power", title = "power curve \n rho=0.5,p=0.95", caption = "Figure 2")+ labs(colour="sample size",linetype="sample size")+ theme_classic()+ theme(plot.title = element_text(hjust = 0.5,size=size_20), plot.caption = element_text(hjust = 0.5,size=size_15), axis.title.x =element_text(size=size_15), axis.title.y=element_text(size=size_15), axis.text.x = element_text(size=size_15), axis.text.y = element_text(size=size_15), legend.title = element_text(size=size_15), legend.text = element_text(size=size_15), legend.position = "bottom") df_2_75<-data.frame(d=c(1:5,1:5,1:5), power=c(0,1,1,1,1,0,1,1,1,1,0,1,1,1,1), group=c(rep("n=500",5),rep("n=750",5),rep("n=1000",5))) df_2_75$group<-factor(df_2_75$group, levels = c("n=500", "n=750", "n=1000")) p3 <- ggplot(data=df_2_75, aes(x=d, y=power, colour=group,linetype=group)) + geom_point()+ geom_line(size=1) + scale_linetype_manual(values = c('dotdash', 'dotted', 'dashed'))+ labs(x="d",y="power",title = "power curve \n rho=0,p=0.75", caption = "Figure 3")+ labs(colour="sample size",linetype="sample size")+ theme_classic()+ theme(plot.title = element_text(hjust = 0.5,size=size_20), plot.caption = element_text(hjust = 0.5,size=size_15), axis.title.x =element_text(size=size_15), axis.title.y=element_text(size=size_15), axis.text.x = element_text(size=size_15), axis.text.y = element_text(size=size_15), legend.title = element_text(size=size_15), legend.text = element_text(size=size_15), legend.position = "bottom") df_2_95<-data.frame(d=c(1:5,1:5,1:5), power=c(0,0.02,0.56,1,1,0,0.04,0.93,1,1,0,0.11,1,1,1), group=c(rep("n=500",5),rep("n=750",5),rep("n=1000",5))) df_2_95$group<-factor(df_2_95$group, levels = c("n=500", "n=750", "n=1000")) p4 <- ggplot(data=df_2_95, aes(x=d, y=power, colour=group,linetype=group)) + geom_point()+ geom_line(size=1) + scale_linetype_manual(values = c('dotdash', 'dotted', 'dashed'))+ 
  labs(x="d",y="power",title = "power curve \n rho=0,p=0.95",
       caption = "Figure 4")+
  labs(colour="sample size",linetype="sample size")+
  theme_classic()+
  theme(plot.title = element_text(hjust = 0.5,size=size_20),
        plot.caption = element_text(hjust = 0.5,size=size_15),
        axis.title.x =element_text(size=size_15),
        axis.title.y=element_text(size=size_15),
        axis.text.x = element_text(size=size_15),
        axis.text.y = element_text(size=size_15),
        legend.title = element_text(size=size_15),
        legend.text = element_text(size=size_15),
        legend.position = "bottom")

p <- ggarrange(p1, p2, p3, p4, nrow = 2, widths = 8, heights = 8,
               common.legend = TRUE, ncol = 2, legend = "bottom")
ggexport(p, filename = "kbd_gwas.png")
```

## Conclusion

We implement `bd.gwas.test` in the Ball package for handling multiple KBD tests. KBD is a powerful method that can detect significant variants with a controllable type I error regardless of whether the data are balanced or not.

## Reference

Yue Hu, Haizhu Tan, Cai Li, Heping Zhang. (2021). Identifying genetic risk variants associated with brain volumetric phenotypes via K-sample Ball Divergence method. Genetic Epidemiology, 1–11. https://doi.org/10.1002/gepi.22423
/scratch/gouwar.j/cran-all/cranData/Ball/inst/doc/bd_gwas.Rmd
library(igraph)
library(scales)
library(networkD3)
library(stringr)

#'Create vertices and edges (with additional properties) of a Ball Mapper graph representation of the input data. Please be aware that the program will not perform any normalization on the data. As with cluster analysis, we recommend that you consider whether to normalize the data prior to running the function.
#'
#'@param points, a collection of input points in a form of a data frame. These are typically points in Euclidean space. By default the Euclidean distance is used to construct the Ball Mapper.
#'@param values, a collection of outcome values which apply to the data points. Mean values of this variable within any given ball will be used to color the Ball Mapper graph. If it is not available, please set it to a constant array with the same length as the number of observations in the dataset.
#'@param epsilon, the value of the radius of balls used in the Ball Mapper construction.
#'@return The function returns a long list of outputs which are explained below:
#'vertices, comprises two binded lists: the first contains an increasing sequence of numbers starting from 1 to the number of vertices, each of which corresponds to a landmark point; the second contains the number of points covered by a ball of radius epsilon centered at the corresponding landmark point.
#'edges, a collection of undirected edges composed of the first and the second vertex. The ordering of vertices has no meaning.
#'edges_strength, for every edge [a,b] we define its strength as the number of points that are covered by both landmarks a and b. This array contains the strength of every edge in the Ball Mapper graph.
#'points_covered_by_landmarks, is a list of vectors. The i-th vector contains the positions of points covered by the i-th landmark.
#'landmarks, contains a list of positions of the landmark points used to construct the balls.
#'coloring, is a vector having as many positions as the number of landmarks. It contains the averaged outcome values of the coloring variable corresponding to the points covered by each landmark.
#' @examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame( sin(var) )
#' epsilon <- 0.25
#' l <- BallMapper(points,values,epsilon)
#' @export
BallMapper <- function( points , values , epsilon )
{
  # First we create an array of the same length as the collection of points. We will store here the numbers of landmarks that cover every given point.
  coverage <- list()
  for ( i in 1:length(points[,1]))
  {
    coverage[[i]] <- vector()
  }
  # In this vector we will be storing the ids of landmarks:
  landmarks <- vector()
  first_uncovered <- 1
  number_of_landmark <- 1
  # Now we will be adding landmark by landmark as long as not all the points are covered.
  while ( first_uncovered <= length(points[,1]) )
  {
    landmarks <- c(landmarks, first_uncovered)
    # Now we will check which points are covered by first_uncovered.
    for ( j in 1:length(points[,1]) )
    {
      # This bit is not optimal, as we are copying a lot of data here for high dimensional point clouds...
      distance <- stats::dist(rbind( points[j,] , points[first_uncovered,]) )
      if ( distance <= epsilon )
      {
        coverage[[j]] <- c( coverage[[j]] , number_of_landmark )
      }
    }
    # Move first_uncovered forward to the first point not yet covered by any landmark.
    while ( TRUE )
    {
      if (first_uncovered > length(points[,1])) break
      if (length(coverage[[first_uncovered]])==0) break
      first_uncovered = first_uncovered+1
    }
    #print(paste0("first_uncovered: ", first_uncovered))
    number_of_landmark <- number_of_landmark+1
  }
  # To balance the last additional increment.
  number_of_landmark <- number_of_landmark-1

  # Over here we compute the list of elements which are covered by the consecutive landmarks:
  points_covered_by_landmarks <- list()
  for ( i in 1:length(landmarks))
  {
    points_covered_by_landmarks[[i]] = vector()
  }
  for ( i in 1:length(coverage) )
  {
    for ( j in 1:length(coverage[[i]]) )
    {
      points_covered_by_landmarks[[ coverage[[i]][j] ]] <- c(points_covered_by_landmarks[[ coverage[[i]][j] ]],i)
    }
  }

  # Now we create a graph. The number of vertices is the same as number_of_landmark.
  # We will create a list storing the number of points covered by each landmark.
  # Note the +2 offset: the stored size is the covered-point count plus two, which keeps the smallest vertices visible when sizes are rescaled for plotting.
  numer_of_covered_points = vector( length=number_of_landmark )
  for ( i in 1:length(points_covered_by_landmarks) )
  {
    numer_of_covered_points[ i ] <- 2+length(points_covered_by_landmarks[[i]])
  }

  # And for every landmark, we will consider all the points covered by it, and compute the average value of the function therein.
  # It will be stored in the variable named coloring.
  coloring = vector( length=number_of_landmark )
  for ( i in 1:length(points_covered_by_landmarks) )
  {
    average_function_value <- 0
    for ( j in 1:length(points_covered_by_landmarks[[i]]) )
    {
      average_function_value <- average_function_value+values[ points_covered_by_landmarks[[i]][j], ]
    }
    average_function_value <- average_function_value/length(points_covered_by_landmarks[[i]])
    coloring[i] <- average_function_value
  }

  # Here we create the edges with weights: two landmarks are joined whenever they cover a common point.
  from = vector()
  to = vector()
  for ( i in 1:length(coverage) )
  {
    for ( j in 1:length(coverage[[i]]) )
    {
      for ( k in j:length(coverage[[i]]) )
      {
        if ( j != k )
        {
          from <- c( from,coverage[[i]][j] )
          to <- c(to,coverage[[i]][k])
        }
      }
    }
  }

  # And here we create the network. Nodes are weighted by the number of points covered by them.
  nodes = cbind('id'=1:number_of_landmark,size=numer_of_covered_points)
  links = cbind(from,to)
  # We may want to remove repetitions from links:
  # links <- unique(links)
  # or use the number of repetitions as a measure of the strength of an edge.
  # The code below computes the number of repetitions of each edge. This number can be used
  # as the edge's weight and utilized during the visualization.
  # NOTE THAT THIS IS A QUADRATIC PROCEDURE THAT SHOULD BE OPTIMIZED!!
  unique_from = vector()
  unique_to = vector()
  strength_of_edges = vector()
  was_edge_counted <- vector( length=length(links[,1]) )
  first_not_counted_edge = 1;
  while ( first_not_counted_edge <= length(links[,1]) )
  {
    #print(paste0("Edge to consider: ", links[first_not_counted_edge,1], " " , links[first_not_counted_edge,2]))
    number_of_repetitions_of_this_edge <- 0
    for ( i in first_not_counted_edge:length(links[,1]) )
    {
      if ( (links[i,1] == links[first_not_counted_edge,1])&(links[i,2] == links[first_not_counted_edge,2]) )
      {
        number_of_repetitions_of_this_edge <- number_of_repetitions_of_this_edge+1
        was_edge_counted[ i ] = TRUE;
      }
    }
    unique_from = c( unique_from , links[first_not_counted_edge,1] )
    unique_to = c( unique_to , links[first_not_counted_edge,2] )
    strength_of_edges = c( strength_of_edges , number_of_repetitions_of_this_edge )
    while ( first_not_counted_edge <= length(links[,1]) )
    {
      if ( was_edge_counted[ first_not_counted_edge ] == TRUE )
      {
        first_not_counted_edge <- first_not_counted_edge+1;
      }
      else
      {
        break
      }
    }
  }
  links = cbind(unique_from,unique_to)
  return_list <- list( "vertices" = nodes , "edges" = links , "edges_strength" = strength_of_edges , "points_covered_by_landmarks" = points_covered_by_landmarks, "landmarks" = landmarks , "coloring" = coloring , "coverage" = coverage )
  return(return_list)
}#BallMapper
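# The block below is an illustrative sketch added for exposition, not part of
# the package API: it shows how to inspect the structure returned by
# BallMapper() on the circle example used throughout the documentation.
demo_ballmapper_output <- function()
{
  var <- seq(from = 0, to = 6.3, by = 0.1)
  points <- as.data.frame(cbind(sin(var), cos(var)))
  values <- as.data.frame(sin(var))
  l <- BallMapper(points, values, 0.25)
  print(head(l$vertices))        # vertex ids and (offset) ball sizes
  print(head(l$edges))           # pairs of overlapping balls
  print(head(l$edges_strength))  # number of points shared by each edge
  print(l$coloring)              # mean outcome value per ball
  invisible(l)
}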
#'Produce a static color visualization of the Ball Mapper graph. It is based on the output from the BallMapper function.
#'
#'@param outputFromBallMapper, an output from the BallMapper function
#'@param showVertexLabels, a boolean value determining if the vertex labels are to be shown (TRUE by default).
#'@param showLegend, a boolean value determining if the legend is to be shown (FALSE by default).
#'@param minimal_ball_radius, provide a minimal value of the radius of balls used in visualization (7 by default).
#'@param maximal_ball_scale, provide a maximal value of the radius of balls used in visualization (20 by default).
#'@param maximal_color_scale, provide a maximal value (starting from 0) of the color of a ball (10 by default).
#'@param seed_for_plotting, if set to the same number, it will fix the random seed used by the plotting routine and produce plots with the same layout every time.
#'@param store_in_file if set to a string, will open a png file, and store the plot therein. By default it is set to an empty string.
#'@param default_x_image_resolution store a default resolution of image in x direction. Set to 512 by default.
#'@param default_y_image_resolution store a default resolution of image in y direction. Set to 512 by default.
#'@param number_of_colors store a number of colors used in the plot.
#'@return None.
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame( sin(var) )
#' epsilon <- 0.25
#' l <- BallMapper(points,values,epsilon)
#' ColorIgraphPlot(l)
#' @export
ColorIgraphPlot <- function( outputFromBallMapper, showVertexLabels = TRUE , showLegend = FALSE , minimal_ball_radius = 7 , maximal_ball_scale=20, maximal_color_scale=10 , seed_for_plotting = -1 , store_in_file = "" , default_x_image_resolution = 512 , default_y_image_resolution = 512 , number_of_colors = 100)
{
  # Rescale the ball sizes to the requested plotting range.
  vertices = outputFromBallMapper$vertices
  vertices[,2] <- maximal_ball_scale*vertices[,2]/max(vertices[,2])+minimal_ball_radius
  net = igraph::graph_from_data_frame(outputFromBallMapper$edges,vertices = vertices,directed = F)
  jet.colors <- grDevices::colorRampPalette(c("red","orange","yellow","green","cyan","blue","violet"))
  color_spectrum <- jet.colors( number_of_colors )
  # And over here we map the palette to the order of values on vertices.
  min_ <- min(outputFromBallMapper$coloring)
  max_ <- max(outputFromBallMapper$coloring)
  color <- vector(length = length(outputFromBallMapper$coloring),mode="double")
  for ( i in 1:length( outputFromBallMapper$coloring ) )
  {
    position <- base::max(base::ceiling(number_of_colors*(outputFromBallMapper$coloring[i]-min_)/(max_-min_)),1)
    color[ i ] <- color_spectrum [ position ]
  }
  igraph::V(net)$color <- color
  if ( showVertexLabels == FALSE )igraph::V(net)$label = NA
  if ( seed_for_plotting != -1 )base::set.seed(seed_for_plotting)
  if ( store_in_file != "" ) grDevices::png(store_in_file, default_x_image_resolution, default_y_image_resolution)
  #igraph::V(net)$label.cex = 1.3 #Change this line if you would like to have labels of different sizes.
  graphics::plot(net)
  fields::image.plot(legend.only=T, zlim=range(outputFromBallMapper$coloring), col=color_spectrum )
  if ( store_in_file != "" )grDevices::dev.off()
}#ColorIgraphPlot

#'Produce a static grayscale visualization of the Ball Mapper graph. It is based on the output from the BallMapper function.
#'
#'@param outputFromBallMapper, an output from the BallMapper function
#'@param showVertexLabels, a boolean value determining if vertex labels are to be shown (TRUE by default).
#'@param minimal_ball_radius, provide a minimal value of the radius of balls used in visualization (7 by default).
#'@param maximal_ball_scale, provide a maximal value of the radius of the balls used in visualization (20 by default).
#'@param seed_for_plotting, if set to the same number, it will fix the random seed used by the plotting routine and produce plots with the same layout every time.
#'@param store_in_file if set to a string, will open a png file, and store the plot therein. By default it is set to an empty string.
#'@param default_x_image_resolution store a default resolution of image in x direction. Set to 512 by default.
#'@param default_y_image_resolution store a default resolution of image in y direction. Set to 512 by default.
#'@return None.
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame( sin(var) )
#' epsilon <- 0.25
#' l <- BallMapper(points,values,epsilon)
#' GrayscaleIgraphPlot(l)
#' @export
GrayscaleIgraphPlot <- function( outputFromBallMapper , showVertexLabels = TRUE , minimal_ball_radius = 7 , maximal_ball_scale=20 , seed_for_plotting = -1 , store_in_file = "" , default_x_image_resolution = 512 , default_y_image_resolution = 512)
{
  vertices = outputFromBallMapper$vertices
  vertices[,2] <- maximal_ball_scale*vertices[,2]/max(vertices[,2])+minimal_ball_radius
  net = igraph::graph_from_data_frame(outputFromBallMapper$edges,vertices = vertices,directed = F)
  coloring = outputFromBallMapper$coloring
  coloring <- grDevices::gray(scales::rescale(outputFromBallMapper$coloring, c(0, 1)))
  igraph::V(net)$color = coloring
  if ( showVertexLabels == FALSE )igraph::V(net)$label = NA
  # This command sets up a fancy background, uncomment if needed.
  #par(bg="grey32", mar=c(0,0,0,0))
  if ( seed_for_plotting != -1 )base::set.seed(seed_for_plotting)
  if ( store_in_file != "" ) grDevices::png(store_in_file, default_x_image_resolution, default_y_image_resolution)
  graphics::plot(net)
  if ( store_in_file != "" )grDevices::dev.off()
  #return(net)
}#GrayscaleIgraphPlot

#'Produce a two-column list. The first column contains the index of a point (possibly with repetitions), the second one contains the index of a landmark that covers it.
#'For example, let us assume that point 1 is covered by landmarks 1 and 2, and point 2 is covered by landmark 2. In this case the obtained list is of the form:
#'1 1
#'1 2
#'2 2
#'This list can be used for a further analysis of various parts of the Ball Mapper graph.
#'@param coverageFromBallMapper, the coverage element of an output from the BallMapper function
#'@return List of landmarks covering each point, as described above.
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame( sin(var) )
#' epsilon <- 0.25
#' l <- BallMapper(points,values,epsilon)
#'list <- pointToBallList(l$coverage)
#' @export
pointToBallList <- function( coverageFromBallMapper )
{
  vertices <- vector()
  coveringBalls <- vector()
  for ( i in 1:length(coverageFromBallMapper) )
  {
    v <- unlist(coverageFromBallMapper[i])
    for ( j in 1:length(v) )
    {
      vertices <- c( vertices , i )
      coveringBalls <- c( coveringBalls , v[j] )
    }
  }
  return(cbind( vertices , coveringBalls ))
}

#'This is a simple example of dynamic visualization using the networkD3 library.
#'This version does not implement coloring of vertices; it just gives a general overview of the edges.
#'@param outputFromBallMapper, an output from the BallMapper function.
#'@param storeAsHtml, if set to TRUE, the graph will be stored in an HTML file.
#' @return None
#' @examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame( sin(var) )
#' epsilon <- 0.25
#' l <- BallMapper(points,values,epsilon)
#' simpleDynamicNetwork(l)
#' @export
simpleDynamicNetwork <- function( outputFromBallMapper , storeAsHtml = FALSE )
{
  networkData <- data.frame(outputFromBallMapper$edges-1)
  sn <- networkD3::simpleNetwork(networkData,zoom=T)
  methods::show(sn)
  if ( storeAsHtml == TRUE )networkD3::saveNetwork(file = 'Net1.html')
}#simpleDynamicNetwork

#'This procedure produces a dynamic graph with colors. It allows a zoom-in operation and displays information about the vertices when they are clicked upon.
#'
#'@param outputOfBallMapper, an output from the BallMapper function
#'@param showLegend, if set to TRUE a legend will be displayed indicating the coloring of the values of vertices.
#' @return None
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame( sin(var) )
#' epsilon <- 0.25
#' l <- BallMapper(points,values,epsilon)
#' coloredDynamicNetwork(l)
#' @export
coloredDynamicNetwork <- function( outputOfBallMapper , showLegend = FALSE )
{
  # Preparation of links.
  source <- outputOfBallMapper$edges[,1]-1
  target <- outputOfBallMapper$edges[,2]-1
  value <- outputOfBallMapper$edges_strength
  links = cbind(source,target,value)
  links <- as.data.frame(links)
  # Preparation of nodes.
  vert <- paste('id:',as.character(outputOfBallMapper$vertices[,1]),',val:',as.character(outputOfBallMapper$coloring))
  nodeSize <- outputOfBallMapper$vertices[,2]
  color <- outputOfBallMapper$coloring
  vertices <- cbind(vert,color,nodeSize)
  vertices<- as.data.frame(vertices)
  fn <- networkD3::forceNetwork( Links = links, Nodes = vertices, Source = "source", Target = "target", NodeID = "vert", Value = "value", Group = "color" , opacity = 1, opacityNoHover = 0.1, zoom = T, Nodesize = "nodeSize", legend = showLegend )
  methods::show(fn)
}#coloredDynamicNetwork

#'Produce a collection of png files with Ball Mapper graphs colored by consecutive coordinates (so that the number of files is the same as the number of coordinates).
#'
#'@param outputFromBallMapper an output from the BallMapper function
#'@param points, a collection of input points in a form of a data frame used to create the Ball Mapper graph.
#'@param fileNamePrefix a prefix of a file name. A plot that uses the i-th variable as a coloring will contain this string as a prefix followed by the number i. Set to "output_" by default.
#'@param defaultXResolution store a default resolution of image in x direction. Set to 512 by default.
#'@param defaultYResolution store a default resolution of image in y direction. Set to 512 by default.
#'@return None.
#var <- seq(from=0,to=6.3,by=0.1)
#points <- as.data.frame( cbind( sin(var),cos(var) ) )
#values <- as.data.frame( sin(var) )
#epsilon <- 0.25
#l <- BallMapper(points,values,epsilon)
#colorByAllVariables(l,points,"your_favorite_file_name")
#'@export
colorByAllVariables<- function( outputFromBallMapper , points , fileNamePrefix = "output_" , defaultXResolution = 512 , defaultYResolution = 512 )
{
  oldColoring <- outputFromBallMapper$coloring
  # For every dimension in the points vector:
  for ( i in 1:length(points) )
  {
    val <- as.data.frame(points[,i])
    newColoring <- vector( length=length(outputFromBallMapper$points_covered_by_landmarks) )
    # For every landmark point, average the value of the i-th coordinate over the points it covers.
    for ( land in 1:length(outputFromBallMapper$points_covered_by_landmarks))
    {
      average <- 0
      for ( coveredPoint in 1:length( outputFromBallMapper$points_covered_by_landmarks[[land]] ) )
      {
        average <- average + val[ outputFromBallMapper$points_covered_by_landmarks[[land]][coveredPoint ] , ]
      }
      average <- average/length( outputFromBallMapper$points_covered_by_landmarks[[land]] )
      newColoring[land] <- average
    }
    # Compute and set up the new coloring.
    outputFromBallMapper$coloring <- newColoring
    # Here we set up the name of the file.
    filename_ <- cbind( fileNamePrefix , toString(i) , ".png" )
    filename <- stringr::str_c( filename_ , collapse = "")
    ColorIgraphPlot(outputFromBallMapper, seed_for_plotting = 123 , store_in_file = filename , default_x_image_resolution = defaultXResolution , default_y_image_resolution = defaultYResolution)
    #GrayscaleIgraphPlot(outputFromBallMapper, seed_for_plotting = 123 , store_in_file = filename , default_image_resolution = defaultResolution)
  }
  outputFromBallMapper$coloring <- oldColoring;
}#colorByAllVariables

#' This function normalizes each column (variable) of the input dataset so that the maximum is mapped to one, the minimum to zero, and the intermediate values linearly to the appropriate points in the interval (0,1).
#' @param points, a collection of input points in a form of a data frame.
#' @return Normalized collection of points.
#' @examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' normalized_points <- normalize_to_min_0_max_1 (points)
#' @export
normalize_to_min_0_max_1 <- function( points )
{
  for ( i in 1:length(points) )
  {
    points[,i] <- scales::rescale(points[,i], c(0, 1))
  }
  return(points)
}#normalize_to_min_0_max_1

#' This function normalizes each column (variable) of the input dataset so that the average of the normalized column is 0 and its standard deviation is 1.
#' @param points, a collection of input points in a form of a data frame.
#' @return Normalized collection of points.
#' @examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' normalized_points <- normalize_to_average_0_stdev_1 (points)
#' @export
normalize_to_average_0_stdev_1 <- function( points )
{
  for ( i in 1:length(points) )
  {
    av <- base::mean(points[,i])
    stdev <- stats::sd(points[,i])
    for ( j in 1:length( points[,i] ) )
    {
      points[j,i] <- (points[j,i] - av )/stdev
    }
  }
  return(points)
}#normalize_to_average_0_stdev_1

#' This function returns a list of points covered by the given collection of landmarks.
#'@param outputFromBallMapper an output from the BallMapper function
#'@param numbers_of_landmarks a vector containing the numbers of landmarks under consideration.
#'@return A vector of points covered by the landmarks given in numbers_of_landmarks.
#'@examples
#'var <- seq(from=0,to=6.3,by=0.1)
#'points <- as.data.frame( cbind( sin(var),cos(var) ) )
#'values <- as.data.frame(sin(var))
#'l <- BallMapper(points, values, 0.25)
#'pts_indices <- points_covered_by_landmarks(l,c(1,2,3,4))
#'@export
points_covered_by_landmarks <- function( outputFromBallMapper , numbers_of_landmarks )
{
  # Find all the points covered by the given landmarks. They are obtained by concatenating the points_covered_by_landmarks lists.
  all_vertices <- vector()
  for ( i in 1:length(numbers_of_landmarks ) )
  {
    # Compare against the number of landmarks, not length() of the vertex matrix (which counts all of its entries).
    if ( numbers_of_landmarks[i] > length( outputFromBallMapper$landmarks ) )
    {
      base::warning( "Number of landmark out of range, it will not be taken into account.")
    }
    else
    {
      all_vertices <- c( all_vertices , outputFromBallMapper$points_covered_by_landmarks[ numbers_of_landmarks[i] ] )
    }
  }
  all_vertices <- base::unlist(all_vertices)
  # Sort all_vertices and remove repetitions.
  all_vertices <- base::sort( all_vertices )
  all_vertices <- base::rle(all_vertices )$val
  return(all_vertices)
}#points_covered_by_landmarks

#' This is an auxiliary function. It takes the coordinates of points, the ids of a subset of points, and a coordinate index, and returns the vector of the given coordinate in the considered points.
#' For instance, given the collection of points:
#' 1 2 3
#' 4 5 6
#' 7 8 9
#' and which_subset = 2,3
#' and number_of_coordinate = 2
#' the procedure below will return the vector [5,8].
#'@param points is a collection of input points in a form of a data frame. The same one as on the input of the Ball Mapper.
#'@param which_subset Indices of points in the given subset.
#'@param number_of_coordinate which coordinate of the considered points to export.
#'@return the vector of values of a given variable at the collection of points.
#'@examples
#'var <- seq(from=0,to=6.3,by=0.1)
#'points <- as.data.frame( cbind( sin(var),cos(var) ) )
#'values <- as.data.frame(sin(var))
#'l <- BallMapper(points, values, 0.25)
#'coordinates_of_points_in_subcollection(points,c(6,7,8),1)
#'@export
coordinates_of_points_in_subcollection <- function( points , which_subset, number_of_coordinate )
{
  if ( (number_of_coordinate<1) || (number_of_coordinate > length(points)) )
  {
    base::warning( "Wrong number of coordinate in the coordinates_of_points_in_subcollection procedure.")
  }
  number_of_points <- length( points[,1] )
  result <- vector( length=length(which_subset) );
  for ( pt in 1:length(which_subset) )
  {
    if ( (which_subset[pt] < 1) || (which_subset[pt] > number_of_points) )
    {
      base::warning( "Wrong id of point in the coordinates_of_points_in_subcollection procedure.")
    }
    else
    {
      result[pt] <- points[ which_subset[pt] , number_of_coordinate ];
    }
  }
  return(result)
}#coordinates_of_points_in_subcollection

#' This procedure takes two subsets of points (that come from the vertices of Ball Mapper) and returns
#' the coordinates on which the averages of those two collections differ most. To balance the effect
#' of potentially different orders of magnitude of the data in the columns, we divide the difference in means by the mean of the whole column.
#'@param points a collection of input points in a form of a data frame. The same one as on the input of the Ball Mapper.
#'@param subset1 First subset of ids of points.
#'@param subset2 Second subset of ids of points.
#'@return Vector of coordinate ids with the absolute value of the difference between the averages, ordered according to the second variable.
#'@examples
#'var <- seq(from=0,to=6.3,by=0.1)
#'points <- as.data.frame( cbind( sin(var),cos(var) ) )
#'values <- as.data.frame(sin(var))
#'l <- BallMapper(points, values, 0.25)
#'g1 <- c(1,2)
#'g2 <- c(11,12)
#'find_dominant_difference_using_averages(points,g1,g2)
#'@export
find_dominant_difference_using_averages <- function( points , subset1 , subset2 )
{
  differences <- vector( length=length(points) )
  coords <- vector( length=length(points) )
  for ( coord in 1:length(points) )
  {
    v1 <- coordinates_of_points_in_subcollection( points , subset1, coord )
    v2 <- coordinates_of_points_in_subcollection( points , subset2, coord )
    differences[coord] <- abs(mean(v1)-mean(v2))/mean( points[,coord] )
    coords[coord] <- coord
  }
  result <- as.data.frame( cbind( coords,differences ) )
  result <- result[ base::order( -result$differences ), ]
  return(result)
}#find_dominant_difference_using_averages

#' This procedure takes two subsets of points (that come from the vertices of Ball Mapper) and returns
#' the coordinates on which the averages of those two collections differ most. To balance the effect
#' of potentially different orders of magnitude of the data in the columns, we divide the difference in means by the standard deviation of the whole column.
#'@param points a collection of input points in a form of a data frame. The same one as on the input of the Ball Mapper.
#'@param subset1 First subset of ids of points.
#'@param subset2 Second subset of ids of points.
#'@return Vector of coordinate ids with the absolute value of the difference between the averages normalized by the standard deviation of the considered column, ordered according to the second variable.
#'@examples
#'var <- seq(from=0,to=6.3,by=0.1)
#'points <- as.data.frame( cbind( sin(var),cos(var) ) )
#'values <- as.data.frame(sin(var))
#'l <- BallMapper(points, values, 0.25)
#'g1 <- c(1,2)
#'g2 <- c(11,12)
#'find_dominant_difference_using_averages_normalized_by_sd(points,g1,g2)
#'@export
find_dominant_difference_using_averages_normalized_by_sd <- function( points , subset1 , subset2 )
{
  differences <- vector( length=length(points) )
  coords <- vector( length=length(points) )
  for ( coord in 1:length(points) )
  {
    v1 <- coordinates_of_points_in_subcollection( points , subset1, coord )
    v2 <- coordinates_of_points_in_subcollection( points , subset2, coord )
    differences[coord] <- abs(mean(v1)-mean(v2))/stats::sd( points[,coord] )
    coords[coord] <- coord
  }
  result <- as.data.frame( cbind( coords,differences ) )
  result <- result[ base::order( -result$differences ), ]
  return(result)
}#find_dominant_difference_using_averages_normalized_by_sd

#' This function will provide a new coloring which is the minimal and average distance of points in the
#' point cloud to the reference points. The output from this procedure can be used as an alternative coloring in BallMapper.
#' @param allPoints is a collection of all points in the dataset.
#' @param refPoints is a subset of all points. The function will compute the distance of each point from allPoints to refPoints.
#' @return a pair of minimal and average distances. They can be used to color the BallMapper graph.
#' @examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame(sin(var))
#' l <- BallMapper(points, values, 0.25)
#' pts <- as.data.frame(points_covered_by_landmarks(l,1))
#' new_coloring_function <- color_by_distance_to_reference_points( points, pts )
#' l$coloring <- new_coloring_function[,1]
#' ColorIgraphPlot(l)
#' l$coloring <- new_coloring_function[,2]
#' ColorIgraphPlot(l)
#' @export
color_by_distance_to_reference_points <- function( allPoints , refPoints )
{
  newColoringMin <- vector( length=length(allPoints[,1]) )
  newColoringAV <- vector( length=length(allPoints[,1]) )
  # Now for every point:
  for ( pt in 1:length(allPoints[,1]) )
  {
    min_distance <- .Machine$double.xmax
    sum_of_distances <- 0
    for ( ref in 1:length(refPoints[,1]) )
    {
      # Compute the distance between allPoints[pt] and refPoints[ref].
      dist <- stats::dist(rbind( allPoints[pt,] , refPoints[ref,] ) )
      if ( dist < min_distance )min_distance = dist
      sum_of_distances <- sum_of_distances + dist;
    }
    newColoringMin[pt] <- min_distance
    newColoringAV[pt] <- sum_of_distances/length(refPoints[,1])
  }
  return( cbind(newColoringMin,newColoringAV) )
}#color_by_distance_to_reference_points

#'This procedure stores the Ball Mapper graph in a collection of files sharing the given prefix: separate files for vertices, edges, edge strengths, points covered by landmarks, landmarks, and coloring.
#'@param outputFromBallMapper output from the BallMapper procedure.
#'@param filename the prefix of the files used to store the data.
#'@return None
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame(sin(var))
#' l <- BallMapper(points, values, 0.25)
#' storeBallMapperGraphInFile(l,"my_favorite_BM_graph")
#'@export
storeBallMapperGraphInFile <- function( outputFromBallMapper , filename = "BM_graph" )
{
  # Writing vertices.
  utils::write.table( outputFromBallMapper$vertices , file =paste(filename,"_vertices",sep=""), col.names = F, row.names = F)
  # Writing edges.
  utils::write.table(outputFromBallMapper$edges, file=paste(filename,"_edges",sep=""), col.names = F, row.names = F)
  # Writing edge's strength.
  utils::write.table(outputFromBallMapper$edges_strength, file=paste(filename,"_edges_strength",sep=""), col.names = F, row.names = F)
  # Writing points covered by landmarks. This part is a bit more tricky, as this is a list.
  # In this case, we want to have the points covered by landmark i in the i-th line of the file. To
  # achieve this, each line is stored as a string, and those strings are grouped in a vector.
  output <- vector()
  for ( i in 1:length( outputFromBallMapper$points_covered_by_landmarks ) )
  {
    line <- ""
    for ( j in 1:length( outputFromBallMapper$points_covered_by_landmarks[[i]] ) )
    {
      line <- paste( line , outputFromBallMapper$points_covered_by_landmarks[[i]][j] )
    }
    output <- c( output , line )
  }
  fileConn<-file(paste(filename,"_points_covered_by_landmarks",sep=""))
  writeLines(output, fileConn)
  close(fileConn)
  # Writing landmarks.
  utils::write.table(outputFromBallMapper$landmarks, file=paste(filename,"_landmarks",sep=""), col.names = F, row.names = F)
  # Writing coloring.
  utils::write.table(outputFromBallMapper$coloring, file=paste(filename,"_coloring",sep=""), col.names = F, row.names = F)
  # At the moment we do not store coverage, as this information can be recovered from the points covered by landmarks.
}#storeBallMapperGraphInFile

#' This procedure reads a BallMapper object from files whose names share the prefix
#' filename. We assume that the following files exist:
#' filename_vertices
#' filename_edges
#' filename_edges_strength
#' filename_points_covered_by_landmarks
#' filename_landmarks
#' filename_coloring
#' @param filename prefix of the name of the files containing the elements of the Ball Mapper graph.
#' @return BallMapper object
#' @examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame(sin(var))
#' l <- BallMapper(points, values, 0.25)
#' storeBallMapperGraphInFile(l,"my_favorite_BM_graph")
#' l_prime <- readBallMapperGraphFromFile("my_favorite_BM_graph")
#' @export
readBallMapperGraphFromFile <- function( filename )
{
  vertices <- utils::read.table( file = base::paste(filename,"_vertices",sep="") )
  colnames( vertices ) <- c( "id" , "size" )
  edges <- utils::read.table( file = base::paste(filename,"_edges",sep="") )
  colnames( edges ) <- c( "unique_from" , "unique_to" )
  edges_strength <- as.integer(unlist( utils::read.table( file = base::paste(filename,"_edges_strength",sep="") ) ) )
  landmarks <- as.integer(unlist( utils::read.table( file = base::paste(filename,"_landmarks",sep="") ) ) )
  coloring <- as.double( unlist( utils::read.table( file = base::paste(filename,"_coloring",sep="") ) ) )
  con = base::file( base::paste(filename,"_points_covered_by_landmarks",sep="") )
  lines <- readLines(con)
  close(con)
  number_of_points <- 0
  points_covered_by_landmarks <- list()
  for ( i in 1:length(lines) )
  {
    v <- as.integer( unlist( strsplit(lines[i]," ")) )
    v <- v[!is.na(v)]
    if ( base::max( v ) > number_of_points )number_of_points <- base::max( v )
    points_covered_by_landmarks[[i]] <- v
  }
  # The coverage list is not stored in the files; we rebuild it from points_covered_by_landmarks.
  coverage <- list()
  for ( i in 1:number_of_points)
  {
    coverage[[i]] <- vector()
  }
  for ( i in 1:length(points_covered_by_landmarks) )
  {
    for ( j in 1:length(points_covered_by_landmarks[[i]]) )
    {
      coverage[[ points_covered_by_landmarks[[i]][j] ]] <- c( coverage[[ points_covered_by_landmarks[[i]][j] ]] , i )
    }
  }
  return_list <- list( "vertices" = vertices , "edges" = edges , "edges_strength" = edges_strength , "points_covered_by_landmarks" = points_covered_by_landmarks, "landmarks" = landmarks , "coloring" = coloring , "coverage" = coverage )
  return(return_list)
}#readBallMapperGraphFromFile

#'Produce a new coloring vector being an average of the values of a given function at the points covered by each vertex of the Ball Mapper graph.
#'
#'@param outputFromBallMapper an output from the BallMapper function
#'@param newFunctionOnPoints values of the function on the points.
#'@return Vector of function values on the vertices of the Ball Mapper graph.
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame(sin(var))
#' l <- BallMapper(points, values, 0.25)
#' ColorIgraphPlot(l)
#' new_coloring <- colorByAverageValueOfOtherVariable(l,cos(var))
#' l$coloring <- new_coloring
#' ColorIgraphPlot(l)
#'@export
colorByAverageValueOfOtherVariable<- function( outputFromBallMapper , newFunctionOnPoints )
{
  newColoring <- vector( length=length(outputFromBallMapper$points_covered_by_landmarks) )
  # For every landmark point:
  for ( land in 1:length(outputFromBallMapper$points_covered_by_landmarks))
  {
    # Average the value of newFunctionOnPoints over all points covered by that landmark.
    average <- 0
    for ( coveredPoint in 1:length( outputFromBallMapper$points_covered_by_landmarks[[land]] ) )
    {
      average <- average + newFunctionOnPoints[ outputFromBallMapper$points_covered_by_landmarks[[land]][coveredPoint ] ]
    }
    average <- average/length( outputFromBallMapper$points_covered_by_landmarks[[land]] )
    newColoring[land] <- average
  }
  return(newColoring)
}#colorByAverageValueOfOtherVariable

#'Produce a new coloring vector being the standard deviation of the values of a given
#'function at the points covered by each vertex of the Ball Mapper graph.
#'
#'@param outputFromBallMapper an output from the BallMapper function
#'@param newFunctionOnPoints values of the function on the points.
#'@return Vector of function values on the vertices of the Ball Mapper graph.
#'@examples
#' var <- seq(from=0,to=6.3,by=0.1)
#' points <- as.data.frame( cbind( sin(var),cos(var) ) )
#' values <- as.data.frame(sin(var))
#' l <- BallMapper(points, values, 0.25)
#' ColorIgraphPlot(l)
#' new_coloring <- colorByStDevValueOfOtherVariable(l,sin(var))
#' l$coloring <- new_coloring
#' ColorIgraphPlot(l)
#'@export
colorByStDevValueOfOtherVariable<- function( outputFromBallMapper , newFunctionOnPoints )
{
  newColoring <- vector( length=length(outputFromBallMapper$points_covered_by_landmarks) )
  # For every landmark point:
  for ( land in 1:length(outputFromBallMapper$points_covered_by_landmarks))
  {
    # Collect the values of newFunctionOnPoints over all points covered by that landmark and take their standard deviation.
    v <- vector()
    for ( coveredPoint in 1:length( outputFromBallMapper$points_covered_by_landmarks[[land]] ) )
    {
      v <- c( v , newFunctionOnPoints[ outputFromBallMapper$points_covered_by_landmarks[[land]][coveredPoint ] ] )
    }
    newColoring[land] <- stats::sd(v)
  }
  return(newColoring)
}#colorByStDevValueOfOtherVariable
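# Another expository sketch (not part of the package API): the docstring of
# BallMapper() recommends considering normalization first, and this shows why,
# using normalize_to_min_0_max_1() before building the graph so that no single
# coordinate dominates the Euclidean distance.
demo_normalized_ballmapper <- function()
{
  var <- seq(from = 0, to = 6.3, by = 0.1)
  points <- as.data.frame(cbind(sin(var), 100 * cos(var)))  # second coordinate on a much larger scale
  values <- as.data.frame(sin(var))
  l_raw  <- BallMapper(points, values, 0.25)                           # distances dominated by coordinate 2
  l_norm <- BallMapper(normalize_to_min_0_max_1(points), values, 0.25) # balanced coordinates
  list(raw_vertices = nrow(l_raw$vertices), normalized_vertices = nrow(l_norm$vertices))
}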
/scratch/gouwar.j/cran-all/cranData/BallMapper/R/BallMapper.R
library(testthat)
source("R/BallMapper.R")
test_results <- test_dir("R", reporter="summary")
/scratch/gouwar.j/cran-all/cranData/BallMapper/R/run_test.R
test_that("Test BallMapper of cyclic graph", { rm(list=ls()) #source("BallMapper.R") arg <- seq(from=0,to=6.2,by = 0.1) points <- as.data.frame( cbind( sin(arg),cos(arg) ) ) values <- as.data.frame( sin(arg) ) epsilon <- 0.25 l <- BallMapper(points,values,epsilon) #test vertices id <- c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21) size <- c(7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7) expected_vert = cbind( id , size ) expect_equal( all(expected_vert == l$vertices) , TRUE ) #test edges from = c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20, 1) to = c(2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,21) expected_edges = cbind(from,to) expect_equal( all(expected_edges == l$edges) , TRUE ) #test of edges_strength expected_edges_strength = c(2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2) expect_equal( all(expected_edges_strength == l$edges_strength) , TRUE ) #test points_covered_by_landmarks expected_points_covered_by_landmarks <- list() expected_points_covered_by_landmarks[[1]]<-c(1,2,3,62,63) expected_points_covered_by_landmarks[[2]]<-c(2,3,4,5,6) expected_points_covered_by_landmarks[[3]]<-c(5,6,7,8,9) expected_points_covered_by_landmarks[[4]]<-c(8,9,10,11,12) expected_points_covered_by_landmarks[[5]]<-c(11,12,13,14,15) expected_points_covered_by_landmarks[[6]]<-c(14,15,16,17,18) expected_points_covered_by_landmarks[[7]]<-c(17,18,19,20,21) expected_points_covered_by_landmarks[[8]]<-c(20,21,22,23,24) expected_points_covered_by_landmarks[[9]]<-c(23,24,25,26,27) expected_points_covered_by_landmarks[[10]]<-c(26,27,28,29,30) expected_points_covered_by_landmarks[[11]]<-c(29,30,31,32,33) expected_points_covered_by_landmarks[[12]]<-c(32,33,34,35,36) expected_points_covered_by_landmarks[[13]]<-c(35,36,37,38,39) expected_points_covered_by_landmarks[[14]]<-c(38,39,40,41,42) expected_points_covered_by_landmarks[[15]]<-c(41,42,43,44,45) expected_points_covered_by_landmarks[[16]]<-c(44,45,46,47,48) expected_points_covered_by_landmarks[[17]]<-c(47,48,49,50,51) expected_points_covered_by_landmarks[[18]]<-c(50,51,52,53,54) expected_points_covered_by_landmarks[[19]]<-c(53,54,55,56,57) expected_points_covered_by_landmarks[[20]]<-c(56,57,58,59,60) expected_points_covered_by_landmarks[[21]]<-c(59,60,61,62,63) for ( i in 1:length(expected_points_covered_by_landmarks) ) { expect_equal( all(expected_points_covered_by_landmarks[[i]] == l$points_covered_by_landmarks[[i]]) , TRUE ) } #test landmarks: expected_landmarks <- c(1,4,7,10,13,16,19,22,25,28,31,34,37,40,43,46,49,52,55,58,61) expect_equal( all(expected_landmarks == l$landmarks) , TRUE ) #test coloring: expected_coloring <- c(0.006650168,0.292573367,0.559012026,0.775515807,0.922745069,0.987548263,0.964136712,0.854601699,0.668727662,0.423118175,0.139712801,-0.156172701,-0.438107761,-0.680907959,-0.862884678,-0.967782478,-0.986231151,-0.916582734,-0.765058710,-0.545194270,-0.276629250) expect_equal( all(abs(expected_coloring - l$coloring)<0.00000001) , TRUE ) #test for coverage: expected_coverage <- list() expected_coverage[[1]]<- c(1) expected_coverage[[2]]<- c(1,2) expected_coverage[[3]]<- c(1,2) expected_coverage[[4]]<- c(2) expected_coverage[[5]]<- c(2,3) expected_coverage[[6]]<- c(2,3) expected_coverage[[7]]<- c(3) expected_coverage[[8]]<- c(3,4) expected_coverage[[9]]<- c(3,4) expected_coverage[[10]]<- c(4) expected_coverage[[11]]<- c(4,5) expected_coverage[[12]]<- c(4,5) expected_coverage[[13]]<- c(5) expected_coverage[[14]]<- c(5,6) expected_coverage[[15]]<- c(5,6) expected_coverage[[16]]<- c(6) expected_coverage[[17]]<- c(6,7) 
expected_coverage[[18]]<- c(6,7) expected_coverage[[19]]<- c(7) expected_coverage[[20]]<- c(7,8) expected_coverage[[21]]<- c(7,8) expected_coverage[[22]]<- c(8) expected_coverage[[23]]<- c(8,9) expected_coverage[[24]]<- c(8,9) expected_coverage[[25]]<- c(9) expected_coverage[[26]]<- c(9,10) expected_coverage[[27]]<- c(9,10) expected_coverage[[28]]<- c(10) expected_coverage[[29]]<- c(10,11) expected_coverage[[30]]<- c(10,11) expected_coverage[[31]]<- c(11) expected_coverage[[32]]<- c(11,12) expected_coverage[[33]]<- c(11,12) expected_coverage[[34]]<- c(12) expected_coverage[[35]]<- c(12,13) expected_coverage[[36]]<- c(12,13) expected_coverage[[37]]<- c(13) expected_coverage[[38]]<- c(13,14) expected_coverage[[39]]<- c(13,14) expected_coverage[[40]]<- c(14) expected_coverage[[41]]<- c(14,15) expected_coverage[[42]]<- c(14,15) expected_coverage[[43]]<- c(15) expected_coverage[[44]]<- c(15,16) expected_coverage[[45]]<- c(15,16) expected_coverage[[46]]<- c(16) expected_coverage[[47]]<- c(16,17) expected_coverage[[48]]<- c(16,17) expected_coverage[[49]]<- c(17) expected_coverage[[50]]<- c(17,18) expected_coverage[[51]]<- c(17,18) expected_coverage[[52]]<- c(18) expected_coverage[[53]]<- c(18,19) expected_coverage[[54]]<- c(18,19) expected_coverage[[55]]<- c(19) expected_coverage[[56]]<- c(19,20) expected_coverage[[57]]<- c(19,20) expected_coverage[[58]]<- c(20) expected_coverage[[59]]<- c(20,21) expected_coverage[[60]]<- c(20,21) expected_coverage[[61]]<- c(21) expected_coverage[[62]]<- c(1,21) expected_coverage[[63]]<- c(1,21) for ( i in 1:length(expected_coverage) ) { expect_equal( all(expected_coverage[[i]] == l$coverage[[i]]) , TRUE ) } #Test of the function pointToBallList vertices <- c(1,2,2,3,3,4,5,5,6,6,7,8,8,9,9,10,11,11,12,12,13,14,14,15,15,16,17,17,18,18,19,20,20,21,21,22,23,23,24,24,25,26,26,27,27,28,29,29,30,30,31,32,32,33,33,34,35,35,36,36,37,38,38,39,39,40,41,41,42,42,43,44,44,45,45,46,47,47,48,48,49,50,50,51,51,52,53,53,54,54,55,56,56,57,57,58,59,59,60,60,61,62,62,63,63) coveringBalls <- c(1,1,2,1,2,2,2,3,2,3,3,3,4,3,4,4,4,5,4,5,5,5,6,5,6,6,6,7,6,7,7,7,8,7,8,8,8,9,8,9,9,9,10,9,10,10,10,11,10,11,11,11,12,11,12,12,12,13,12,13,13,13,14,13,14,14,14,15,14,15,15,15,16,15,16,16,16,17,16,17,17,17,18,17,18,18,18,19,18,19,19,19,20,19,20,20,20,21,20,21,21,1,21,1,21) expect_equal( all(pointToBallList(l$coverage)[,1] == vertices) , TRUE ) expect_equal( all(pointToBallList(l$coverage)[,2] == coveringBalls) , TRUE ) } ) test_that("Test coloring as a distance from reference points in BallMapper", { rm(list=ls()) arg <- seq(from=0,to=6.2,by = 0.1) points <- as.data.frame( cbind( sin(arg),cos(arg) ) ) reference_points <- as.data.frame(points[30:35,1:2]) new_coloring_functions <- color_by_distance_to_reference_points( points , reference_points ) newColoringMin <- (c(1.98332962,1.97089946,1.95144672,1.92711637,1.89796924,1.86407817,1.82552788,1.78241472,1.73484645,1.68294197,1.62683101,1.56665382,1.50256081,1.43471218,1.36327752 ,1.28843537, 1.21037281, 1.12928495, 1.04537446, 0.95885108, 0.86993107, 0.77883668, 0.68579561, 0.59104041, 0.49480792, 0.39733866, 0.29887626, 0.19966683,0.09995834, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.09995834, 0.19966683, 0.29887626, 0.39733866, 0.49480792, 0.59104041, 0.68579561, 0.77883668, 0.86993107, 0.95885108, 1.04537446, 1.12928495, 1.21037281, 1.28843537, 1.36327752, 1.43471218, 1.50256081, 1.56665382, 1.62683101, 1.68294197, 1.73484645, 1.78241472, 1.82552788, 1.86407817, 1.89796924, 1.92711637, 1.95144672, 1.97089946)) 
newColoringAV <- (c(1.9926984,1.9906267,1.9835795,1.9715743,1.9546413,1.9328227,1.9061730,1.8747588,1.8386588,1.7979631,1.7527734,1.7032026,1.6493748,1.5914244,1.5294962,1.4637451,1.3943354,1.3214406,1.2452429,1.1659327,1.0837083,0.9987752,0.9113456,0.8216382,0.7298771,0.6362917,0.5411159,0.4445876,0.3469481,0.2484413,0.1826331,0.1496878,0.1496878,0.1826331,0.2484413,0.3469481,0.4445876,0.5411159,0.6362917,0.7298771,0.8216382,0.9113456,0.9987752,1.0837083,1.1659327,1.2452429,1.3214406,1.3943354,1.4637451,1.5294962,1.5914244,1.6493748,1.7032026,1.7527734,1.7979631,1.8386588,1.8747588,1.9061730,1.9328227,1.9546413,1.9715743,1.9835795,1.9906267)) expect_equal( all(abs(new_coloring_functions[,1] - newColoringMin)<0.0000001) , TRUE ) expect_equal( all(abs(new_coloring_functions[,2] - newColoringAV)<0.0000001) , TRUE ) } ) #test_that("Test based on the procedure that write structure of Ball Mapper to file", #{ # rm(list=ls()) # arg <- seq(from=0,to=6.2,by = 0.1) # points <- as.data.frame( cbind( sin(arg),cos(arg) ) ) # values <- as.data.frame( sin(arg) ) # l <- BallMapper( points,values, 0.1 ) # storeBallMapperGraphInFile( l , "test" ) # r <- readBallMapperGraphFromFile( "test" ) # # # # expect_equal( all( l$vertices == r$vertices) , TRUE ) # expect_equal( all( l$edges == r$edges) , TRUE ) # expect_equal( all(abs(l$edges_strength-r$edges_strength)<0.0000001) , TRUE ) # for ( i in 1:length(l$points_covered_by_landmarks) ) # { # expect_equal( all( l$points_covered_by_landmarks[[i]] == r$points_covered_by_landmarks[[i]]) , TRUE ) # } # # expect_equal( all( l$landmarks == r$landmarks) , TRUE ) # expect_equal( all(abs(l$coloring-r$coloring)<0.0000001) , TRUE ) # # for ( i in 1:length(l$coverage) ) # { # expect_equal( all( l$coverage[[i]] == r$coverage[[i]]) , TRUE ) # } #} #)
/scratch/gouwar.j/cran-all/cranData/BallMapper/R/test_BallMapper.R
#' @import stats
#' @export BarBor
BarBor = function(exp,eps,x,v,n)
{
  npar <- length(x)
  rx = 0
  rgk = 0
  if(eps < 0)(return (print("The eps cannot be negative!")))
  if(n == 0) (n = Inf)
  i = 0
  dexp = deriv(exp,v)
  while(i < n)
  {
    i = i + 1
    dd <- data.frame(rbind(c(x)))
    colnames(dd) <- v
    szam = eval(dexp,dd)
    j = 0
    vege = 0
    # Stop when every partial derivative is below eps in absolute value.
    for(j in 1:npar)
    {
      if (abs(attr(szam,"gradient")[j]) >= eps ) {vege = 1}
    }
    if(vege == 0) {print("Stationary point: "); print(x); print("Function value: "); print(eval(exp,dd)); print("Steps: "); print(i); break}
    gk = c(attr(szam,"gradient")[1:npar])
    # Barzilai-Borwein step length computed from the last displacement dxk
    # and the last change of the gradient dgk.
    dxk = x - rx
    dgk = gk - rgk
    alph = ((dxk %*% dgk) / (dgk %*% dgk))
    alph2 = ((dxk %*% dxk) / (dxk %*% dgk))   # the alternative BB step; computed but not used below
    rx = x
    rgk = gk
    x = x - alph * gk
  }
  if(i == n){print(x);print(i)}
}

#' @export
BarBorNoPrint = function(exp,eps,x,v,n)
{
  npar <- length(x)
  rx = 0
  rgk = 0
  if(eps < 0)(return (print("The eps cannot be negative!")))
  if(n == 0) (n = Inf)
  i = 0
  dexp = deriv(exp,v)
  while(i < n)
  {
    i = i + 1
    dd <- data.frame(rbind(c(x)))
    colnames(dd) <- v
    szam = eval(dexp,dd)
    j = 0
    vege = 0
    for(j in 1:npar)
    {
      if (abs(attr(szam,"gradient")[j]) >= eps ) {vege = 1}
    }
    if(vege == 0) {break}   # same as BarBor, but finish silently at a stationary point
    gk = c(attr(szam,"gradient")[1:npar])
    dxk = x - rx
    dgk = gk - rgk
    alph = ((dxk %*% dgk) / (dgk %*% dgk))
    alph2 = ((dxk %*% dxk) / (dxk %*% dgk))
    rx = x
    rgk = gk
    x = x - alph * gk
  }
  if(i == n){print(x);print(i)}
}
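# Usage sketch (added for illustration; only the functions above belong to the
# package): minimize f(x1, x2) = x1^2 + 10*x2^2 from the starting point (1, 2)
# with the Barzilai-Borwein step length. BarBor prints the stationary point,
# the function value, and the number of steps taken.
demo_barbor <- function()
{
  f <- expression(x1^2 + 10 * x2^2)
  BarBor(f, eps = 1e-10, x = c(1, 2), v = c("x1", "x2"), n = 10000)
}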
/scratch/gouwar.j/cran-all/cranData/BarBorGradient/R/BarBor.R
test1 = expression(x1*x1+10*x2*x2)
test2 = expression(100 * (x2 - x1 * x1)^2 + (1 - x1)^2)
test3 = expression(100*(x1*x1-x2)*(x1*x1-x2)+(1-x1)*(1-x1))
test4 = expression((x2-(5.1/4*pi^2)*x1^2+(5/pi)*x1-6)^2+10*(1-(1/8*pi))*cos(x1)+10)
# Note: the standard Branin function uses 5.1/(4*pi^2) and 1/(8*pi); the parenthesization above evaluates to (5.1/4)*pi^2 and (1/8)*pi.
test5 = expression(4*x1^2-2.1*x1^4+1/3*x1^6+x1*x2-4*x2^2+4*x2^4)
test6 = expression(100 * (x1^2-x2)^2 + (x1-1)^2 + (x3-1)^2 + 90 * (x3^2-x4)^2 + 10.1 * ((x2-1)^2 + (x4-1)^2) + 19.8*(x2-1)*(x4-1))

#Drop-Wave function:
DropWave = expression(-1 * ((1 + cos(12*sqrt(x1^2+x2^2))) / (0.5*(x1^2+x2^2) + 2)))
#Levy N. 13:
Levy = expression((sin(3*pi*x1))^2 + (x1-1)^2 * (1+(sin(3*pi*x2))^2) + (x2-1)^2 * (1+(sin(2*pi*x2))^2))
#Schaffer N. 2:
Schaffer = expression(0.5 + ((sin(x1^2-x2^2))^2 - 0.5) / ((1 + 0.001*(x1^2+x2^2))^2))
#Booth:
Booth = expression((x1 + 2*x2 - 7)^2 + (2*x1 + x2 - 5)^2)
#Three-Hump Camel:
ThreeHumpCamel = expression(2*x1^2 + -1.05*x1^4 + x1^6 / 6 + x1*x2 + x2^2)
#Easom function:
Easom = expression(-cos(x1)*cos(x2)*(exp(-(x1-pi)^2-(x2-pi)^2)))
#Beale function:
Beale = expression(((1.5 - x1 + x1*x2)^2) + ((2.25 - x1 + x1*x2^2)^2) + ((2.625 - x1 + x1*x2^3)^2))
#Colville function:
Colville = expression((100 * (x1^2-x2)^2) + ((x1-1)^2) + ((x3-1)^2) + (90 * (x3^2-x4)^2) + (10.1 * ((x2-1)^2 + (x4-1)^2)) + (19.8*(x2-1)*(x4-1)))
#Styblinski-Tang function 3D:
StyblinskiTang3D = expression(((x1^4 - 16*x1^2 + 5*x1) + (x2^4 - 16*x2^2 + 5*x2) + (x3^4 - 16*x3^2 + 5*x3))/2)
#Styblinski-Tang function 4D:
StyblinskiTang4D = expression(((x1^4 - 16*x1^2 + 5*x1) + (x2^4 - 16*x2^2 + 5*x2) + (x3^4 - 16*x3^2 + 5*x3) + (x4^4 - 16*x4^2 + 5*x4))/2)
#Rastrigin function 3D:
Rastrigin3D = expression(10*3 + (x1^2 - 10*cos(2*pi*x1)) + (x2^2 - 10*cos(2*pi*x2)) + (x3^2 - 10*cos(2*pi*x3)))
#Rastrigin function 4D:
Rastrigin4D = expression(10*4 + (x1^2 - 10*cos(2*pi*x1)) + (x2^2 - 10*cos(2*pi*x2)) + (x3^2 - 10*cos(2*pi*x3)) + (x4^2 - 10*cos(2*pi*x4)))
#Rosenbrock function 3D:
Rosenbrock3D = expression((100*(x2-x1^2)^2 + (x1-1)^2) + (100*(x3-x2^2)^2 + (x2-1)^2))
#Rosenbrock function 4D:
Rosenbrock4D = expression((100*(x2-x1^2)^2 + (x1-1)^2) + (100*(x3-x2^2)^2 + (x2-1)^2) + (100*(x4-x3^2)^2 + (x3-1)^2))
#Sum of squares 3D:
SumOfSquares3D = expression((1*x1^2) + (2*x2^2) + (3*x3^2))
#Sum of squares 4D:
SumOfSquares4D = expression((1*x1^2) + (2*x2^2) + (3*x3^2) + (4*x4^2))
#Dixon-Price function 3D:
DixonPrice3D = expression(((x1-1)^2) + (2 * (2*x2^2 - x1)^2) + ((3 * (2*x3^2 - x2)^2)))
#Dixon-Price function 4D:
DixonPrice4D = expression(((x1-1)^2) + (2 * (2*x2^2 - x1)^2) + ((3 * (2*x3^2 - x2)^2)) + (4 * (2*x4^2 - x3)^2))
#Rotated hyper-ellipsoid function 2D:
RotatedHyperEllipsoid2D = expression((x1^2) + (x1^2) + (x2^2))
#Rotated hyper-ellipsoid function 3D:
RotatedHyperEllipsoid3D = expression((x1^2) + (x1^2) + (x2^2) + (x1^2) + (x2^2) + (x3^2))
#Rotated hyper-ellipsoid function 4D:
RotatedHyperEllipsoid4D = expression((x1^2) + (x1^2) + (x2^2) + (x1^2) + (x2^2) + (x3^2) + (x1^2) + (x2^2) + (x3^2) + (x4^2))
#Sphere Function 2D:
Sphere2D = expression((x1^2) + (x2^2))
#Sphere Function 3D:
Sphere3D = expression((x1^2) + (x2^2) + (x3^2))
#Sphere Function 4D:
Sphere4D = expression((x1^2) + (x2^2) + (x3^2) + (x4^2))
#Trid function 2D:
Trid2D = expression(((x1 - 1)^2 + (x2 - 1)^2) - (x2 * x1))
#Trid function 3D:
Trid3D = expression(((x1 - 1)^2 + (x2 - 1)^2 + (x3 - 1)^2) - ((x2 * x1) + (x3 * x2)))
#Trid function 4D:
Trid4D = expression(((x1 - 1)^2 + (x2 - 1)^2 + (x3 - 1)^2 + (x4 - 1)^2) - ((x2 * x1) + (x3 * x2) + (x4 * x3)))
#Trid function 6D:
Trid6D = expression(((x1 - 1)^2 + (x2 - 1)^2 + (x3 - 1)^2 + (x4 - 1)^2 + (x5 - 1)^2 + (x6 - 1)^2) - ((x2 * x1) + (x3 * x2) + (x4 * x3) + (x5 * x4) + (x6 * x5)))

#' @export test1
#' @export test2
#' @export test3
#' @export test4
#' @export test5
#' @export test6
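# Expository sketch (this helper is not exported): the expressions above are
# consumed via deriv(), which turns them into code returning the function
# value together with a "gradient" attribute -- exactly what the optimizers
# in this package evaluate at every step.
demo_eval_test_function <- function()
{
  d <- data.frame(x1 = 1, x2 = 1)
  g <- deriv(test2, c("x1", "x2"))  # test2 is a Rosenbrock-type function with minimum at (1, 1)
  eval(g, d)                        # value 0 with a zero gradient at the minimum
}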
/scratch/gouwar.j/cran-all/cranData/BarBorGradient/R/Functions.R
#' @export Gradmod
Gradmod = function(exp,eps,G,B,m,x,v,n)
{
  npar <- length(x)
  rx = 0
  if(eps < 0)(return (print("The eps cannot be negative!")))
  if(n == 0) (n = Inf)
  i = 0
  dexp = deriv(exp,v)
  while(i < n)
  {
    i = i + 1
    dd <- data.frame(rbind(c(x)))
    colnames(dd) <- v
    szam = eval(dexp,dd)
    j = 0
    vege = 0
    # Stop when every partial derivative is below eps in absolute value.
    for(j in 1:npar)
    {
      if (abs(attr(szam,"gradient")[j]) >= eps ) {vege = 1}
    }
    if(vege == 0) {print("Stationary point: "); print(x); print("Function value: "); print(eval(exp,dd)); print("Steps: "); print(i); break}
    dk = - c(attr(szam,"gradient")[1:npar])
    # Backtracking line search: try the step lengths B^0, B^1, ..., B^m and
    # take the first (largest) one that satisfies the sufficient-decrease
    # (Armijo) condition.
    maxB = 0
    for(j in 0:m){
      aktB = B^j
      rx = x
      x = x + aktB * dk
      dd <- data.frame(rbind(c(x)))
      colnames(dd) <- v
      sz1 = eval(exp,dd)   # f(xk + aktB*dk)
      x = rx
      dd <- data.frame(rbind(c(x)))
      colnames(dd) <- v
      sz2 = eval(exp,dd)   # f(xk)
      a = 0
      for(l in 1:npar)
      {
        a = a + attr(szam,"gradient")[l] * dk[l]
      }
      if(sz1 - sz2 <= aktB * G * a) {maxB = aktB; break}
      if(j == m & maxB == 0) {maxB = aktB; print("The smallest beta had to be chosen!")}
    }
    x = x + maxB * dk
  }
  if(i == n){print(x);print(i)}
}
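# Usage sketch (added for illustration; the parameter values mirror
# SetVariablesForGradmod elsewhere in this package): gradient descent with
# backtracking over the step lengths B^0, B^1, ..., B^m.
demo_gradmod <- function()
{
  Gradmod(expression(x1^2 + 10 * x2^2),
          eps = 1e-10, G = 1e-2, B = 0.5, m = 20,
          x = c(1, 1), v = c("x1", "x2"), n = 10000)
}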
/scratch/gouwar.j/cran-all/cranData/BarBorGradient/R/Gradmod.R
#' @export Powell
Powell = function(exp,eps,G,eta,m,k,x,v,n)
{
  npar <- length(x)
  rx = 0
  if(eps < 0)(return (print("The eps cannot be negative!")))
  if(G > eta | eta > 1)(return (print("eta must be between G and 1!")))
  if(n == 0) (n = Inf)
  i = 0
  dexp = deriv(exp, v)
  while(i < n)
  {
    i = i + 1
    dd <- data.frame(rbind(c(x)))
    colnames(dd) <- v
    szam = eval(dexp,dd)
    j = 0
    vege = 0
    # Stop when every partial derivative is below eps in absolute value.
    for(j in 1:npar)
    {
      if (abs(attr(szam,"gradient")[j]) >= eps ) {vege = 1}
    }
    if(vege == 0) {print("Stationary point: "); print(x); print("Function value: "); print(eval(exp,dd)); print("Steps: "); print(i); break}
    dk = - c(attr(szam,"gradient")[1:npar])
    # The expression `exp` is passed through to the step-length search so the
    # objective can be evaluated there (the original code relied on `exp`
    # being visible inside szigkival, where it resolved to base::exp instead).
    szig = szigkival(exp,dk,szam,x,dexp,npar,G,eta,k,m,v)
    x = x + szig * dk
  }
  if(i == n){print(x);print(i)}
}

# Step-length selection following Powell's rules: first find a bracket
# [sziga, szigf] around an acceptable step, then bisect until both the
# sufficient-decrease and the curvature condition hold.
szigkival = function(exp,dk,szam,x,dexp,npar,G,eta,k,m,v)
{
  szig = 1
  sziga = 0
  szigf = 0
  rx = x
  x = x + szig * dk
  dd <- data.frame(rbind(c(x)))
  colnames(dd) <- v
  szam2 = eval(dexp,dd)
  sz1 = eval(exp,dd)   # f(xk + szig*dk)
  x = rx
  dd <- data.frame(rbind(c(x)))
  colnames(dd) <- v
  sz2 = eval(exp,dd)   # f(xk)
  a = 0
  for(l in 1:npar)
  {
    a = a + attr(szam,"gradient")[l] * dk[l]
  }
  if(sz1 - sz2 <= szig * G * a)
  {
    a = 0
    for(l in 1:npar)
    {
      a = a + attr(szam,"gradient")[l] * dk[l]
    }
    b = 0
    for(l in 1:npar)
    {
      b = b + attr(szam2,"gradient")[l] * dk[l]
    }
    if(eta * a <= b) (return (szig))
    # The unit step satisfies sufficient decrease: enlarge it until it fails.
    for(j in 1:k)
    {
      szigf = 2^j
      rx = x
      x = x + szigf * dk
      dd <- data.frame(rbind(c(x)))
      colnames(dd) <- v
      sz1 = eval(exp,dd)   # f(xk + szigf*dk)
      x = rx
      a = 0
      for(l in 1:npar)
      {
        a = a + attr(szam,"gradient")[l] * dk[l]
      }
      if(sz1 - sz2 <= szigf * G * a) {szig = szigf; sziga = szigf/2; break}
    }
  }
  else
  {
    # The unit step fails sufficient decrease: shrink it until it holds.
    for(j in 1:m)
    {
      sziga = (1/2)^j
      rx = x
      x = x + sziga * dk
      dd <- data.frame(rbind(c(x)))
      colnames(dd) <- v
      sz1 = eval(exp,dd)   # f(xk + sziga*dk)
      x = rx
      a = 0
      for(l in 1:npar)
      {
        a = a + attr(szam,"gradient")[l] * dk[l]
      }
      if(sz1 - sz2 <= sziga * G * a) {szig = sziga; szigf = sziga*2; break}
    }
  }
  h = 0
  max_iter = 100   # assumed cap on the bisection (the original compared h to the undefined name `max`)
  while(1)
  {
    h = h + 1
    if(h == max_iter) {print("Maximum number of bisection steps reached!"); break}
    szig = sziga
    rx = x
    x = x + szig * dk
    dd <- data.frame(rbind(c(x)))
    colnames(dd) <- v
    szam2 = eval(dexp,dd)
    x = rx
    a = 0
    for(l in 1:npar)
    {
      a = a + attr(szam,"gradient")[l] * dk[l]
    }
    b = 0
    for(l in 1:npar)
    {
      b = b + attr(szam2,"gradient")[l] * dk[l]
    }
    if(eta * a <= b) {break}
    szig = (sziga + szigf)/2
    rx = x
    x = x + szig * dk
    dd <- data.frame(rbind(c(x)))
    colnames(dd) <- v
    sz1 = eval(exp,dd)   # f(xk + szig*dk)
    x = rx
    a = 0
    for(l in 1:npar)
    {
      a = a + attr(szam,"gradient")[l] * dk[l]
    }
    if(sz1 - sz2 <= szig * G * a)
    {
      sziga = szig
    }
    else
    {
      szigf = szig
    }
  }
  return (sziga)
}
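# Usage sketch (added for illustration; the parameter values mirror
# SetVariablesForPowell elsewhere in this package): Powell's step-length rule
# with sufficient-decrease parameter G and curvature parameter eta.
demo_powell <- function()
{
  Powell(expression(x1^2 + 10 * x2^2),
         eps = 1e-10, G = 1e-2, eta = 2e-2, m = 20, k = 20,
         x = c(1, 1), v = c("x1", "x2"), n = 10000)
}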
/scratch/gouwar.j/cran-all/cranData/BarBorGradient/R/Powell.R
# These helpers record the parameter values used with the optimizers in this
# package. Note that they only create local variables: calling them does not
# modify the caller's workspace; each function invisibly returns its last
# assignment (the vector of variable names v).
SetVariablesForGradmod = function()
{
  eps = 10^-10
  G = 10^-2
  B = 0.5
  m = 20
  n = 10000
  x = c(1,1)
  v = c("x1","x2")
}

SetVariablesForPowell = function()
{
  eps = 10^-10
  G = 10^-2
  eta = G *2
  m = 20
  k = 20
  n = 10000
  max = 100
  x = c(1,1)
  v = c("x1","x2")
}

SetVariablesForBarBor = function()
{
  eps = 10^-10
  n = 10000
  x = c(1,2)
  v = c("x1","x2")
}

SetVectorTo2D = function()
{
  v = c("x1","x2")
}

SetVectorTo3D = function()
{
  v = c("x1","x2", "x3")
}

SetVectorTo4D = function()
{
  v = c("x1","x2","x3", "x4")
}

SetVectorTo5D = function()
{
  v = c("x1","x2","x3", "x4", "x5")
}

SetVectorTo6D = function()
{
  v = c("x1","x2","x3", "x4", "x5", "x6")
}
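# A sketch of an alternative pattern (not the package's API): since the
# helpers above only assign local variables, packaging the defaults in a
# list makes them actually reusable with do.call().
gradmod_defaults <- function()
{
  list(eps = 1e-10, G = 1e-2, B = 0.5, m = 20,
       x = c(1, 1), v = c("x1", "x2"), n = 10000)
}
# Example: do.call(Gradmod, c(list(exp = test1), gradmod_defaults()))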
/scratch/gouwar.j/cran-all/cranData/BarBorGradient/R/SetVariables.R
#' Calculation of Kmer Frequency Matrix from DNAbin for Both Reference and Query Sequences
#'
#' @description Calculation of kmer frequency matrices from DNAbin for both reference and query sequences.
#'
#' @param ref Object of class "DNAbin" used as a reference dataset, which contains taxon information.
#' @param que Object of class "DNAbin", which needs to be inferred.
#' @param kmer a numeric to indicate the length of kmer used.
#' @return kmer frequency matrices for both ref and que sequences, but only based on kmers found in ref!!!
#' new kmers in que will be ignored.
#' @keywords DNAbin2kmerFreqMatrix
#' @export
#'
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA. zhangab2008(at)mail.cnu.edu.cn
#' @references
#' zhangab2008(at)mail.cnu.edu.cn
#'
#' @examples
#' data(TibetanMoth)
#' ref<-as.DNAbin(as.character(TibetanMoth[1:50,]))
#' que<-as.DNAbin(as.character(TibetanMoth[51:60,]))
#' out<-DNAbin2kmerFreqMatrix(ref,que,kmer=3)
#' out
#'
DNAbin2kmerFreqMatrix<-function(ref,que,kmer=kmer){
  ### Returns kmer frequency matrices for both ref and que sequences, but only based on kmers found in ref!!!
  ### New kmers in que will be ignored.
  #require(seqinr)
  NAMES<-function(seqs){
    if(mode(seqs)=="raw"){
      SeqNames<-attr(seqs,"dimnames")[[1]]
    }else{ ### mode(seqs)=="list"
      SeqNames<-names(seqs)
    }
    #names(SeqNames)<-NULL
    if(length(SeqNames)==0) {
      stop("the mode(seqs) is wrong!")
    }else{
      return(SeqNames)
    }
  }
  ##########
  c2s<-function (chars = c("m", "e", "r", "g", "e", "d")) {
    return(paste(chars, collapse = ""))
  }
  ##########
  ### 1. check the format of the input arguments
  if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!")
  if (class(kmer)!="integer") kmer<-as.integer(kmer)
  ### 2. seek the unique kmer vector for all seqs: u.s
  seqs.as.char<-as.character(ref)
  seqs.as.char2<-as.character(que)
  ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s))
  ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s))
  seqs.unique.as.str.vector<-unique(seqs.as.str.vector)
  s<- seqs.unique.as.str.vector
  u.s0<-unique(substring(s, 1, kmer)) ### check along the column first!
  u.s<-u.s0
  for (i in 2:(max(nchar(s))-kmer+1)){
    u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) ### collect the unique strings
    u.s<-unique(u.s)
    n.char<-nchar(u.s)
    size.kmer.exact<-n.char==kmer ### logic to remove short kmers!
    u.s<-subset(u.s,size.kmer.exact)
  } ### the end of the for-loop
  ### do some cleaning by removing IUPAC codes and "-"
  mpattern<-"-+[a-z]*"
  u.s<-gsub(mpattern,NA,u.s) # strings with "-"
  mpattern<-"[rymkswhbvdn]+"
  u.s<-gsub(mpattern,NA,u.s) # strings with ambiguity codes
  u.s<- u.s[!is.na(u.s)]
  ### 3. calculate the kmer frequency for each sequence in ref
  #b<-gregexpr(u.s[1],seqs.as.str.vector[1])
  kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1])
  kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq))
  for (i in 1:length(kmer.freq.one.seq)){
    #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])
    ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]))
  }
  kmer.freq.one.seq3<- kmer.freq.one.seq2
  for (k in 2:length(seqs.as.str.vector)){
    kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k])
    #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1])
    kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq))
    for (i in 1:length(kmer.freq.one.seq)){
      ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]))
    }
    kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2)
  } ### end of the k-for-loop
  kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq))))
  #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!?
  #rownames(kmer.freq.matrix)<-rownames(ref)
  rownames(kmer.freq.matrix)<-NAMES(ref)
  ### 4. calculate the kmer frequency for each sequence in que
  kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[1])
  kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq))
  for (i in 1:length(kmer.freq.one.seq)){
    #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])
    ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]))
  }
  kmer.freq.one.seq3<- kmer.freq.one.seq2
  for (k in 2:length(seqs.as.str.vector2)){
    kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[k])
    kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq))
    for (i in 1:length(kmer.freq.one.seq)){
      ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]))
    }
    kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2)
  } ### end of the k-for-loop
  kmer.freq.matrix2<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq))))
  #rownames(kmer.freq.matrix2)<-rownames(que)
  rownames(kmer.freq.matrix2)<-NAMES(que)
  out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix, kmer.Freq.que=kmer.freq.matrix2)
  #cat(kmer.freq.matrix)
  #return(kmer.freq.matrix)
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/DNAbin2kmerFreqMatrix.R
#' Fuzzy Membership Function Value
#'
#' @description Calculates the fuzzy membership function (FMF) value given the distance from a query to a potential
#' species, the maximal intraspecific variation of the potential species theta1, and the minimal interspecific
#' distance (here, the distance between the potential species and its nearest neighbor) theta2
#' (fuzzy-set based method, Zhang et al. 2012); different definitions of distance could also be used.
#'
#' @param xtheta12 a numerical vector containing three elements: the distance from a query to a potential species,
#' the maximal (or sd of) intraspecific variation of the potential species theta1, and the minimal (or mean)
#' interspecific distance theta2.
#' @return a numeric between 0 and 1.
#' @keywords FMF
#' @export
#' @author Ai-bing ZHANG, Zhi-yong SHI. CNU, Beijing, CHINA, contact at zhangab2008(at)mail.cnu.edu.cn
#' @note different definitions of distances could also be used.
#' @references
#' Zhang, A. B., C. Muster, H.B. Liang, C.D. Zhu, R. Crozier, P. Wan, J. Feng, R. D. Ward.(2012). A fuzzy-set-theory-based approach
#' to analyse species membership in DNA barcoding. Molecular Ecology, 21(8):1848-63.
#' @examples
#'
#' xtheta12<-c(0.6289163,0.1465522,0.6379375)
#' FMF.out<-FMF(xtheta12)
#' FMF.out

FMF<-function(xtheta12){
  ###
  xtheta12<-as.numeric(xtheta12)
  if (class(xtheta12)!="numeric" ||length(xtheta12)!=3)
    stop("input should be a numeric vector with length of 3!!!")
  x<-xtheta12[1]
  theta1<-xtheta12[2]
  theta2<-xtheta12[3]
  # piecewise membership function of Zhang et al. (2012)
  if (x<=theta1) FMF<-1
  if (x>theta1 && x<=(theta2+theta1)/2) FMF<-1-2*((x-theta1)/(theta2-theta1))^2
  if (x<=theta2 && x>(theta2+theta1)/2) FMF<-2*((x-theta2)/(theta2-theta1))^2
  if (x>=theta2) FMF<-0
  return(FMF)
}
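# Worked check of the four piecewise regions (the theta values here are
# assumptions chosen for illustration; only the example vector in @examples
# comes from the original source). With theta1 = 0.15 and theta2 = 0.64 the
# midpoint is (theta1 + theta2)/2 = 0.395. Guarded by `if (FALSE)` so it
# never runs at load time.
if (FALSE) {
  FMF(c(0.10, 0.15, 0.64))  # x <= theta1             -> 1 (full membership)
  FMF(c(0.30, 0.15, 0.64))  # theta1 < x <= midpoint  -> 1 - 2*((x-theta1)/(theta2-theta1))^2
  FMF(c(0.50, 0.15, 0.64))  # midpoint < x <= theta2  -> 2*((x-theta2)/(theta2-theta1))^2
  FMF(c(0.70, 0.15, 0.64))  # x >= theta2             -> 0 (no membership)
}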
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/FMF.R
#' Calculate Intraspecific and Interspecific Variation #' #' @description Calculation intraspecific variation (sd) of the potential species theta1, and mean interspecific #' distance (here, the mean distance between the potential species and its nearest neighbor theta2) #' (fuzzy-set based method,slightly modified from Zhang et al. 2012). The calculation was done for all species in the #' reference dataset. #' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information. #' @return a data frame containing intraspecific (sd, theta1) and interspefic variation (mean) of all species, #' and their corresponding nearest neighbor (NN). #' @keywords FMFtheta12 #' @export #' @import stats #' @import graphics #' @import utils #' @import sp #' #' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA, contact at zhangab2008 (at) mail.cnu.edu.cn. #' @references #' Zhang, A. B., C. Muster, H.B. Liang, C.D. Zhu, R. Crozier, P. Wan, J. Feng, R. D. Ward.(2012). A fuzzy-set-theory-based approach #' to analyse species membership in DNA barcoding. Molecular Ecology, 21(8):1848-63. #' @examples #' #' data(TibetanMoth) #' ref<-as.DNAbin(as.character(TibetanMoth[1:50,])) #' FMF.theta12<-FMFtheta12(ref) #' FMF.theta12 FMFtheta12<-function(ref){ NAMES<-function(seqs){ if(mode(seqs)=="raw"){ SeqNames<-attr(seqs,"dimnames")[[1]] }else{ ### mode(seqs)=="list" SeqNames<-names(seqs) } #names(SeqNames)<-NULL if(length(SeqNames)==0) { stop("the mode(seqs) is wrong!") }else{ return(SeqNames) } } digitize.DNA<-function(seqs){ locus<-toupper(as.character(seqs)) digitized.DNA<-locus digitized.DNA[digitized.DNA=="A"]<-0.1 digitized.DNA[digitized.DNA=="T"]<-0.2 digitized.DNA[digitized.DNA=="G"]<-0.3 digitized.DNA[digitized.DNA=="C"]<-0.4 digitized.DNA[digitized.DNA=="-"]<-0.5 digitized.DNA[digitized.DNA=="N"]<-0.6 digitized.DNA[digitized.DNA=="R"]<-0 digitized.DNA[digitized.DNA=="Y"]<-0 digitized.DNA[digitized.DNA=="M"]<-0 digitized.DNA[digitized.DNA=="K"]<-0 digitized.DNA[digitized.DNA=="S"]<-0 digitized.DNA[digitized.DNA=="W"]<-0 digitized.DNA[digitized.DNA=="H"]<-0 digitized.DNA[digitized.DNA=="B"]<-0 digitized.DNA[digitized.DNA=="V"]<-0 digitized.DNA[digitized.DNA=="D"]<-0 digitized.DNA<-as.numeric(digitized.DNA) #digitized.DNA<-as.matrix(digitized.DNA) digitized.DNA2<-array(digitized.DNA,dim=dim(seqs)) dim(digitized.DNA2) return(digitized.DNA2) } eucl.dist.two.vect<-function(v1,v2){ v1minusv2<-v1-v2 squared.v1minusv2<-v1minusv2*v1minusv2 out.sqrt<-sqrt(sum(squared.v1minusv2)) return(out.sqrt) }### end of fucntion ### 2.1 dealing with input DNA data! ### 2.2 calculate species center vectors #morph.spe<-gsub(".+,","",rownames(Ref)) # remove sequence ID before "," morph.spe<-gsub(".+,","",NAMES(ref)) # remove sequence ID before "," #no.morph.spe<-length(unique(morph.spe)) ref<-ref[,seg.sites(ref)] ref<-digitize.DNA(ref) rownames(ref)<-morph.spe #sampleSpeNames #species.centers<-aggregate(scale(Ref),by=list(morph.spe),FUN="mean") species.centers<-aggregate(ref,by=list(morph.spe),FUN="mean") list.spe<-species.centers[,1] species.centers<-species.centers[,-1] ### 2.3 seek NN for PS (all species in this case!) ### 2.3.1.calculate pair distance of species centers units.dist<-dist(species.centers, method = "euclidean", diag = F, upper = T, p = 2) units.dist0<-units.dist units.dist<-as.matrix(units.dist) ### important! #units.dist0<-units.dist for (i in 1: nrow(units.dist)) {units.dist[i,i]<-NA} ##### ### 2.4. 
look for elements (their indices) with minimal distance to each other index1<-numeric(length(unique(morph.spe))) index2<-index1 min.dist<-index1 for (i in 1:nrow(units.dist)){ # i<-1 index1[i]<-i b<-which.min(units.dist[i,]) if (length(b)==0) {index2[i]<-NA min.dist[i]<-NA} else {index2[i]<-b min.dist[i]<-min(units.dist[i,],na.rm=T)} } ### for loop pairs<-rbind(index1,index2) pairs<-t(pairs) #class(pairs) pairs<-subset(pairs,subset=!is.na(pairs[,2])) theta1.tmp<-numeric(length(unique(morph.spe))) Spp<-morph.spe ### seqsRef$unit.classif #seqs<-scale(digitized.locus) ### seqsRef$data uniSpeNames<-unique(Spp) f<-factor(Spp) ##### ################################### ### popSize calculation ################################### popSize.PS<-as.numeric(table(Spp)) ################################### ### theta1,2 calculation ################################### ### for i<-1 #source("eucl.dist.two.vect.R") seqInOneSpe<-ref[grep(levels(f)[1], Spp, value = FALSE,fixed = TRUE),] #dim(seqInOneSpe)==NULL ifelse(popSize.PS[1]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ### ifelse(popSize.PS[1]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) ### centroid.spe0 - length.sites<-length(centroid.spe) ifelse(popSize.PS[1]==1,intra.dist<-0,intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe)) ### to the centroid ifelse(popSize.PS[1]==1,theta1.tmp[1]<-0,theta1.tmp[1]<-3*sd(intra.dist)) ### theta1 is sligthly different from #ifelse(popSize.PS[1]==1,theta1.tmp[1]<-0,theta1.tmp[1]<-max(intra.dist)) ### theta1 is sligthly different from for(i in 2:length(levels(f))){ cat(paste("i=",i),"\n") #seqInOneSpe<-sDNAbin[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] seqInOneSpe<-ref[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] ifelse(popSize.PS[i]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) #ifelse(popSize.PS[i]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ifelse(popSize.PS[i]==1,centroid.spe<-c(centroid.spe,seqInOneSpe),centroid.spe<-c(centroid.spe,apply(seqInOneSpe,2,mean))) #ifelse(dim(seqInOneSpe)==NULL,theta1.tmp[i]<-0,theta1.tmp[i]<-max(dist(seqInOneSpe))) # error! #ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-max(dist(seqInOneSpe))) ifelse(popSize.PS[i]==1,intra.dist<-eucl.dist.two.vect(seqInOneSpe,centroid.spe0),intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0)) #intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0) ### to the centroid #ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-max(intra.dist)) ### theta1 is sligthly different from ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-3*sd(intra.dist)) ### theta1 is sligthly different from } centroid.spe.matrix<-t(array(centroid.spe,dim=c(length.sites,length(centroid.spe)%/%length.sites))) ### 1.2 calculate theta2 for each pairt of species ################### #codes<-out.somu$out.som.unique$codes ### seqsRef$codes codes<-centroid.spe.matrix theta12.all<-data.frame(list.spe=list.spe,PS=pairs[,1],NN=pairs[,2],stringsAsFactors=TRUE) #source("eucl.dist.two.vect.R") theta2.tmp<-numeric(dim(pairs)[1]) for (i in 1:dim(pairs)[1]){ v1<-codes[pairs[i,1],] v2<-codes[pairs[i,2],] theta2.tmp[i]<-eucl.dist.two.vect(v1,v2) } theta12.all$theta1<-with(theta12.all,theta1.tmp) theta12.all$theta2<-with(theta12.all,theta2.tmp) theta12.all$popSize.PS<-with(theta12.all,popSize.PS) return(theta12.all) } ### the end of the function
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/FMFtheta12.R
#' Extracts Labels of Samples
#'
#' @description Extracts sequence names from objects of class "DNAbin", including those generated by fasta2DNAbin() (package:adegenet)
#' and read.dna() (package:ape).
#'
#' @param seqs object of class "DNAbin", generated from fasta2DNAbin() (package:adegenet) or read.dna() (package:ape).
#' @return a character string array/vector.
#' @keywords NAMES
#' @export
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA.
#' @references zhangab2008(at)mail.cnu.edu.cn
#' @examples
#'
#' data(TibetanMoth)
#' seqNames<-NAMES(TibetanMoth)
#' seqNames

NAMES<-function(seqs){
  if(mode(seqs)=="raw"){
    SeqNames<-attr(seqs,"dimnames")[[1]]
  }else{ ### mode(seqs)=="list"
    SeqNames<-names(seqs)
  }
  #names(SeqNames)<-NULL
  if(length(SeqNames)==0) {
    stop("the mode(seqs) is wrong!")
  }else{
    return(SeqNames)
  }
}
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/NAMES.R
#' TDR2 Species Membership Value
#'
#' @description Calculates the TDR value for a set of queries against one potential species. TDR values lie in the
#' range [0,1]: 0 indicates extremely weak species membership, while values close to 1 indicate strong species membership.
#'
#' @param oneSpe object of class "DNAbin" which contains DNA sequences from one species
#' @param que object of class "DNAbin" which contains DNA sequences from different samples (the queries)
#' @param boot a numeric value indicating times of resampling along sequence columns
#' @param boot2 a numeric value indicating times of resampling along sequence rows (different samples)
#' @return a numeric vector representing the TDR value of each query against the species
#' @keywords TDR2
#' @export
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA, contact at zhangab2008(at)mail.cnu.edu.cn
#' @references Jin, Q., L.J. He, A.B. Zhang (2012). A Simple 2D Non-Parametric Resampling Statistical Approach to
#' Assess Confidence in Species Identification in DNA Barcoding-An Alternative to Likelihood and Bayesian Approaches.
#' PLoS ONE 7(12): e50831. doi:10.1371/journal.pone.0050831. http://dx.plos.org/10.1371/journal.pone.0050831.
#' @note oneSpe and que should have the same sequence length, i.e., they should be aligned beforehand.
#' It is strongly recommended that oneSpe have a large enough sample size, e.g., 20.
#'
#' @examples
#'
#' data(TibetanMoth)
#' sampleSpeNames<-NAMES(TibetanMoth)
#' Spp<-gsub(".+,","",sampleSpeNames)
#' oneSpe<-TibetanMoth[grep("Macdunnoughia_crassisigna", Spp, value = FALSE,fixed = TRUE),]
#' oneSpe<-as.DNAbin(as.character(oneSpe[1:5,]))
#' que<-TibetanMoth[grep("Agrotis_justa", Spp, value = FALSE,fixed = TRUE),]
#' que2<-oneSpe[1:2,]
#' out<-TDR2(oneSpe,que, boot=10,boot2=10) ### true/false identification

TDR2<-function (oneSpe,que, boot,boot2){
  if (class(oneSpe)!="DNAbin"||class(que)!="DNAbin") stop("invalid sequence format! 
DNAbin format is required for DNA seqs!") if (dim(que)[2] != dim(oneSpe)[2]) warning("sequences in ref and que are different in length!") no.indi<-dim(oneSpe)[1] no.que<-dim(que)[1] if (no.indi<1) stop("oneSpe has at least 2 individuals, 20 recommended!") digitize.DNA<-function(seqs){ locus<-toupper(as.character(seqs)) digitized.DNA<-locus digitized.DNA[digitized.DNA=="A"]<-0.1 digitized.DNA[digitized.DNA=="T"]<-0.2 digitized.DNA[digitized.DNA=="G"]<-0.3 digitized.DNA[digitized.DNA=="C"]<-0.4 digitized.DNA[digitized.DNA=="-"]<-0.5 digitized.DNA[digitized.DNA=="N"]<-0.6 digitized.DNA[digitized.DNA=="R"]<-0 digitized.DNA[digitized.DNA=="Y"]<-0 digitized.DNA[digitized.DNA=="M"]<-0 digitized.DNA[digitized.DNA=="K"]<-0 digitized.DNA[digitized.DNA=="S"]<-0 digitized.DNA[digitized.DNA=="W"]<-0 digitized.DNA[digitized.DNA=="H"]<-0 digitized.DNA[digitized.DNA=="B"]<-0 digitized.DNA[digitized.DNA=="V"]<-0 digitized.DNA[digitized.DNA=="D"]<-0 digitized.DNA<-as.numeric(digitized.DNA) #digitized.DNA<-as.matrix(digitized.DNA) digitized.DNA2<-array(digitized.DNA,dim=dim(seqs)) dim(digitized.DNA2) return(digitized.DNA2) } eucl.dist.two.vect<-function(v1,v2){ v1minusv2<-v1-v2 squared.v1minusv2<-v1minusv2*v1minusv2 out.sqrt<-sqrt(sum(squared.v1minusv2)) return(out.sqrt) }### end of fucntion locus.as.character<-as.character(oneSpe) locus.as.character<-array(locus.as.character,c(dim(oneSpe)[1],dim(oneSpe)[2])) locus.as.character2<-as.character(que) locus.as.character2<-array(locus.as.character2,c(dim(que)[1],dim(que)[2])) #ks.best<-numeric(boot) #sil.ks.best<-numeric(boot) ### TDR<-numeric(no.que) for (j in 1:no.que){ # j<-1 oneSpe.que<-rbind(locus.as.character,locus.as.character2[j,]) ### oneSpe + one que accepted.times<-0 for (i in 1:boot) { ### i for oneSpe # i<-1 cat("i:",i,"\n") ### generate each resampled DNA sequence matrix for the species #resampled.locus.as.character<-locus.as.character[,sample(ncol(locus.as.character), replace = TRUE)] resampled.locus.as.character<-oneSpe.que[,sample(dim(oneSpe)[2], replace = TRUE)] # # j<-1 # resampled.locus.as.character.plus.one.query<-rbind(resampled.locus.as.character, # locus.as.character2[j,] # ) #dim(resampled.locus.as.character.plus.one.query) #resampled.locus.as.character.plus.one.query2<-as.DNAbin(resampled.locus.as.character.plus.one.query) resampled.locus.as.character.plus.one.query2<-as.DNAbin(resampled.locus.as.character) #rlacpoq.digitized ### calculate the distance of the que to the species firstly! rlacpoq.digitized<-digitize.DNA(resampled.locus.as.character.plus.one.query2) seqInOneSpe<-rlacpoq.digitized[1:no.indi,] que.tmp<-rlacpoq.digitized[(no.indi+1),] #centroid.spe<-apply(seqInOneSpe,2,mean) inter.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=que.tmp) dist.que2spe<-min(inter.dist) #dist.que2spe<-eucl.dist.two.vect(que.tmp,centroid.spe) cat("dist.que2spe:",dist.que2spe,"\n") ### vertical sampling for boot2 times with replacement dist.simu<-numeric(boot2) for (k in 1:boot2){ resampled2.locus.as.character.plus.one.query<-resampled.locus.as.character[sample(dim(oneSpe)[1]+1, replace = TRUE),] #resampled2.locus.as.character.plus.one.query<-resampled.locus.as.character.plus.one.query[sample(dim(oneSpe)[1]+1, replace = F),] resampled2.locus.as.character.plus.one.query2<-as.DNAbin(resampled2.locus.as.character.plus.one.query) #rlacpoq.digitized ### calculate the distance of the que to the species firstly! 
rlacpoq.digitized<-digitize.DNA(resampled2.locus.as.character.plus.one.query2) seqInOneSpe<-rlacpoq.digitized[1:no.indi,] que.tmp2<-rlacpoq.digitized[(no.indi+1),] #centroid.spe<-apply(seqInOneSpe,2,mean) inter.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=que.tmp2) dist.simu[k]<-min(inter.dist) #dist.simu[k]<-eucl.dist.two.vect(que.tmp2,centroid.spe) } ### the end of k-loop #ifelse(popSize.PS[1]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ### ci_dist.simu<-quantile(dist.simu,prob=c(0.025,0.975)) #ci_dist.simu[1] cat("ci_dist.simu",ci_dist.simu,"\n") ifelse(ci_dist.simu[1]<=dist.que2spe&&dist.que2spe<=ci_dist.simu[2], accepted.times<-accepted.times+1, accepted.times<-accepted.times) dist.simu }### the end of i-loop TDR[j]<-accepted.times/boot cat(j,":\n") cat("accepted.times",accepted.times,"\n") } ### the end of j-loop return(TDR) }
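# Illustrative follow-up to the @examples above (an assumption, not in the
# original source): que contains a different species, so its TDR values should
# be near 0, while que2 holds conspecific sequences and should give TDR values
# near 1. The objects oneSpe, que2 and out are those built in the @examples;
# the block is guarded by `if (FALSE)` so it never runs at load time.
if (FALSE) {
  out                                        # heterospecific queries: low TDR
  out2 <- TDR2(oneSpe, que2, boot = 10, boot2 = 10)
  out2                                       # conspecific queries: TDR close to 1
}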
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/TDR2.R
#' Barcodes Evaluation #' #' @description Evaluate two barcodes using species identification success rate criteria. #' #' @param barcode1 object of class "DNAbin" based on barcode1, which contains taxon information. #' @param barcode2 object of class "DNAbin" based on barcode2, which contains taxon information. #' @param kmer1 a numeric to indicate the length of kmer1 for barcode1, the optimal kmer could be found by the function #' optimize.kmer() before running this function. #' @param kmer2 a numeric to indicate the length of kmer2 for barcode2, see above. #' #' @return a list containing p_value of prop.test(), and so on. #' @keywords barcodes.eval #' @export #' @import ape #' @import class #' #' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA. #' @references zhangab2008 (at) mail. cnu. edu. cn. #' @seealso prop.test() #' @examples #' #' data(TibetanMoth) #' barcode1<-as.DNAbin(as.character(TibetanMoth[1:30,])) #' barcode2<-barcode1 #' b.eval<-barcodes.eval(barcode1,barcode2,kmer1=1,kmer2=3) #' b.eval barcodes.eval<-function (barcode1,barcode2,kmer1=kmer1,kmer2=kmer2){ set.seed(7) ### general #barcode1.IDs<-rownames(barcode1) #barcode2.IDs<-rownames(barcode2) NAMES<-function(seqs){ if(mode(seqs)=="raw"){ SeqNames<-attr(seqs,"dimnames")[[1]] }else{ ### mode(seqs)=="list" SeqNames<-names(seqs) } #names(SeqNames)<-NULL if(length(SeqNames)==0) { stop("the mode(seqs) is wrong!") }else{ return(SeqNames) } } barcode1.IDs<-NAMES(barcode1) barcode2.IDs<-NAMES(barcode2) barcode1<-del.gaps(barcode1) barcode2<-del.gaps(barcode2) names(barcode1)<-barcode1.IDs names(barcode2)<-barcode2.IDs ### functions: strings.equal<-function(str1,str2){ifelse(str1==str2,1,0)} ########## c2s<-function (chars = c("m", "e", "r", "g", "e", "d")) { return(paste(chars, collapse = "")) } ########## DNAbin2kmerFreqMatrix<-function(ref,kmer=kmer){ ### return kmer frequency matricies for both ref and que sequences, but only based on kmers found in ref!!! ### new kmers in que will be ignored #require(seqinr) #kmer<-3 ### 1. check the format of input arguments if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!") if (class(kmer)!="integer") kmer<-as.integer(kmer) ### 2. seek unique.kmer.vector for all seqs: u.s seqs.as.char<-as.character(ref) #seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s)) #ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) seqs.unique.as.str.vector<-unique(seqs.as.str.vector) s<- seqs.unique.as.str.vector u.s0<-unique(substring(s, 1, kmer)) ### check along the column first! u.s<-u.s0 for (i in 2:(max(nchar(s))-kmer+1)){ # i<-2 #u.s<-u.s0 ### unique str u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) u.s<-unique(u.s) n.char<-nchar(u.s) size.kmer.exact<-n.char==kmer ### logic to remove short kmer! u.s<-subset(u.s,size.kmer.exact) } ### the end of for-loop ### 3. 
calculate kmer frequency for each sequence in ref #b<-gregexpr(u.s[1],seqs.as.str.vector[1]) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? ifelse(length(names(ref))!=0, rownames(kmer.freq.matrix)<-names(ref), rownames(kmer.freq.matrix)<-rownames(ref)) out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix) #cat(kmer.freq.matrix) return(kmer.freq.matrix) #return(out) }# ### marker1 success.b1<-0 morph.spe<-gsub(".+,","",barcode1.IDs) # remove sequence ID before "," no.samples.b1<-length(barcode1) barcode0<-barcode1 for(i in 1:no.samples.b1){ # i<-1 barcode1<-barcode0 #barcode1.minus1<-barcode1[-i,] #barcode1<-as.matrix(barcode1) #barcode1.minus1<-barcode1[-i,] morph.spe.minus1<-morph.spe[-i] #length(morph.spe.minus1) kmer.freq.b0<-DNAbin2kmerFreqMatrix(barcode1,kmer=kmer1) #kmer.freq.b1<-DNAbin2kmerFreqMatrix(barcode1.minus1,kmer=kmer1) test<-kmer.freq.b0[i,] kmer.freq.b0.minus1<-kmer.freq.b0[-i,] #cat("i:",i,"\n") #cat("size.kmer.freq.b1=",dim(kmer.freq.b0)[1],"\n") Spp2<-as.factor(morph.spe.minus1) knn1<-knn(kmer.freq.b0.minus1, test, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) spe.Identified<-as.character(knn1) #success.b1<-strings.equal(as.character(Spp2)[i],spe.Identified)+ success.b1 success.b1<-strings.equal(morph.spe[i],spe.Identified)+ success.b1 } #success.b1 ### marker2 success.b2<-0 morph.spe2<-gsub(".+,","",barcode2.IDs) # remove sequence ID before "," no.samples.b2<-length(barcode2) barcode0<-barcode2 for(i in 1:no.samples.b2){ # i<-1 barcode1<-barcode0 #barcode1.minus1<-barcode1[-i,] #barcode1<-as.matrix(barcode1) #barcode1.minus1<-barcode1[-i,] morph.spe.minus1<-morph.spe2[-i] #length(morph.spe.minus1) kmer.freq.b0<-DNAbin2kmerFreqMatrix(barcode1,kmer=kmer1) #kmer.freq.b1<-DNAbin2kmerFreqMatrix(barcode1.minus1,kmer=kmer1) test<-kmer.freq.b0[i,] kmer.freq.b0.minus1<-kmer.freq.b0[-i,] cat("i:",i,"\n") #cat("size.kmer.freq.b1=",dim(kmer.freq.b0)[1],"\n") Spp2<-as.factor(morph.spe.minus1) knn1<-knn(kmer.freq.b0.minus1, test, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) spe.Identified<-as.character(knn1) #success.b1<-strings.equal(as.character(Spp2)[i],spe.Identified)+ success.b1 success.b2<-strings.equal(morph.spe2[i],spe.Identified)+ success.b2 } #success.b2 ### prop.test() success<-c(success.b1,success.b2) total<-c(no.samples.b1,no.samples.b2) out<-prop.test(success,total,alternative ="greater") out2<-list(X_squared = out$statistic, p.value = out$p.value, estimate = out$estimate, conf.int = out$conf.int) return(out2) }
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/barcodes.eval.R
#' Barcoding Gap Calculation
#'
#' @description Calculation of the DNA barcoding gap. Besides the K2P distance, raw and euclidean distances can
#' also be used to calculate the DNA barcoding gap.
#' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information.
#' @param dist a character string which takes one of ("raw","K80","euclidean").
#' @return a list giving summary statistics of the interspecific and intraspecific genetic distances,
#' such as the K2P distance.
#' @keywords barcoding gap
#' @export
#' @import ape
#' @import stats
#'
#'
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA, contact at zhangab2008(at)mail.cnu.edu.cn
#' @note the current version of the function can only be used for protein-coding barcodes,
#' such as COI. Future versions may incorporate calculations for non-coding barcodes, for
#' instance ITS1 and ITS2.
#' @references
#' Meyer, Christopher P., and Gustav Paulay. (2005). DNA barcoding: error rates based on
#' comprehensive sampling. PLoS Biology 3(12): e422.
#'
#' F.Jiang, Q. Jin, L. Liang, A.B. Zhang, and Z.H. Li. (2014). Existence of Species
#' Complex Largely Reduced Barcoding Success for Invasive Species of Tephritidae: A Case Study in
#' Bactrocera spp. Mol Ecol Resour. 14(6):1114-1128 DOI: 10.1111/1755-0998.12259.
#'
#'
#' @examples
#'
#' data(TibetanMoth)
#' TibetanMoth<-as.DNAbin(as.character(TibetanMoth[1:20,]))
#' b.gap<-barcoding.gap(ref=TibetanMoth,dist="K80")
#' b.gap

barcoding.gap<-function (ref,dist=dist){ ### dist: "raw", "K80", or "euclidean"
  # maximum intraspecific distance for each individual
  maxInDist <-function(distobj, sppVector = NULL, propZero = FALSE, rmNA = FALSE){
    dat <- as.matrix(distobj)
    if(length(sppVector) > 0) dimnames(dat)[[1]] <- sppVector
    conSpecDists <- list()
    for (i in 1:length(dimnames(dat)[[1]])) {
      conSpec <- dimnames(dat)[[1]] == dimnames(dat)[[1]][i]
      conSpecDists[[i]] <- max(dat[conSpec, i], na.rm = rmNA)
    }
    if (propZero) output <- length(which(unlist(conSpecDists) == 0))/length(unlist(conSpecDists))
    else output <- unlist(conSpecDists)
    output
  }
  # minimum non-conspecific (nearest-neighbor) distance for each individual
  nonConDist <-function(distobj, sppVector = NULL, propZero = FALSE, rmNA = FALSE){
    distobj <- as.matrix(distobj)
    if(length(sppVector) > 0) dimnames(distobj)[[1]] <- sppVector
    nonSpecDists <- list()
    for(i in 1:length(dimnames(distobj)[[1]])){
      nonSpec <- dimnames(distobj)[[1]] != dimnames(distobj)[[1]][i]
      nonSpecDists[[i]] <- min(distobj[nonSpec,i] , na.rm = rmNA)
    }
    if(propZero) output <- length(which(unlist(nonSpecDists) == 0))/length(unlist(nonSpecDists))
    else output <- unlist(nonSpecDists)
    output
  }
  NAMES<-function(seqs){
    if(mode(seqs)=="raw"){
      SeqNames<-attr(seqs,"dimnames")[[1]]
    }else{ ### mode(seqs)=="list"
      SeqNames<-names(seqs)
    }
    #names(SeqNames)<-NULL
    if(length(SeqNames)==0) {
      stop("the mode(seqs) is wrong!")
    }else{
      return(SeqNames)
    }
  }
  # intraspecific and interspecific distances between two species
  twoSpeDist<-function(sp1,sp2,dist=dist){ ### "raw", "k2p", "euclidean"
    ### sp1, sp2: DNAbin
    n1<-dim(sp1)[1]
    n2<-dim(sp2)[1]
    sp12<-rbind(sp1,sp2)
    if(dist=="euclidean"){
      digitize.DNA<-function(seqs){
        locus<-toupper(as.character(seqs))
        digitized.DNA<-locus
        digitized.DNA[digitized.DNA=="A"]<-0.1
        digitized.DNA[digitized.DNA=="T"]<-0.2
        digitized.DNA[digitized.DNA=="G"]<-0.3
        digitized.DNA[digitized.DNA=="C"]<-0.4
        digitized.DNA[digitized.DNA=="-"]<-0.5
        digitized.DNA[digitized.DNA=="N"]<-0
        digitized.DNA[digitized.DNA=="R"]<-0
        digitized.DNA[digitized.DNA=="Y"]<-0
        digitized.DNA[digitized.DNA=="M"]<-0
        digitized.DNA[digitized.DNA=="K"]<-0
        digitized.DNA[digitized.DNA=="S"]<-0
        digitized.DNA[digitized.DNA=="W"]<-0
        digitized.DNA[digitized.DNA=="H"]<-0
digitized.DNA[digitized.DNA=="B"]<-0 digitized.DNA[digitized.DNA=="V"]<-0 digitized.DNA[digitized.DNA=="D"]<-0 digitized.DNA<-as.numeric(digitized.DNA) #digitized.DNA<-as.matrix(digitized.DNA) digitized.DNA2<-array(digitized.DNA,dim=dim(seqs)) dim(digitized.DNA2) return(digitized.DNA2) } sp12.digitized<-digitize.DNA(sp12) #dist<-dist(sp12.digitized, method = "euclidean", diag = FALSE, upper = FALSE, p = 2) dist<-dist(sp12.digitized, method = "euclidean", diag = F, upper = F, p = 2) #dist<-dist.dna(sp12,model = "raw") ### "raw" "K80" #dist # class(dist) dist<-as.matrix(dist) diag(dist) <- NA #lower.tri(dist,diag = FALSE) inter<-dist[(n1+1):(n1+n2),1:n1] #inter inter<-list(inter) inter<-unlist(inter) #inter intra1<-dist[1:n1,1:n1] intra1[upper.tri(intra1)] <- NA #intra1 intra1<-list(intra1) intra1<-unlist(intra1) #intra1 intra2<-dist[(n1+1):(n1+n2),(n1+1):(n1+n2)] intra2[upper.tri(intra2)] <- NA # intra2 intra2<-list(intra2) intra2<-unlist(intra2) intra12<-c(intra1,intra2) intra12<-intra12[!is.na(intra12)] #intra12 out<-list(intra12,inter) }else{ ### "raw" "k2p" #dist<-dist.dna(sp12,model = "K80") ### "raw" "K80" dist<-dist.dna(sp12,model = dist) ### "raw" "K80" dist<-as.matrix(dist) diag(dist) <- NA inter<-dist[(n1+1):(n1+n2),1:n1] inter<-list(inter) inter<-unlist(inter) intra1<-dist[1:n1,1:n1] intra1[upper.tri(intra1)] <- NA #intra1 intra1<-list(intra1) intra1<-unlist(intra1) #intra1 intra2<-dist[(n1+1):(n1+n2),(n1+1):(n1+n2)] intra2[upper.tri(intra2)] <- NA # intra2 intra2<-list(intra2) intra2<-unlist(intra2) intra12<-c(intra1,intra2) intra12<-intra12[!is.na(intra12)] #intra12 out<-list(intra12,inter) } return(out) } ### the end of function twoSpeDist() ### "euclidean" multSpeDist<-function(ref,dist=dist){ ###iple sampleSpeNames<-attr(ref,"dimnames")[[1]] mpattern<-".+," #mpattern<-".+,Noctuidae_" #mpattern<-"Noctuidae_" Spp<-gsub(mpattern,"",sampleSpeNames) # remove seqs names before "," (incl.",") #Spp f<-factor(Spp) intra<-NULL inter<-NULL for(i in 1:(length(levels(f))-1)){ cat(paste("i=",i),"\n") seqInOneSpe<-ref[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] for(j in (i+1):length(levels(f))){ seqInOneSpe2<-ref[grep(levels(f)[j], Spp, value = FALSE,fixed = TRUE),] #t.d<-twoSpeDist(seqInOneSpe,seqInOneSpe2,dist="raw") ### "euclidean","K80","" t.d<-twoSpeDist(seqInOneSpe,seqInOneSpe2,dist=dist) ### "euclidean","K80","" intra<-c(intra,t.d[[1]]) inter<-c(inter,t.d[[2]]) } } out2<-list(intra,inter) return(out2) } ### "raw" m.d<-multSpeDist(ref,dist=dist) ### "raw", "K80" ref.IDs<-NAMES(ref) morph.spe<-gsub(".+,","",ref.IDs) # remove sequence ID before "," k2p.dist<-dist.dna(ref,model = "raw",pairwise.deletion = TRUE) inter<-nonConDist(k2p.dist,morph.spe,rmNA = T) intra<-maxInDist(k2p.dist,morph.spe,rmNA = T) intra<-m.d[[1]] inter<-m.d[[2]] h.inter<-hist(inter,freq = TRUE) h.intra<-hist(intra,freq = TRUE) ### plot max.x<-max(c(inter,intra)) #max.x<-median(c(inter,intra))*2 max.y<-max(c(h.inter$counts,h.intra$counts)) # plot(c(0,max.x),c(0,max.y*0.1),type="n", ## ver1.02 plot(c(0,max.x),c(0,max.y*0.5),type="n", xlab="genetic distance",ylab="Frequency", main="DNA barcoding gap analysis", #sub="red-intra,blue-inter" ) #title("DNA barcoding gap analysis") h.inter2<-hist(inter,freq = TRUE,breaks = "Sturges",col = "blue",border = "white",add = TRUE) #h.inter2<-hist(inter,freq = TRUE,breaks = 100,col = "blue",border = "white",add = TRUE)## ver1.02 #h.inter2<-hist(inter,freq = TRUE,col = "blue",border = "white",add = TRUE) #h.inter2<-hist(inter,freq = TRUE,breaks = 12,col = "blue",border = 
"white") h.inter.xfit<-seq(min(inter),max(inter),length=40) h.inter.yfit<-dnorm(h.inter.xfit,mean=mean(h.inter.xfit),sd=sd(h.inter.xfit)) h.inter.yfit<-h.inter.yfit*diff(h.inter2$mids[1:2])*length(inter) lines(h.inter.xfit,h.inter.yfit,col="blue",lwd=2) h.intra2<- hist(intra,freq = TRUE,breaks = "Sturges",col = "red",border = "white",add = TRUE) #h.intra2<- hist(intra,freq = TRUE,breaks = 100,col = "red",border = "white",add = TRUE) ## ver1.02 #h.intra2<- hist(intra,freq = TRUE,col = "red",border = "white",add = TRUE) h.intra.xfit<-seq(min(intra),max(intra),length=40) h.intra.yfit<-dnorm(h.intra.xfit,mean=mean(h.intra.xfit),sd=sd(h.intra.xfit)) h.intra.yfit<-h.intra.yfit*diff(h.intra2$mids[1:2])*length(intra) lines(h.intra.xfit,h.intra.yfit,col="red",lwd=2) legend(x = 0.65*max.x,y = 0.5*max.y, #legend(x = 0.70*max.x,y = 0.10*max.y, ## ver1.02 legend = c("intraspecific","interspecific"), pch = c(22,22), col = c("red","blue"), #bg = c("red","blue") ) ################## out<-list(c("inter","intra"),c(summary(inter),summary(intra))) return(out) } #b.gap<-barcoding.gap(ref,dist="raw") #b.gap<-barcoding.gap(ref,dist="K80") #b.gap<-barcoding.gap(ref,dist="euclidean")
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/barcoding.gap.R
#' Species Identification using Protein-coding Barcodes #' #' @description Species identification using protein-coding barcodes with different methods,including BP-based method #' (Zhang et al. 2008), fuzzy-set based method (Zhang et al. 2012), Bayesian-based method (Jin et al. 2013). #' #' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information. #' @param que object of class "DNAbin", whose identities (species names) need to be inferred. #' @param method a character string indicating which method will be used to train model and/or infer species membership. #' One of these methods ("fuzzyId", "bpNewTraining", "bpNewTrainingOnly", "bpUseTrained","Bayesian") should be specified. #' #' @return a list containing model parameters used, species identification success rates using references, #' query sequences, species inferred, and corresponding confidence levels #' (bp probability for BP-based method / FMF values for fuzzy set theory based method / posterior probability for Bayesian method) when available. #' #' @keywords BSI #' #' @export #' #' @import ape #' @import nnet #' @import class #' @import stats #' @import sp #' #' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA. #' zhangab2008(at)mail.cnu.edu.cn #' #' @note functions fasta2DNAbin() from package:adegenet and read.dna() from package:ape were used to obtain DNAbin object in our package. #' The former is used to read large aligned coding DNA barcodes, the latter unaligned ones. ref and que #' should be aligned with identical sequence length. We provided a pipeline to perform fast #' sequences alignment for reference and query sequences. Windows users could contact zhangab2008(at)mail.cnu.edu.cn #' for an exec version of the package. For very large DNA dataset, read.fas() package:phyloch is strongly suggested instead of #' fasta2DNAbin() since the latter is very slow. #' #' @references #' Zhang, A. B., M. D. Hao, C. Q. Yang, and Z. Y. Shi. (2017). BarcodingR: an integrated R package for species identification using DNA barcodes. Methods Ecol Evol. 8(5):627-634. #' https://besjournals.onlinelibrary.wiley.com/doi/10.1111/2041-210X.12682. #' #' Jin,Q., H.L. Han, X.M. Hu, X.H. Li,C.D. Zhu,S. Y. W. Ho, R. D. Ward, A.B. Zhang . (2013). Quantifying Species Diversity with a DNA Barcoding-Based Method: Tibetan Moth Species (Noctuidae) on the Qinghai-Tibetan Plateau. PloS One 8: e644. #' https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0064428. #' #' Zhang, A. B., C. Muster, H.B. Liang, C.D. Zhu, R. Crozier, P. Wan, J. Feng, R. D. Ward.(2012). A fuzzy-set-theory-based approach to analyse species membership in DNA barcoding. Molecular Ecology, 21(8):1848-63. #' https://onlinelibrary.wiley.com/doi/10.1111/j.1365-294X.2011.05235.x #' #' Zhang, A. B., D. S. Sikes, C. Muster, S. Q. Li. (2008). Inferring Species Membership using DNA sequences with Back-propagation Neural Networks. Systematic Biology, 57(2):202-215. 
#' https://besjournals.onlinelibrary.wiley.com/doi/10.1111/2041-210X.12682 #' @examples #' data(TibetanMoth) #' ref<-as.DNAbin(as.character(TibetanMoth[1:5,])) #' que<-as.DNAbin(as.character(TibetanMoth[50:55,])) #' bsi<-barcoding.spe.identify(ref, que, method = "fuzzyId") #' bsi #' bsi<-barcoding.spe.identify(ref, que, method = "bpNewTraining") #' bsi #' bsi<-barcoding.spe.identify(ref, que, method = "Bayesian") #' bsi #library(BarcodingR) barcoding.spe.identify<-function(ref, que, method = "bpNewTraining") {##"bpNewTraining", "bpUseTrained", "fuzzyId","Bayesian" #barcoding.species.identification<-function (ref, que, method = "bp") {##"fuzzyId","bp","Bayesian" # "bpNewTraining", ### train and identify at the same time # "bpNewTrainingOnly" ### just train the model for later use # "bpUseTrained", ### use the trained model in "bpNewTrainingOnly" which is save to a tmp file. if (dim(que)[2] != dim(ref)[2]) warning("sequences in ref and que are different in length!") digitize.DNA<-function(seqs){ locus<-toupper(as.character(seqs)) digitized.DNA<-locus digitized.DNA[digitized.DNA=="A"]<-0.1 digitized.DNA[digitized.DNA=="T"]<-0.2 digitized.DNA[digitized.DNA=="G"]<-0.3 digitized.DNA[digitized.DNA=="C"]<-0.4 digitized.DNA[digitized.DNA=="-"]<-0.5 digitized.DNA[digitized.DNA=="N"]<-0.6 digitized.DNA[digitized.DNA=="R"]<-0 digitized.DNA[digitized.DNA=="Y"]<-0 digitized.DNA[digitized.DNA=="M"]<-0 digitized.DNA[digitized.DNA=="K"]<-0 digitized.DNA[digitized.DNA=="S"]<-0 digitized.DNA[digitized.DNA=="W"]<-0 digitized.DNA[digitized.DNA=="H"]<-0 digitized.DNA[digitized.DNA=="B"]<-0 digitized.DNA[digitized.DNA=="V"]<-0 digitized.DNA[digitized.DNA=="D"]<-0 digitized.DNA<-as.numeric(digitized.DNA) #digitized.DNA<-as.matrix(digitized.DNA) digitized.DNA2<-array(digitized.DNA,dim=dim(seqs)) dim(digitized.DNA2) return(digitized.DNA2) } eucl.dist.two.vect<-function(v1,v2){ v1minusv2<-v1-v2 squared.v1minusv2<-v1minusv2*v1minusv2 out.sqrt<-sqrt(sum(squared.v1minusv2)) return(out.sqrt) }### end of fucntion strings.equal<-function(str1,str2){ifelse(str1==str2,1,0)} ############## naiveBayes <- function(x, ...) UseMethod("naiveBayes") naiveBayes.default <- function(x, y, laplace = 0, ...) { call <- match.call() Yname <- deparse(substitute(y)) x <- as.data.frame(x,stringsAsFactors=TRUE) ## estimation-function est <- function(var) if (is.numeric(var)) { cbind(tapply(var, y, mean, na.rm = TRUE), tapply(var, y, sd, na.rm = TRUE)) } else { tab <- table(y, var) (tab + laplace) / (rowSums(tab) + laplace * nlevels(var)) } ## create tables apriori <- table(y) tables <- lapply(x, est) ## fix dimname names for (i in 1:length(tables)) names(dimnames(tables[[i]])) <- c(Yname, colnames(x)[i]) names(dimnames(apriori)) <- Yname structure(list(apriori = apriori, tables = tables, levels = levels(y), call = call ), class = "naiveBayes" ) } naiveBayes.formula <- function(formula, data, laplace = 0, ..., subset, na.action = na.pass) { call <- match.call() Yname <- as.character(formula[[2]]) if (is.data.frame(data)) { ## handle formula m <- match.call(expand.dots = FALSE) m$... 
<- NULL m$laplace = NULL m$na.action <- na.action m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) Terms <- attr(m, "terms") if (any(attr(Terms, "order") > 1)) stop("naiveBayes cannot handle interaction terms") Y <- model.extract(m, "response") X <- m[,-attr(Terms, "response"), drop = FALSE] return(naiveBayes(X, Y, laplace = laplace, ...)) } else if (is.array(data)) { nam <- names(dimnames(data)) ## Find Class dimension Yind <- which(nam == Yname) ## Create Variable index deps <- strsplit(as.character(formula)[3], ".[+].")[[1]] if (length(deps) == 1 && deps == ".") deps <- nam[-Yind] Vind <- which(nam %in% deps) ## create tables apriori <- margin.table(data, Yind) tables <- lapply(Vind, function(i) (margin.table(data, c(Yind, i)) + laplace) / (as.numeric(apriori) + laplace * dim(data)[i])) names(tables) <- nam[Vind] structure(list(apriori = apriori, tables = tables, levels = names(apriori), call = call ), class = "naiveBayes" ) } else stop("naiveBayes formula interface handles data frames or arrays only") } print.naiveBayes <- function(x, ...) { cat("\nNaive Bayes Classifier for Discrete Predictors\n\n") cat("Call:\n") print(x$call) cat("\nA-priori probabilities:\n") print(x$apriori / sum(x$apriori)) cat("\nConditional probabilities:\n") for (i in x$tables) {print(i); cat("\n")} } predict.naiveBayes <- function(object, newdata, type = c("class", "raw"), threshold = 0.001, eps = 0, ...) { type <- match.arg(type) newdata <- as.data.frame(newdata,stringsAsFactors=TRUE) attribs <- match(names(object$tables), names(newdata)) isnumeric <- sapply(newdata, is.numeric) newdata <- data.matrix(newdata) L <- sapply(1:nrow(newdata), function(i) { ndata <- newdata[i, ] L <- log(object$apriori) + apply(log(sapply(seq_along(attribs), function(v) { nd <- ndata[attribs[v]] if (is.na(nd)) rep(1, length(object$apriori)) else { prob <- if (isnumeric[attribs[v]]) { msd <- object$tables[[v]] msd[, 2][msd[, 2] <= eps] <- threshold dnorm(nd, msd[, 1], msd[, 2]) } else object$tables[[v]][, nd] prob[prob <= eps] <- threshold prob } })), 1, sum) if (type == "class") L else { ## Numerically unstable: ## L <- exp(L) ## L / sum(L) ## instead, we use: sapply(L, function(lp) { 1/sum(exp(L - lp)) }) } }) if (type == "class") factor(object$levels[apply(L, 2, which.max)], levels = object$levels) else t(L) } ############## #source("aggregate.R") FMF<-function(xtheta12){ ### xtheta12<-as.numeric(xtheta12) if (class(xtheta12)!="numeric" ||length(xtheta12)!=3) stop("input should be a numeric vector with length of 3!!!") x<-xtheta12[1] theta1<-xtheta12[2] theta2<-xtheta12[3] ##### test: #x<-0.6289163 #theta1<-0.1465522 #theta2<-0.6379375 ##### test: if (x<=theta1) FMF<-1 if (x>theta1 && x<=(theta2+theta1)/2) FMF<-1-2*((x-theta1)/(theta2-theta1))^2 if (x<=theta2 && x>(theta2+theta1)/2) FMF<-2*((x-theta2)/(theta2-theta1))^2 if (x>=theta2) FMF<-0 return(FMF) } #[1] 0.6289163 0.1465522 0.6379375 sampleSpeNames<-attr(ref,"dimnames")[[1]] mpattern<-".+," #mpattern<-".+,Noctuidae_" #mpattern<-"Noctuidae_" #mpattern<-".+,[[:alpha:]]+_" Spp<-gsub(mpattern,"",sampleSpeNames) # remove seqs names before "," (incl.",") #Spp bp<-function(ref1,sampleSpeNames2, method = "bpNewTraining",que1){ # "bpNewTraining", ### train and identify at the same time # "bpNewTrainingOnly" ### just train the model for later use # "bpUseTrained", if(method == "bpNewTraining"){ ref1_tmp<-ref1[!is.na(ref1)] range1<-1./max(abs(ref1_tmp)) n.hidden<-ceiling(log2(dim(ref1)[1])) nnet.trained <- nnet(ref1, sampleSpeNames2, size = n.hidden, rang = range1, 
decay = 5e-5, maxit = 1e+6, abstol = 1.0e-8, reltol = 1.0e-8, MaxNWts = 20000) #decay = 5e-4, maxit = 1e+5,MaxNWts = 2000) spe.inferred0<-predict(nnet.trained, que1) spe.inferred<-spe.inferred0 #spe.inferred[spe.inferred>=0.95]<-1 #spe.inferred[spe.inferred<0.95]<-0 #spe.inferred #colnames(spe.inferred) #which.max(spe.inferred[1,]) inferred<-apply(spe.inferred,1,FUN=which.max) inferred.prob<-apply(spe.inferred,1,FUN=max) #inferred colnames(spe.inferred)[inferred] #spe.inferred0<-t(spe.inferred0) #bp.prob<-spe.inferred0[inferred] output.identified<-data.frame(queIDs=queIDs, spe.Identified=colnames(spe.inferred)[inferred], bp.prob=inferred.prob,stringsAsFactors=TRUE) rownames(output.identified)<-NULL ############################################################ ####### calculate model success rate using ref start... ########################################################### spe.inferred0.ref<-predict(nnet.trained, ref1) spe.inferred.ref<-spe.inferred0.ref inferred.ref<-apply(spe.inferred.ref,1,FUN=which.max) inferred.prob.ref<-apply(spe.inferred.ref,1,FUN=max) # length(inferred) #inferred p.ref<-colnames(spe.inferred.ref)[inferred.ref] #p.ref[1]<-"abz" spe.morph.Identified<-data.frame(Spp,p.ref,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates.ref<-matches[1]/matches[2] names(success.rates.ref)<-NULL ############################################################ ####### calculate model success rate using ref the end. ########################################################### out.bp<-list(summary.model=summary(nnet.trained),###convergence=nnet.trained$convergence, convergence=nnet.trained$convergence, success.rates.ref=success.rates.ref, output_identified=output.identified) #current.wd<-getwd() #setwd #Rhome<-R.home() ### 2020/4/13 21:06:46 Rhome<-tempdir() ### 2020/4/14 21:31:02 fileName<-"bbsi_tmp" fileName<-paste(Rhome,fileName,sep = "/")### 2020/4/13 21:06:46 fileName #fileName<-paste("simulation",i,sep = "") fileName<-paste(fileName,".RData",sep = "") fileName save(nnet.trained, out.bp, success.rates.ref, #unique.str.ref, #center.ref1, file = fileName) ## #save(x, y, file = "xy.RData") #return(out.bp) output2.identified<-out.bp } if(method == "bpNewTrainingOnly"){ ref1_tmp<-ref1[!is.na(ref1)] center.ref1<-apply(ref1,MARGIN=2,FUN=mean) range1<-1./max(abs(ref1_tmp)) n.hidden<-ceiling(log2(dim(ref1)[1])) nnet.trained <- nnet(ref1, sampleSpeNames2, size = n.hidden, rang = range1, decay = 5e-5, maxit = 1e+6, abstol = 1.0e-8, reltol = 1.0e-8, MaxNWts = 20000) ############################################################ ####### calculate model success rate using ref start... ########################################################### spe.inferred0.ref<-predict(nnet.trained, ref1) spe.inferred.ref<-spe.inferred0.ref inferred.ref<-apply(spe.inferred.ref,1,FUN=which.max) inferred.prob.ref<-apply(spe.inferred.ref,1,FUN=max) # length(inferred) #inferred p.ref<-colnames(spe.inferred.ref)[inferred.ref] #p.ref[1]<-"abz" spe.morph.Identified<-data.frame(Spp,p.ref,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates.ref<-matches[1]/matches[2] names(success.rates.ref)<-NULL ############################################################ ####### calculate model success rate using ref the end. 
########################################################### # fileName<-"bbsi_tmp" #fileName<-paste(fileName,".RData",sep = "") #Rhome<-R.home() ### 2020/4/13 21:06:46 Rhome<-tempdir() ### 2020/4/14 21:31:02 fileName<-"bbsi_tmp" fileName<-paste(Rhome,fileName,sep = "/")### 2020/4/13 21:06:46 fileName #fileName<-paste("simulation",i,sep = "") fileName<-paste(fileName,".RData",sep = "") fileName save(nnet.trained, success.rates.ref, #center.ref1, file = fileName) ## #save(x, y, file = "xy.RData") output2.identified<-"just saved to file!" }### if(method == "bpUseTrained"){ ##################### ### 2020/4/13 21:06:46 #Rhome<-R.home() ### 2020/4/13 21:06:46 Rhome<-tempdir() ### 2020/4/14 21:31:02 fileName<-"bbsi_tmp" fileName<-paste(Rhome,fileName,sep = "/")### 2020/4/13 21:06:46 fileName #fileName<-paste("simulation",i,sep = "") fileName<-paste(fileName,".RData",sep = "") fileName ############################ 2020/4/13 21:06:46 #if(!file.exists("bbsi_tmp.RData")) if(!file.exists(fileName)) ### stop("file bbsi_tmp.RData not found in R.home directory! you need to rebuild the model! ") #load("bbsi_tmp.RData") load(fileName) ####2020/4/13 21:14:15 queIDs<-attr(que,"dimnames")[[1]] spe.inferred0<-predict(nnet.trained, que1) spe.inferred<-spe.inferred0 #spe.inferred[spe.inferred>=0.95]<-1 #spe.inferred[spe.inferred<0.95]<-0 #spe.inferred #colnames(spe.inferred) #which.max(spe.inferred[1,]) inferred<-apply(spe.inferred,1,FUN=which.max) inferred.prob<-apply(spe.inferred,1,FUN=max) #inferred colnames(spe.inferred)[inferred] #spe.inferred0<-t(spe.inferred0) #bp.prob<-spe.inferred0[inferred] output.identified<-data.frame(queIDs=queIDs, spe.Identified=colnames(spe.inferred)[inferred], bp.prob=inferred.prob,stringsAsFactors=TRUE) rownames(output.identified)<-NULL output2.identified<-list(summary.model=summary(nnet.trained),###convergence=nnet.trained$convergence, convergence=nnet.trained$convergence, success.rates.ref=success.rates.ref, output_identified=output.identified) } class(output2.identified) <- c("BarcodingR") return(output2.identified) #return(output.identified) } #bp(ref1,sampleSpeNames2,que1) ############################# #Spp2<-as.factor(Spp) #rownames(ref1)<-Spp #sampleSpeNames #rownames(que1)<-queIDs fuzzyId<-function(ref1,Spp2,que1){ #library(class) knn1<-knn(ref1, que1, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) #knn1 spe.Identified<-as.character(knn1) #rownames(ref1)<-sampleSpeNames # Ref<-ref1 FMFtheta12<-function(Ref){ # if (class(seqsRef)!="matrix") # stop("input should be an object of matrix!") ### 2.1 dealing with input DNA data! ### 2.2 calculate species center vectors morph.spe<-gsub(".+,","",rownames(Ref)) # remove sequence ID before "," #no.morph.spe<-length(unique(morph.spe)) #species.centers<-aggregate(scale(Ref),by=list(morph.spe),FUN="mean") species.centers<-aggregate(Ref,by=list(morph.spe),FUN="mean") #class(species.centers) #head(species.centers) #dim(species.centers) list.spe<-species.centers[,1] species.centers<-species.centers[,-1] ### 2.3 seek NN for PS (all species in this case!) ### 2.3.1.calculate pair distance of species centers units.dist<-dist(species.centers, method = "euclidean", diag = F, upper = T, p = 2) units.dist0<-units.dist units.dist<-as.matrix(units.dist) ### important! #units.dist0<-units.dist for (i in 1: nrow(units.dist)) {units.dist[i,i]<-NA} ##### ### 2.4. 
look for elements (their indices) with minimal distance to each other index1<-numeric(length(unique(morph.spe))) index2<-index1 min.dist<-index1 for (i in 1:nrow(units.dist)){ # i<-1 index1[i]<-i b<-which.min(units.dist[i,]) if (length(b)==0) {index2[i]<-NA min.dist[i]<-NA} else {index2[i]<-b min.dist[i]<-min(units.dist[i,],na.rm=T)} } ### for loop pairs<-rbind(index1,index2) pairs<-t(pairs) #class(pairs) pairs<-subset(pairs,subset=!is.na(pairs[,2])) theta1.tmp<-numeric(length(unique(morph.spe))) Spp<-morph.spe ### seqsRef$unit.classif #seqs<-scale(digitized.locus) ### seqsRef$data #dim(seqs) uniSpeNames<-unique(Spp) #table(Spp) f<-factor(Spp) ##### ################################### ### popSize calculation ################################### popSize.PS<-as.numeric(table(Spp)) ################################### ### theta1,2 calculation ################################### ### for i<-1 #source("eucl.dist.two.vect.R") #seqInOneSpe<-Ref[grep(levels(f)[1], Spp, value = FALSE,fixed = TRUE),] seqInOneSpe<-Ref[Spp %in% levels(f)[1],] #dim(seqInOneSpe)==NULL ifelse(popSize.PS[1]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ### ifelse(popSize.PS[1]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) ### centroid.spe0 - length.sites<-length(centroid.spe) ifelse(popSize.PS[1]==1,intra.dist<-0,intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe)) ### to the centroid ifelse(popSize.PS[1]==1,theta1.tmp[1]<-0,theta1.tmp[1]<-3*sd(intra.dist)) ### theta1 is sligthly different from #ifelse(popSize.PS[1]==1,theta1.tmp[1]<-0,theta1.tmp[1]<-max(intra.dist)) ### theta1 is sligthly different from ###### for(i in 2:length(levels(f))){ #i<-2 # i<-9 #i=3 #cat(paste("i=",i),"\n") # cat("\n") #seqInOneSpe<-sDNAbin[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] #seqInOneSpe<-Ref[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] seqInOneSpe<-Ref[Spp %in% levels(f)[i],] ifelse(popSize.PS[i]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) #ifelse(popSize.PS[i]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ifelse(popSize.PS[i]==1,centroid.spe<-c(centroid.spe,seqInOneSpe),centroid.spe<-c(centroid.spe,apply(seqInOneSpe,2,mean))) #length(seqInOneSpe) #ifelse(dim(seqInOneSpe)==NULL,theta1.tmp[i]<-0,theta1.tmp[i]<-max(dist(seqInOneSpe))) # error! 
#ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-max(dist(seqInOneSpe))) ifelse(popSize.PS[i]==1,intra.dist<-eucl.dist.two.vect(seqInOneSpe,centroid.spe0),intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0)) #intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0) ### to the centroid #ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-max(intra.dist)) ### theta1 is sligthly different from ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-3*sd(intra.dist)) ### theta1 is sligthly different from #theta1.tmp[i]<-max(dist(seqInOneSpe)) } centroid.spe.matrix<-t(array(centroid.spe,dim=c(length.sites,length(centroid.spe)%/%length.sites))) ### 1.2 calculate theta2 for each pairt of species ################### #codes<-out.somu$out.som.unique$codes ### seqsRef$codes codes<-centroid.spe.matrix # dim(codes) theta12.all<-data.frame(list.spe=list.spe,PS=pairs[,1],NN=pairs[,2],stringsAsFactors=TRUE) #source("eucl.dist.two.vect.R") theta2.tmp<-numeric(dim(pairs)[1]) for (i in 1:dim(pairs)[1]){ # i<-1 #v1<-codes[i, pairs[i,1]] #v2<-codes[i, pairs[i,2]] v1<-codes[pairs[i,1],] v2<-codes[pairs[i,2],] theta2.tmp[i]<-eucl.dist.two.vect(v1,v2) } theta12.all$theta1<-with(theta12.all,theta1.tmp) theta12.all$theta2<-with(theta12.all,theta2.tmp) theta12.all$popSize.PS<-with(theta12.all,popSize.PS) #theta12.all return(theta12.all) #class(theta12.all) } ### the end of the function FMF.theta12<-FMFtheta12(ref1) #mpattern<-".+,Noctuidae_" #mpattern<-"Noctuidae_" #mpattern<-"Noctuidae_" #FMF.theta12$list.spe<-gsub(mpattern,"",FMF.theta12$list.spe) # remove seqs names before "," (incl.",") #FMF.theta12$list.spe FMF.que<-numeric(dim(que1)[1]) for(j in 1:dim(que1)[1]){ #j<-81 #seqInOneSpe<-ref1[grep(spe.Identified[j], FMF.theta12$list.spe, value = FALSE,fixed = TRUE),] seqInOneSpe<-ref1[grep(spe.Identified[j], rownames(ref1), value = FALSE,fixed = TRUE),] #class(FMF.theta12$list.spe) k<-match(x=spe.Identified[j], table=FMF.theta12$list.spe) #ifelse(FMF.theta12$popSize.PS[k]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) ifelse(FMF.theta12$popSize.PS[k]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) que2PS.dist<-eucl.dist.two.vect(que1[j,],centroid.spe0) #que2PS.dist<-0 #intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0) xtheta12<-c(que2PS.dist,FMF.theta12$theta1[k],FMF.theta12$theta2[k]) FMF.que[j]<-FMF(xtheta12) } #FMF.theta12<-FMFtheta12(ref1) #sub.FMF.theta12<-subset(FMF.theta12,popSize.PS>1) #average.theta1<-mean(sub.FMF.theta12$theta1) #### calculate success.rates.ref knn1<-knn(ref1, ref1, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) #knn1 spe.Identified.ref<-as.character(knn1) spe.morph.Identified<-data.frame(Spp,spe.Identified.ref,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates.ref<-matches[1]/matches[2] names(success.rates.ref)<-NULL output.identified<-data.frame(queIDs=queIDs, spe.Identified=spe.Identified, FMF=FMF.que,stringsAsFactors=TRUE) out<-list(success.rates.ref=success.rates.ref, #output_identified.ref=output.identified.ref, output_identified=output.identified) #Bayesian.prob=Bayesian.prob) # return(output.identified) class(out) <- c("BarcodingR") return(out) ################## # output.identified<-data.frame(queIDs=queIDs, # spe.Identified=spe.Identified, # FMF=FMF.que) #Bayesian.prob=Bayesian.prob) # return(output.identified) } #fuzzy.Id<-fuzzyId(ref1,Spp2,que1) 
Bayesian<-function(ref,Spp,que){ rq<-rbind(ref,que) rq<-rq[,seg.sites(rq)] # remove constant sites! queIDs<-attr(que,"dimnames")[[1]] ref.constant.sites.removed<-rq[1:dim(ref)[1],] que.constant.sites.removed<-rq[-(1:dim(ref)[1]),] #rownames(ref.constant.sites.removed) #rownames(que.constant.sites.removed) ref2<-as.character(ref.constant.sites.removed) ref2<-as.data.frame(ref2,stringsAsFactors=TRUE) que2<-as.character(que.constant.sites.removed) que2<-as.data.frame(que2,stringsAsFactors=TRUE) rq2<-rbind(ref2,que2) Spp<-c(Spp,queIDs) rq2$species<-as.factor(Spp) ref3<-rq2[1:dim(ref2)[1],] #rq2$species ref3$species<-with(ref3,as.factor(gsub(".+,","",sampleSpeNames))) #ref3$species que3<-rq2[-(1:dim(ref2)[1]),] del<-dim(que3)[2] que3<-que3[,-del] head(ref3);class(ref3);dim(ref3) rownames(ref3)<-1:dim(ref3)[1] Bayesian.trained <- naiveBayes(species ~ ., data = ref3) #Bayesian.trained <- naiveBayes(species ~ ., data = ref3,type = "raw") #model <- naiveBayes(species ~ ., data = training.set) #attributes(Bayesian.trained) #attr(Bayesian.trained,"apriori")[[1]] spe.inferred<-predict(Bayesian.trained, que3) spe.inferred.prob<-predict(Bayesian.trained, que3, type = "raw") #dim(que3) #attributes(spe.inferred) Bayesian.prob<-apply(spe.inferred.prob,1,max) #spe.inferred<-predict(Bayesian.trained, que3,type = "raw") #spe.inferred spe.inferred<-as.character(spe.inferred) output.identified<-data.frame(queIDs=queIDs, spe.Identified=spe.inferred, Bayesian.prob=Bayesian.prob,stringsAsFactors=TRUE) #class(output.identified) <- c("BarcodingR") out<-list(output_identified=output.identified) #Bayesian.prob=Bayesian.prob) # return(output.identified) class(out) <- c("BarcodingR") return(out) } #Bayesian(ref,Spp,que) # bp(ref1,sampleSpeNames2,ref1) #bpNewTraining, bpUseTrained method <- pmatch(method, c("bpNewTraining", ### train and identify at the same time "bpNewTrainingOnly", ### just train the model for later use "bpUseTrained", "fuzzyId", "Bayesian", "all")) if (is.na(method)) stop("invalid method") if (method == -1) stop("ambiguous method") if (method == 1){ ### "bpNewTraining", ### train and identify at the same time sampleSpeNames<-attr(ref,"dimnames")[[1]] queIDs<-attr(que,"dimnames")[[1]] mpattern<-".+," #mpattern<-".+,Noctuidae_" #mpattern<-"Noctuidae_" Spp<-gsub(mpattern,"",sampleSpeNames) # remove seqs names before "," (incl.",") Spp #length(unique(Spp)) sampleSpeNames2<-class.ind(Spp) ### rq rq<-rbind(ref,que) rq<-rq[,seg.sites(rq)] # remove constant sites! ref1<-rq[1:dim(ref)[1],] que1<-rq[-(1:dim(ref)[1]),] ref1<-digitize.DNA(ref1) #names(ref)<-sampleSpeNames que1<-digitize.DNA(que1) out<-bp(ref1,sampleSpeNames2,method = "bpNewTraining",que1) } if (method == 2){ ###"bpNewTrainingOnly" ### just train the model for later use sampleSpeNames<-attr(ref,"dimnames")[[1]] #queIDs<-attr(que,"dimnames")[[1]] mpattern<-".+," Spp<-gsub(mpattern,"",sampleSpeNames) # remove seqs names before "," (incl.",") Spp sampleSpeNames2<-class.ind(Spp) ref1<-digitize.DNA(ref) #que1<-digitize.DNA(que1) out<-bp(ref1,sampleSpeNames2,method = "bpNewTrainingOnly",que1) } if (method == 3){ ###"bpUseTrained" #que <-fasta2DNAbin("que.fas") que1<-digitize.DNA(que) out<-bp(ref1,sampleSpeNames2,method = "bpUseTrained",que1) } if (method == 4){### "fuzzyId" rq<-rbind(ref,que) rq<-rq[,seg.sites(rq)] # remove constant sites! 
ref1<-rq[1:dim(ref)[1],] que1<-rq[-(1:dim(ref)[1]),] ref1<-digitize.DNA(ref1) #names(ref)<-sampleSpeNames que1<-digitize.DNA(que1) Spp2<-as.factor(Spp) rownames(ref1)<-Spp #sampleSpeNames queIDs<-attr(que,"dimnames")[[1]] rownames(que1)<-queIDs out<-fuzzyId(ref1,Spp2,que1) } if (method == 5) out<-Bayesian(ref,Spp,que) return(out) } #setwd("C:/R/myRprojects/SpeDelimitation/SOFM") #source("run.time.R") #start.time<-Sys.time() ##### #bsi<-barcoding.spe.identify(ref, que, method = "bpNewTraining") ##### #setwd("C:/R/myRprojects/SpeDelimitation/SOFM") #time.elapsed<-run.time(start.time) #cat("Time used : H:M:S") #time.elapsed
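### --- Added dormant usage sketch (assumptions noted) ---
### How the method dispatch above is typically driven: reference labels are
### assumed to follow "seqID,Family_Genus_species", since species names are
### recovered with gsub(".+,", "", ...). TibetanMoth is the package's own
### example dataset; the block is wrapped in if (FALSE) so nothing runs
### when this file is sourced.
if (FALSE) {
  data(TibetanMoth)
  ref <- as.DNAbin(as.character(TibetanMoth[1:50, ]))
  que <- as.DNAbin(as.character(TibetanMoth[51:60, ]))
  bsi <- barcoding.spe.identify(ref, que, method = "bpNewTraining")
  bsi
}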
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/barcoding.spe.identify.R
#' Species Identification Based on Fuzzy-set Method and kmer
#' @description Species identification based on the fuzzy-set method (Zhang et al. 2012) and kmer counts.
#' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information.
#' @param que object of class "DNAbin", whose identities (species names) need to be inferred.
#' @param kmer a numeric giving the maximum kmer length to try (from 1 to kmer) when
#' optimization = TRUE; otherwise, the exact kmer length to use.
#' @param optimization a logical indicating whether kmer lengths from 1 up to kmer are
#' evaluated (TRUE) or only the specified kmer length is used (FALSE).
#' @return a list indicating the identified species.
#' @keywords barcoding.spe.identify2
#' @export
#' @import class
#' @import sp
#' @author Ai-bing ZHANG, Cai-qing YANG, Meng-di HAO, CNU, Beijing, CHINA, contact at zhangab2008 (at) mail. cnu. edu. cn.
#' @note read.dna() from package {ape} was used to obtain DNAbin objects for unaligned non-coding barcodes.
#'
#' @references
#'
#' Zhang, A. B., M. D. Hao, C. Q. Yang, and Z. Y. Shi. (2017). BarcodingR: an integrated R package for species identification using DNA barcodes. Methods Ecol Evol. 8(5):627-634.
#' https://besjournals.onlinelibrary.wiley.com/doi/10.1111/2041-210X.12682.
#'
#' Jin, Q., H. L. Han, X. M. Hu, X. H. Li, C. D. Zhu, S. Y. W. Ho, R. D. Ward, A. B. Zhang. (2013). Quantifying Species Diversity with a DNA Barcoding-Based Method: Tibetan Moth Species (Noctuidae) on the Qinghai-Tibetan Plateau. PloS One 8: e644.
#' https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0064428.
#'
#' Zhang, A. B., C. Muster, H. B. Liang, C. D. Zhu, R. Crozier, P. Wan, J. Feng, R. D. Ward. (2012). A fuzzy-set-theory-based approach to analyse species membership in DNA barcoding. Molecular Ecology, 21(8):1848-63.
#' https://onlinelibrary.wiley.com/doi/10.1111/j.1365-294X.2011.05235.x
#'
#' Zhang, A. B., D. S. Sikes, C. Muster, S. Q. Li. (2008). Inferring Species Membership using DNA sequences with Back-propagation Neural Networks. Systematic Biology, 57(2):202-215.
#' https://besjournals.onlinelibrary.wiley.com/doi/10.1111/2041-210X.12682
#' @examples
#'
#' data(pineMothITS2)
#' ref<-pineMothITS2
#' que<-ref
#' spe.id<-barcoding.spe.identify2(ref,que, kmer = 1, optimization = FALSE)
#' spe.id
#'
barcoding.spe.identify2<-function (ref, que, kmer = kmer, optimization = TRUE) {
  # optimization: fixed kmer length vs. optimal kmer length
  set.seed(7)
  ## recover sequence labels whether ref/que are matrices or lists
  ref.IDs<-rownames(ref)
  que.IDs<-rownames(que)
  if(length(ref.IDs)==0) ref.IDs<-names(ref)
  if(length(que.IDs)==0) que.IDs<-names(que)
  if(length(ref.IDs)!=0){
    que.IDs<-rownames(que)
    if(length(que.IDs)==0) que.IDs<-names(que)
  }
  ##########
  c2s<-function (chars = c("m", "e", "r", "g", "e", "d")) {
    return(paste(chars, collapse = ""))
  }
  ##########
  morph.spe<-gsub(".+,","",ref.IDs) # remove sequence ID before ","
  Spp2<-as.factor(morph.spe)
  len.shortest.seq<-min(as.numeric(summary(ref)[,1]))
  #if (kmer>0.05*len.shortest.seq)
  #  stop("kmer is too large, it will take lots of time to run! a value less than 10 is suggested!")
  #################################################
  ### functions used!
  #################################################
  DNAbin2kmerFreqMatrix2<-function(ref,que,kmer=kmer){
    ### return kmer frequency matrices for both ref and que sequences, but only based on kmers found in ref!!!
### new kmers in que will be ignored #require(seqinr) #kmer<-1 ### 1. check the format of input arguments if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!") if (class(kmer)!="integer") kmer<-as.integer(kmer) ### 2. seek unique.kmer.vector for all seqs: u.s seqs.as.char<-as.character(ref) seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s)) ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) seqs.unique.as.str.vector<-unique(seqs.as.str.vector) s<- seqs.unique.as.str.vector u.s0<-unique(substring(s, 1, kmer)) ### check along the column first! u.s<-u.s0 for (i in 2:(max(nchar(s))-kmer+1)){ # i<-2 #u.s<-u.s0 ### unique str u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) u.s<-unique(u.s) n.char<-nchar(u.s) size.kmer.exact<-n.char==kmer ### logic to remove short kmer! u.s<-subset(u.s,size.kmer.exact) } ### the end of for-loop ### do some cleaning by removing IUPAC codes, "-" mpattern<-"-+[a-z]*" u.s<-gsub(mpattern,NA,u.s) # strings with "-" mpattern<-"[rymkswhbvdn]+" u.s<-gsub(mpattern,NA,u.s) # strings with "-" u.s<- u.s[!is.na(u.s)] ### 3. calculate kmer frequency for each sequence in ref #b<-gregexpr(u.s[1],seqs.as.str.vector[1]) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? rownames(kmer.freq.matrix)<-rownames(ref) if(length(rownames(kmer.freq.matrix))==0) rownames(kmer.freq.matrix)<-names(ref) ### 4. 
calculate kmer frequency for each sequence in que kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector2)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix2<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? rownames(kmer.freq.matrix2)<-rownames(que) if(length(rownames(kmer.freq.matrix2))==0) rownames(kmer.freq.matrix2)<-names(que) out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix, kmer.Freq.que=kmer.freq.matrix2) #cat(kmer.freq.matrix) #return(kmer.freq.matrix) return(out) }### the end of function strings.equal<-function(str1,str2){ifelse(str1==str2,1,0)} FMF<-function(xtheta12){ ### xtheta12<-as.numeric(xtheta12) if (class(xtheta12)!="numeric" ||length(xtheta12)!=3) stop("input should be a numeric vector with length of 3!!!") x<-xtheta12[1] theta1<-xtheta12[2] theta2<-xtheta12[3] ##### test: #x<-0.6289163 #theta1<-0.1465522 #theta2<-0.6379375 ##### test: if (x<=theta1) FMF<-1 if (x>theta1 && x<=(theta2+theta1)/2) FMF<-1-2*((x-theta1)/(theta2-theta1))^2 if (x<=theta2 && x>(theta2+theta1)/2) FMF<-2*((x-theta2)/(theta2-theta1))^2 if (x>=theta2) FMF<-0 return(FMF) } FMFtheta12<-function(Ref){ # if (class(seqsRef)!="matrix") # stop("input should be an object of matrix!") ### 2.1 dealing with input DNA data! ### 2.2 calculate species center vectors # rm(ref) # Ref<-pineMothITS2 morph.spe<-gsub(".+,","",rownames(Ref)) # remove sequence ID before "," if(length(morph.spe)==0) morph.spe<-gsub(".+,","",names(Ref)) #no.morph.spe<-length(unique(morph.spe)) #species.centers<-aggregate(scale(Ref),by=list(morph.spe),FUN="mean") species.centers<-aggregate(Ref,by=list(morph.spe),FUN="mean") list.spe<-species.centers[,1] species.centers<-species.centers[,-1] ### 2.3 seek NN for PS (all species in this case!) ### 2.3.1.calculate pair distance of species centers units.dist<-dist(species.centers, method = "euclidean", diag = F, upper = T, p = 2) units.dist0<-units.dist units.dist<-as.matrix(units.dist) ### important! #units.dist0<-units.dist for (i in 1: nrow(units.dist)) {units.dist[i,i]<-NA} ##### ### 2.4. 
look for elements (their indices) with minimal distance to each other index1<-numeric(length(unique(morph.spe))) index2<-index1 min.dist<-index1 for (i in 1:nrow(units.dist)){ # i<-1 index1[i]<-i b<-which.min(units.dist[i,]) if (length(b)==0) {index2[i]<-NA min.dist[i]<-NA} else {index2[i]<-b min.dist[i]<-min(units.dist[i,],na.rm=T)} } ### for loop pairs<-rbind(index1,index2) pairs<-t(pairs) #class(pairs) pairs<-subset(pairs,subset=!is.na(pairs[,2])) theta1.tmp<-numeric(length(unique(morph.spe))) Spp<-morph.spe ### seqsRef$unit.classif #seqs<-scale(digitized.locus) ### seqsRef$data #dim(seqs) uniSpeNames<-unique(Spp) #table(Spp) f<-factor(Spp) ##### ################################### ### popSize calculation ################################### popSize.PS<-as.numeric(table(Spp)) ################################### ### theta1,2 calculation ################################### ### for i<-1 #source("eucl.dist.two.vect.R") seqInOneSpe<-Ref[grep(levels(f)[1], Spp, value = FALSE,fixed = TRUE),] #dim(seqInOneSpe)==NULL ifelse(popSize.PS[1]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ### ifelse(popSize.PS[1]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) ### centroid.spe0 - length.sites<-length(centroid.spe) ifelse(popSize.PS[1]==1,intra.dist<-0,intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe)) ### to the centroid ifelse(popSize.PS[1]==1,theta1.tmp[1]<-0,theta1.tmp[1]<-sd(intra.dist)) ### theta1 is sligthly different from #ifelse(popSize.PS[1]==1,theta1.tmp[1]<-0,theta1.tmp[1]<-max(intra.dist)) ### theta1 is sligthly different from ###### for(i in 2:length(levels(f))){ #i<-2 # i<-9 #i=3 cat(paste("i=",i),"\n") # cat("\n") #seqInOneSpe<-sDNAbin[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] seqInOneSpe<-Ref[grep(levels(f)[i], Spp, value = FALSE,fixed = TRUE),] ifelse(popSize.PS[i]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) #ifelse(popSize.PS[i]==1,centroid.spe<-seqInOneSpe,centroid.spe<-apply(seqInOneSpe,2,mean)) ifelse(popSize.PS[i]==1,centroid.spe<-c(centroid.spe,seqInOneSpe),centroid.spe<-c(centroid.spe,apply(seqInOneSpe,2,mean))) #length(seqInOneSpe) #ifelse(dim(seqInOneSpe)==NULL,theta1.tmp[i]<-0,theta1.tmp[i]<-max(dist(seqInOneSpe))) # error! 
#ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-max(dist(seqInOneSpe))) ifelse(popSize.PS[i]==1,intra.dist<-eucl.dist.two.vect(seqInOneSpe,centroid.spe0),intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0)) #intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0) ### to the centroid #ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-max(intra.dist)) ### theta1 is sligthly different from ifelse(popSize.PS[i]==1,theta1.tmp[i]<-0,theta1.tmp[i]<-sd(intra.dist)) ### theta1 is sligthly different from #theta1.tmp[i]<-max(dist(seqInOneSpe)) } centroid.spe.matrix<-t(array(centroid.spe,dim=c(length.sites,length(centroid.spe)%/%length.sites))) ### 1.2 calculate theta2 for each pairt of species ################### #codes<-out.somu$out.som.unique$codes ### seqsRef$codes codes<-centroid.spe.matrix # dim(codes) theta12.all<-data.frame(list.spe=list.spe,PS=pairs[,1],NN=pairs[,2],stringsAsFactors=TRUE) #source("eucl.dist.two.vect.R") theta2.tmp<-numeric(dim(pairs)[1]) for (i in 1:dim(pairs)[1]){ # i<-1 #v1<-codes[i, pairs[i,1]] #v2<-codes[i, pairs[i,2]] v1<-codes[pairs[i,1],] v2<-codes[pairs[i,2],] theta2.tmp[i]<-eucl.dist.two.vect(v1,v2) } theta12.all$theta1<-with(theta12.all,theta1.tmp) theta12.all$theta2<-with(theta12.all,theta2.tmp) theta12.all$popSize.PS<-with(theta12.all,popSize.PS) #theta12.all return(theta12.all) #class(theta12.all) } ### the end of the function eucl.dist.two.vect<-function(v1,v2){ v1minusv2<-v1-v2 squared.v1minusv2<-v1minusv2*v1minusv2 out.sqrt<-sqrt(sum(squared.v1minusv2)) return(out.sqrt) }### end of fucntion DNAbin2kmerFreqMatrix<-function(ref,kmer=kmer){ ### return kmer frequency matricies for both ref and que sequences, but only based on kmers found in ref!!! ### new kmers in que will be ignored #require(seqinr) #kmer<-1 ### 1. check the format of input arguments if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!") if (class(kmer)!="integer") kmer<-as.integer(kmer) ### 2. seek unique.kmer.vector for all seqs: u.s seqs.as.char<-as.character(ref) #seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s)) #ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) seqs.unique.as.str.vector<-unique(seqs.as.str.vector) s<- seqs.unique.as.str.vector u.s0<-unique(substring(s, 1, kmer)) ### check along the column first! u.s<-u.s0 ### do some cleaning by removing IUPAC codes, "-" mpattern<-"-+[a-z]*" u.s<-gsub(mpattern,NA,u.s) # strings with "-" mpattern<-"[rymkswhbvdn]+" u.s<-gsub(mpattern,NA,u.s) # strings with "-" u.s<- u.s[!is.na(u.s)] for (i in 2:(max(nchar(s))-kmer+1)){ # i<-2 #u.s<-u.s0 ### unique str u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) u.s<-unique(u.s) n.char<-nchar(u.s) size.kmer.exact<-n.char==kmer ### logic to remove short kmer! u.s<-subset(u.s,size.kmer.exact) } ### the end of for-loop ### 3. 
calculate kmer frequency for each sequence in ref #b<-gregexpr(u.s[1],seqs.as.str.vector[1]) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? rownames(kmer.freq.matrix)<-rownames(ref) if(length(rownames(kmer.freq.matrix))==0) rownames(kmer.freq.matrix)<-names(ref) out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix) #cat(kmer.freq.matrix) return(kmer.freq.matrix) #return(out) }# #### optimization = FALSE, in this case, a certain kmer will be used, such as kmer = 7 if (optimization!=TRUE){ #kmer<-7 kmer.Freq.ref.que<-DNAbin2kmerFreqMatrix2(ref,que,kmer=kmer) #class(kmer.Freq.ref.que) #head(kmer.Freq.ref.que) ################################################# ### check model success rate for ref ################################################# set.seed(7) knn1<-knn(kmer.Freq.ref.que$kmer.Freq.ref, kmer.Freq.ref.que$kmer.Freq.ref, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) spe.morph<-as.character(Spp2) spe.Identified<-as.character(knn1) spe.morph.Identified<-data.frame(spe.morph,spe.Identified,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates.ref<-matches[1]/matches[2] names(success.rates.ref)<-NULL ################################################# ### make prediction for que ################################################# #set.seed(7) knn1<-knn(kmer.Freq.ref.que$kmer.Freq.ref, kmer.Freq.ref.que$kmer.Freq.que, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) spe.morph<-as.character(Spp2) spe.Identified.que<-as.character(knn1) #spe.morph.Identified<-data.frame(spe.morph,spe.Identified.que) #matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) #matches<-colSums(matches,dims = 1) #success.rates.que<-matches[1]/matches[2] #names(success.rates.que)<-rownames(que) #if(length(rownames(que))==0) names(success.rates.que)<-names(que) #que<-pineMothITS2 ################################################# ### calculate theta12 for ref ref1<-kmer.Freq.ref.que$kmer.Freq.ref rownames(ref1)<-ref.IDs if(length(rownames(ref1))==0) names(ref1)<-ref.IDs FMF.theta12<-FMFtheta12(ref1) ################################################# ### calculate FMF for que identified que1<-kmer.Freq.ref.que$kmer.Freq.que rownames(que1)<-que.IDs if(length(rownames(que1))==0) names(que1)<-que.IDs FMF.que<-numeric(dim(que1)[1]) for(j in 1:dim(que1)[1]){ #j<-1 #seqInOneSpe<-ref1[grep(spe.Identified[j], FMF.theta12$list.spe, value = FALSE,fixed = 
TRUE),] if(length(rownames(ref1))==0){ seqInOneSpe<-ref1[grep(spe.Identified.que[j], names(ref1), value = FALSE,fixed = TRUE),] }else{seqInOneSpe<-ref1[grep(spe.Identified.que[j], rownames(ref1), value = FALSE,fixed = TRUE),]} #seqInOneSpe<-ref1[grep(spe.Identified[j], rownames(ref1), value = FALSE,fixed = TRUE),] #class(FMF.theta12$list.spe) k<-match(x=spe.Identified.que[j], table=FMF.theta12$list.spe) #ifelse(FMF.theta12$popSize.PS[k]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) ifelse(FMF.theta12$popSize.PS[k]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) que2PS.dist<-eucl.dist.two.vect(que1[j,],centroid.spe0) #que2PS.dist<-0 #intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0) xtheta12<-c(que2PS.dist,FMF.theta12$theta1[k],FMF.theta12$theta2[k]) FMF.que[j]<-FMF(xtheta12) } output.identified<-data.frame(queIDs=que.IDs, spe.Identified=spe.Identified.que, FMF=FMF.que,stringsAsFactors=TRUE) out<-list(model.success=success.rates.ref,output_identified=output.identified) #Bayesian.prob=Bayesian.prob) #return(output.identified) # return(out) }else{ ######################################## ### kmer.best ######################################## #set.seed(7) optimize.kmer<-function (ref,max.kmer=max.kmer){ #require(ape) #set.seed(7) NAMES<-function(seqs){ if(mode(seqs)=="raw"){ SeqNames<-attr(seqs,"dimnames")[[1]] }else{ ### mode(seqs)=="list" SeqNames<-names(seqs) } #names(SeqNames)<-NULL if(length(SeqNames)==0) { stop("the mode(seqs) is wrong!") }else{ return(SeqNames) } }### the end of the function #seqNames<-NAMES(ref3) #ref.IDs<-rownames(ref) ref.IDs<-NAMES(ref) #ref<-del.gaps(ref) #names(ref)<-ref.IDs morph.spe<-gsub(".+,","",ref.IDs) # remove sequence ID before "," Spp2<-as.factor(morph.spe) strings.equal<-function(str1,str2){ifelse(str1==str2,1,0)} DNAbin2kmerFreqMatrix<-function(ref,kmer=kmer){ ### return kmer frequency matricies for both ref and que sequences, but only based on kmers found in ref!!! ### new kmers in que will be ignored #require(seqinr) #kmer<-1 ### 1. check the format of input arguments if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!") if (class(kmer)!="integer") kmer<-as.integer(kmer) ### 2. seek unique.kmer.vector for all seqs: u.s seqs.as.char<-as.character(ref) #seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s)) #ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) seqs.unique.as.str.vector<-unique(seqs.as.str.vector) s<- seqs.unique.as.str.vector u.s0<-unique(substring(s, 1, kmer)) ### check along the column first! u.s<-u.s0 for (i in 2:(max(nchar(s))-kmer+1)){ # i<-2 #u.s<-u.s0 ### unique str u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) u.s<-unique(u.s) n.char<-nchar(u.s) size.kmer.exact<-n.char==kmer ### logic to remove short kmer! u.s<-subset(u.s,size.kmer.exact) } ### the end of for-loop ### 3. 
calculate kmer frequency for each sequence in ref #b<-gregexpr(u.s[1],seqs.as.str.vector[1]) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? #rownames(kmer.freq.matrix)<-rownames(ref) rownames(kmer.freq.matrix)<-NAMES(ref) out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix) #cat(kmer.freq.matrix) return(kmer.freq.matrix) #return(out) }# success.rates<-numeric(max.kmer) for (i in 1:max.kmer){ #set.seed(7) # i<-1 kmer.freq.ref<-DNAbin2kmerFreqMatrix(ref,kmer=i) knn1<-knn(kmer.freq.ref, kmer.freq.ref, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) #knn1<-knn1(kmer.freq.ref, kmer.freq.ref, cl=Spp2) #attributes(.Last.value) #attributes(knn1) #knn1 spe.morph<-as.character(Spp2) spe.Identified<-as.character(knn1) spe.morph.Identified<-data.frame(spe.morph,spe.Identified,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates[i]<-matches[1]/matches[2] #cat("i:",i,"\n") #cat("spe.morph:",spe.morph,"\n") #cat("spe.Identified:",spe.Identified,"\n") #cat("success.rates[i]:",success.rates[i],"\n") } kmer.best<-which.max(success.rates) ### plot start plot(1:max.kmer,success.rates,type="h",main="Success rates of spe identification with different length of kmer (ref)", xlab="k (length of kmer)", ylab="Success rates of spe identification") axis(1,kmer.best, paste("optimum",kmer.best,sep="\n"),col="red",font=2,col.axis="red") points(kmer.best,max(success.rates),pch=16,col="red",cex=1.5) ### plot end success.rates.ref<-max(success.rates) names(success.rates.ref)<-NULL out<-c(kmer.best,success.rates.ref) return(out) #return(kmer.best) } #kmer<-10 #kmer.optimal<-optimal.kmer(ref,max.kmer=kmer) kmer.optimal<-optimize.kmer(ref,max.kmer=kmer) success.rates.ref<-kmer.optimal[2] kmer.optimal<-kmer.optimal[1] ######################################## ### species identification after having got kmer.optimal ######################################## kmer.Freq.ref.que<-DNAbin2kmerFreqMatrix2(ref,que,kmer=kmer.optimal) ################################################# ### check model success rate for ref ################################################# # knn1<-knn(kmer.Freq.ref.que$kmer.Freq.ref, # kmer.Freq.ref.que$kmer.Freq.ref, # cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) # spe.morph<-as.character(Spp2) # spe.Identified<-as.character(knn1) # spe.morph.Identified<-data.frame(spe.morph,spe.Identified) # 
matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) # matches<-colSums(matches,dims = 1) # success.rates.ref<-matches[1]/matches[2] # names(success.rates.ref)<-NULL ################################################# ### make prediction for que ################################################# #set.seed(7) knn1<-knn(kmer.Freq.ref.que$kmer.Freq.ref, kmer.Freq.ref.que$kmer.Freq.que, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) spe.morph<-as.character(Spp2) spe.Identified.que<-as.character(knn1) #spe.morph.Identified<-data.frame(spe.morph,spe.Identified.que) #matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) #matches<-colSums(matches,dims = 1) #success.rates.que<-matches[1]/matches[2] #names(success.rates.que)<-rownames(que) # if(length(names(success.rates.que))==0) names(success.rates.que)<-names(que) ################################################# ### calculate theta12 for ref ref1<-kmer.Freq.ref.que$kmer.Freq.ref #class(ref1) rownames(ref1)<-ref.IDs if(length(rownames(ref1))==0) names(ref1)<-ref.IDs FMF.theta12<-FMFtheta12(ref1) ################################################# ### calculate FMF for que identified que1<-kmer.Freq.ref.que$kmer.Freq.que ### matrix class(que1) rownames(que1)<-que.IDs if(length(rownames(que1))==0) names(que1)<-que.IDs FMF.que<-numeric(dim(que1)[1]) for(j in 1:dim(que1)[1]){ #j<-1 #seqInOneSpe<-ref1[grep(spe.Identified[j], FMF.theta12$list.spe, value = FALSE,fixed = TRUE),] if(length(rownames(ref1))==0){ seqInOneSpe<-ref1[grep(spe.Identified.que[j], names(ref1), value = FALSE,fixed = TRUE),] }else{seqInOneSpe<-ref1[grep(spe.Identified.que[j], rownames(ref1), value = FALSE,fixed = TRUE),]} #seqInOneSpe<-ref1[grep(spe.Identified[j], rownames(ref1), value = FALSE,fixed = TRUE),] #class(FMF.theta12$list.spe) k<-match(x=spe.Identified.que[j], table=FMF.theta12$list.spe) #ifelse(FMF.theta12$popSize.PS[k]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) ifelse(FMF.theta12$popSize.PS[k]==1,centroid.spe0<-seqInOneSpe,centroid.spe0<-apply(seqInOneSpe,2,mean)) que2PS.dist<-eucl.dist.two.vect(que1[j,],centroid.spe0) #que2PS.dist<-0 #intra.dist<-apply(seqInOneSpe,1,eucl.dist.two.vect,v2=centroid.spe0) xtheta12<-c(que2PS.dist,FMF.theta12$theta1[k],FMF.theta12$theta2[k]) FMF.que[j]<-FMF(xtheta12) } output.identified<-data.frame(queIDs=que.IDs, spe.Identified=spe.Identified.que, FMF=FMF.que,stringsAsFactors=TRUE) out<-list(model.success=success.rates.ref,output_identified=output.identified) #return(output.identified) } class(out) <- c("BarcodingR") return(out) }
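### --- Added illustrative sketch (not part of the original routine) ---
### The core of DNAbin2kmerFreqMatrix2() above is plain fixed-length
### substring counting. A minimal standalone version for a single sequence,
### dormant in an if (FALSE) block; the sequence and k are made up.
if (FALSE) {
  s <- "atgcatgca"
  k <- 2
  kmers <- substring(s, 1:(nchar(s) - k + 1), k:nchar(s))  # all overlapping k-mers
  table(kmers)  # each 2-mer ("at", "tg", "gc", "ca") occurs twice here
}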
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/barcoding.spe.identify2.R
#' Bp Barcoding Species Identify using Kmer #' #' @description Species identification using BP-based method for both protein-coding #' barcodes, for instance, COI, and non-coding barcodes, such as, ITS, using kmer statistics. #' #' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information. #' @param que object of class "DNAbin", which needs to be inferred. #' @param kmer a numeric indicating the length of kmer used. #' @param UseBuiltModel logic value to indicate whether a built model is used #' or not. #' @param lr parameter for weight decay. Default 5e-5. #' @param maxit maximum number of iterations. Default 1e+6. #' @return a list containing model parameters used, species identification success rates using references, #' query sequences, species inferred, and corresponding confidence levels (bp probability for BP-based method). #' @keywords bbsik #' @export #' @import nnet #' @author Ai-bing ZHANG, Meng-di HAO, Cai-qing YANG, CNU, Beijing, CHINA. zhangab2008 (at) mail. cnu. edu.cn #' @references #' Zhang, A. B., D. S. Sikes, C. Muster, S. Q. Li. (2008). Inferring Species Membership #' using DNA sequences with Back-propagation Neural Networks. Systematic Biology, 57(2):202-215. #' https://academic.oup.com/sysbio/article/57/2/202/1622290 #' #' #' @examples #' data(TibetanMoth) #' ref<-as.DNAbin(as.character(TibetanMoth[1:50,])) #' que<-as.DNAbin(as.character(TibetanMoth[51:60,])) #' out<-bbsik(ref, que, kmer = 1, UseBuiltModel = FALSE) #' out #' out$convergence #' out$success.rates.ref #' #' data(pineMothITS2) #' ref<-pineMothITS2 #' que<-pineMothITS2 #' out<-bbsik(ref, que, kmer = 1, UseBuiltModel = FALSE) #' out #' out$convergence #' out$success.rates.ref #' #' #' bbsik<-function (ref, que, kmer = kmer, UseBuiltModel = FALSE,lr=5e-5, maxit=1e+6) { ### functions used! DNAbin2kmerFreqMatrix2<-function(ref,que,kmer=kmer){ ### return kmer frequency matrices for both ref and que sequences, but only based on kmers found in ref!!! ### new kmers in que will be ignored #require(seqinr) #kmer<-1 ### 1. check the format of input arguments if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!") if (class(kmer)!="integer") kmer<-as.integer(kmer) ### 2. seek unique.kmer.vector for all seqs: u.s seqs.as.char<-as.character(ref) seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s)) ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) seqs.unique.as.str.vector<-unique(seqs.as.str.vector) s<- seqs.unique.as.str.vector u.s0<-unique(substring(s, 1, kmer)) ### check along the column first! u.s<-u.s0 #length(u.s) for (i in 2:(max(nchar(s))-kmer+1)){ # i<-2 #u.s<-u.s0 ### unique str u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) u.s<-unique(u.s) n.char<-nchar(u.s) size.kmer.exact<-n.char==kmer ### logic to remove short kmer! u.s<-subset(u.s,size.kmer.exact) } ### the end of for-loop ### do some cleaning by removing IUPAC codes, "-" mpattern<-"-+[a-z]*" u.s<-gsub(mpattern,NA,u.s) # strings with "-" mpattern<-"[rymkswhbvdn]+" u.s<-gsub(mpattern,NA,u.s) # strings with "-" u.s<- u.s[!is.na(u.s)] ### 3. 
calculate kmer frequency for each sequence in ref #b<-gregexpr(u.s[1],seqs.as.str.vector[1]) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? rownames(kmer.freq.matrix)<-rownames(ref) ### 4. calculate kmer frequency for each sequence in que kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector2)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix2<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? rownames(kmer.freq.matrix2)<-rownames(que) out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix, kmer.Freq.que=kmer.freq.matrix2) #cat(kmer.freq.matrix) #return(kmer.freq.matrix) return(out) } char2NumVector<-function(c){ if (class(c)!="character") c<-as.character(c) #stop("invalid input format! 
input should be character vector!") c<-as.factor(c) level.c<-levels(c) #b<-dim(ecol.sample.list)[1] levels(c)<-seq(1:length(level.c)) c<-as.numeric(c) return(c)} strings.equal<-function(str1,str2){ifelse(str1==str2,1,0)} ########## c2s<-function (chars = c("m", "e", "r", "g", "e", "d")) { return(paste(chars, collapse = "")) } ########## NAMES<-function(seqs){ if(mode(seqs)=="raw"){ SeqNames<-attr(seqs,"dimnames")[[1]] }else{ ### mode(seqs)=="list" SeqNames<-names(seqs) } #names(SeqNames)<-NULL if(length(SeqNames)==0) { stop("the mode(seqs) is wrong!") }else{ return(SeqNames) } } if(UseBuiltModel == FALSE){ sampleSpeNames<-attr(ref,"dimnames")[[1]] if(length(sampleSpeNames)==0) sampleSpeNames<-names(ref) queIDs<-attr(que,"dimnames")[[1]] if(length(queIDs)==0) queIDs<-names(que) mpattern<-".+," Spp<-gsub(mpattern,"",sampleSpeNames) # remove seqs names before "," (incl.",") #Spp #length(unique(Spp)) sampleSpeNames2<-class.ind(Spp) #if (kmer>0.05*len.shortest.seq) # stop("kmer is too large, it will take lots of time to run! a vaule less than 10 is suggested!") out1<-DNAbin2kmerFreqMatrix2(ref,que,kmer=kmer) unique.str.ref<-out1$unique.str # bp<-function(ref1,sampleSpeNames2,que1,unique.str.ref,lr=5e-5, maxit=1e+6){ bp<-function(ref1,sampleSpeNames2,que1,lr=5e-5, maxit=1e+6){ #unique.str.ref<-out1$unique.str ### standard center.ref1<-apply(ref1,MARGIN=2,FUN=mean) sd.ref1<-apply(ref1,MARGIN=2,FUN=sd) ref1<-scale(ref1,scale=F) que1<-scale(que1,center = center.ref1, scale=F) #que1<-scale(que1,center = center.ref1, scale=sd.ref1) ref1_tmp<-ref1[!is.na(ref1)] range1<-1./max(abs(ref1_tmp)) # range1 n.hidden<-ceiling(log2(dim(ref1)[1])) nnet.trained <- nnet(ref1, sampleSpeNames2, size = n.hidden, rang = range1,### 0.5 entropy = FALSE, decay = lr, maxit = maxit,#decay = 5e-5, maxit = 1e+6, abstol = 1.0e-8, reltol = 1.0e-8, MaxNWts = 20000) ### decay = 5e-5 spe.inferred0<-predict(nnet.trained, que1) spe.inferred<-spe.inferred0 #spe.inferred[spe.inferred>=0.95]<-1 #spe.inferred[spe.inferred<0.95]<-0 #colnames(spe.inferred) #which.max(spe.inferred[1,]) inferred<-apply(spe.inferred,1,FUN=which.max) inferred.prob<-apply(spe.inferred,1,FUN=max) # length(inferred) #inferred #colnames(spe.inferred)[inferred] ############################################################ ####### calculate model success rate using ref start... ########################################################### spe.inferred0.ref<-predict(nnet.trained, ref1) spe.inferred.ref<-spe.inferred0.ref inferred.ref<-apply(spe.inferred.ref,1,FUN=which.max) inferred.prob.ref<-apply(spe.inferred.ref,1,FUN=max) # length(inferred) #inferred p.ref<-colnames(spe.inferred.ref)[inferred.ref] #p.ref[1]<-"abz" spe.morph.Identified<-data.frame(Spp,p.ref,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates.ref<-matches[1]/matches[2] names(success.rates.ref)<-NULL ############################################################ ####### calculate model success rate using ref the end. 
########################################################### output.identified<-data.frame(queIDs=queIDs, spe.Identified=colnames(spe.inferred)[inferred], bp.prob=inferred.prob,stringsAsFactors=TRUE) #output.identified rownames(output.identified)<-NULL out.bp<-list(summary.model=summary(nnet.trained),###convergence=nnet.trained$convergence, convergence=nnet.trained$convergence, success.rates.ref=success.rates.ref, kme.ref=kmer, output_identified=output.identified, nnet.trained=nnet.trained, center.ref1=center.ref1 ) #current.wd<-getwd() #setwd return(out.bp) #return(output.identified) } output2.identified<-bp(ref1=out1$kmer.Freq.ref, sampleSpeNames2, que1=out1$kmer.Freq.que, #unique.str.ref=out1$unique.str, lr=5e-5, maxit=1e+6) #fileName<-"bbsik_tmp" #fileName<-paste("simulation",i,sep = "") #fileName<-paste(fileName,".RData",sep = "") #Rhome<-R.home() ### 2020/4/13 21:50:29 Rhome<-tempdir() ### 2020/4/14 21:31:02 fileName<-"bbsik_tmp" fileName<-paste(Rhome,fileName,sep = "/")### 2020/4/13 21:50:34 fileName fileName<-paste(fileName,".RData",sep = "") fileName save(output2.identified, #nnet.trained, # out.bp, # success.rates.ref, # kmer, unique.str.ref, # center.ref1, file = fileName) ## #save(x, y, file = "xy.RData") }else{ ##################### ### 2020/4/13 21:57:42 #Rhome<-R.home() ### 2020/4/13 21:57:47 Rhome<-tempdir() ### 2020/4/14 21:31:02 fileName<-"bbsik_tmp" fileName<-paste(Rhome,fileName,sep = "/")### 2020/4/13 21:06:46 fileName fileName<-paste(fileName,".RData",sep = "") fileName ############################ 2020/4/13 21:57:50 #if(!file.exists("bbsik_tmp.RData")) if(!file.exists(fileName)) ### 2020/4/13 21:52:14 stop("file bbsik_tmp.RData not found in R.home directory! you need to rebuild the model! ") ### 4. calculate kmer frequency for each sequence in que #load("bbsik_tmp.RData") load(fileName) ####2020/4/13 21:58:54 center.ref1<-output2.identified$center.ref1 nnet.trained<-output2.identified$nnet.trained success.rates.ref<-output2.identified$success.rates.ref queIDs<-attr(que,"dimnames")[[1]] if(length(queIDs)==0) queIDs<-names(que) #u.s<-unique.str.ref u.s<-unique.str.ref seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector2)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector2[k]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix2<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) rownames(kmer.freq.matrix2)<-rownames(que) if(length(rownames(kmer.freq.matrix2))==0) rownames(kmer.freq.matrix2)<-names(que) que1<-scale(kmer.freq.matrix2,center = center.ref1, scale=F) spe.inferred0<-predict(nnet.trained, que1) spe.inferred<-spe.inferred0 inferred<-apply(spe.inferred,1,FUN=which.max) 
inferred.prob<-apply(spe.inferred,1,FUN=max) output.identified<-data.frame(queIDs=queIDs, spe.Identified=colnames(spe.inferred)[inferred], bp.prob=inferred.prob,stringsAsFactors=TRUE) rownames(output.identified)<-NULL output2.identified<-list(summary.model=summary(nnet.trained), convergence=nnet.trained$convergence, success.rates.ref=success.rates.ref, kme.ref=kmer, output_identified=output.identified) } class(output2.identified) <- c("BarcodingR") return(output2.identified) } ### the end of the function!
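### --- Added dormant usage sketch (assumptions noted) ---
### bbsik() caches the trained nnet model in tempdir()/bbsik_tmp.RData (see
### the save()/load() calls above), so a model fitted once can be reused for
### later queries in the same R session via UseBuiltModel = TRUE. Wrapped in
### if (FALSE) so nothing runs when this file is sourced.
if (FALSE) {
  data(TibetanMoth)
  ref <- as.DNAbin(as.character(TibetanMoth[1:50, ]))
  que <- as.DNAbin(as.character(TibetanMoth[51:60, ]))
  out1 <- bbsik(ref, que, kmer = 1, UseBuiltModel = FALSE)  # train, identify, cache
  out2 <- bbsik(ref, que, kmer = 1, UseBuiltModel = TRUE)   # reuse the cached model
  out1$success.rates.ref
}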
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/bbsik.R
#' Character to Integer Vector
#'
#' @description Conversion from a character vector to an integer vector.
#'
#' @param c character vector.
#' @return an integer vector.
#' @keywords char2NumVector
#' @export
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA. zhangab2008 (at) mail. cnu. edu.cn.
#' @references
#' zhangab2008 (at) mail. cnu. edu. cn.
#'
#' @examples
#' c<-c("a","a","b")
#' num<-char2NumVector(c)
#' num
#'
char2NumVector <- function(c) {
  if (!is.character(c)) c <- as.character(c)
  c <- as.factor(c)                  # levels are the sorted unique values
  levels(c) <- seq_along(levels(c))  # relabel levels as 1, 2, ...
  as.numeric(c)                      # return the underlying integer codes
}
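### --- Added illustrative note (not part of the original routine) ---
### The integer codes follow the alphabetical order of the factor levels,
### not the order of first appearance. Dormant demo:
if (FALSE) {
  char2NumVector(c("b", "a", "b"))  # returns 2 1 2, because levels are "a" < "b"
}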
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/char2NumVector.R
#' Comparison between two Delimitations
#'
#' @description Comparison between two delimitations of a group of samples, for instance,
#' a traditional morphological delimitation and a molecular delimitation (MOTUs).
#'
#' @param deli1 a character vector containing one delimitation, for example, morphological identifications (species names), to compare with.
#' @param deli2 a character vector containing the other delimitation, for example, a molecular delimitation (MOTUs).
#' @return a list containing the adjusted Rand index comparing the two partitions (a scalar). This index has zero expected value in the case
#' of a random partition, and it is bounded above by 1 in the case of perfect agreement between the two partitions; also the numbers of matches,
#' splits, and merges, with the corresponding percentages.
#' @keywords compare2delimitations
#' @export
#'
#' @note This is for the same set of samples with two partitions/delimitations.
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA.
#' @references
#' L. Hubert and P. Arabie (1985) Comparing Partitions. Journal of Classification 2:193-218.
#' @examples
#'
#' deli1<-c(1,1,1,1,1,1)
#' deli2<-c(1,1,2,1,1,3)
#' out<-compare2delimitations(deli1,deli2)
#' out
compare2delimitations<-function(deli1,deli2){
  char2NumVector<-function(c){
    if (class(c)!="character") c<-as.character(c)
    c<-as.factor(c)
    level.c<-levels(c)
    levels(c)<-seq(1:length(level.c))
    c<-as.numeric(c)
    return(c)
  }
  ##########
  adjustedRandIndex<-function (x, y){
    x <- as.vector(x)
    y <- as.vector(y)
    if (length(x) != length(y))
      stop("arguments must be vectors of the same length")
    tab <- table(x, y)
    if (all(dim(tab) == c(1, 1))) return(1)
    a <- sum(choose(tab, 2))
    b <- sum(choose(rowSums(tab), 2)) - a
    c <- sum(choose(colSums(tab), 2)) - a
    d <- choose(sum(tab), 2) - a - b - c
    ARI <- (a - (a + b) * (a + c)/(a + b + c + d))/((a + b + a + c)/2 - (a + b) * (a + c)/(a + b + c + d))
    return(ARI)
  }
  ##########
  if(length(deli1)!=length(deli2))
    stop("deli1 and deli2 should be the same length!")
  twoPartitions<-cbind(deli1,deli2)
  no<-1:length(deli1)
  rownames(twoPartitions)<-paste("sample",no,sep="")
  U<-twoPartitions[,1] ### e.g. morphological classification, being compared to
  V<-twoPartitions[,2] ### e.g. molecular delimitation (MOTUs)
  no.partition.U<-length(levels(factor(U)))
  no.partition.V<-length(levels(factor(V)))
  ### generate the contingency table of U and V (ContableUV),
  ### counting the samples shared by each pair of U- and V-groups
  k=1 ### running index over the elements of ContableUV
  no.in.common <- NULL
  for (j in 1:no.partition.V){
    for (i in 1:no.partition.U){
      sub.U<-U[U==levels(factor(U))[i]]
      names.sub.U<-names(sub.U)
      sub.V<-V[V==levels(factor(V))[j]]
      names.sub.V<-names(sub.V)
      no.in.common[k]<-length(intersect(names.sub.U,names.sub.V))
      k=k+1
    }
  }
  ContableUV<-array(no.in.common,c(no.partition.U,no.partition.V))
  ### count the number and frequency of MATCH, SPLIT and MERGE of V
  ### (e.g. MOTUs), referred to U (the morpho-partition)
  MATCH<-0
  SPLIT<-0
  indexTrue<-0
  for (j in 1:no.partition.U) {
    x<-ContableUV[j,]
    if (length(x[x>0])==1 && length(x)!=1) {
      a<-x>0
      for (k in 1:length(a)) {
        if(a[k]==TRUE) indexTrue<-k
      }
      x2<-ContableUV[,indexTrue]
      if (length(x2[x2>0])==1) MATCH<-MATCH+1
    }
    else SPLIT<-SPLIT+1
  }
  freq.MATCH=MATCH/no.partition.U
  freq.SPLIT=SPLIT/no.partition.U
  ### count MERGE
  MERGE<-0
  for (i in 1:no.partition.V) {
    x<-ContableUV[,i]
    if (length(x[x>0])>1) MERGE<-MERGE+1
  }
  freq.MERGE=MERGE/no.partition.V
  ### the adjusted Rand index comparing the two partitions
  deli1<-char2NumVector(deli1)
  deli2<-char2NumVector(deli2)
  ARI<-adjustedRandIndex(deli1, deli2)
  OUT<-list(adjustedRandIndex=ARI,Match=MATCH,Split=SPLIT,Merge=MERGE,
            Freq.match=freq.MATCH,Freq.split=freq.SPLIT,Freq.merge=freq.MERGE,
            ContableUV=ContableUV)
  return(OUT)
}
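### --- Added worked sketch (not part of the original routine) ---
### The adjusted Rand index above, computed directly from the contingency
### table with choose(); the numbers match the roxygen example, where one
### morphospecies split into three MOTUs gives ARI = 0 (no better than a
### random partition). Dormant in an if (FALSE) block.
if (FALSE) {
  tab <- table(c(1, 1, 1, 1, 1, 1), c(1, 1, 2, 1, 1, 3))
  a <- sum(choose(tab, 2))                 # 6
  b <- sum(choose(rowSums(tab), 2)) - a    # 9
  c <- sum(choose(colSums(tab), 2)) - a    # 0
  d <- choose(sum(tab), 2) - a - b - c     # 0
  (a - (a + b) * (a + c) / (a + b + c + d)) /
    ((a + b + a + c) / 2 - (a + b) * (a + c) / (a + b + c + d))  # 0
}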
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/compare2delimitations.R
#' Consensus Identification
#'
#' @description Make a consensus of the identifications from two or more methods, usually for
#' a set of query sequences.
#' @param identifiedBy2orMore an object of class "data.frame", containing (queIDs, as rownames), identifiedByMethod1, identifiedByMethod2, and so on.
#' @return a data frame with the consensus identification and the corresponding votes.
#' @keywords consensus.identify
#' @export
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA, contact at zhangab2008(at)mail.cnu.edu.cn
#' @note Suitable for cases where a set of queries was identified by more than two methods.
#' @examples
#'
#' queIDs<-c("q1","q2","q3")
#'
#' bp<-c("sp1","sp1","sp1")
#' bpk<-c("sp1","sp1","sp2")
#' bayes<-c("sp2","sp1","sp3")
#' fuzzyID<-c("sp1","sp1","sp2")
#' identifiedBy2orMore<-data.frame(bp=bp,bpk=bpk,bayes=bayes,fuzzyID=fuzzyID)
#' rownames(identifiedBy2orMore)<-queIDs<-c("q1","q2","q3")
#' ccs<-consensus.identify(identifiedBy2orMore)
consensus.identify <- function(identifiedBy2orMore) {
  ## majority vote over one row of identifications (one query, all methods);
  ## returns the winning species name, or a warning string when every method
  ## disagrees (all vote counts equal to 1)
  consensus.vote <- function(ids) {
    ids <- as.factor(ids)
    t.ids <- table(ids)
    if (max(t.ids) != 1) {
      ccs <- levels(ids)[as.numeric(which.max(t.ids))]
    } else {
      ccs <- "ambiguous identification"
    }
    names(ccs) <- max(t.ids)
    ccs
  }
  ## number of votes supporting the winning identification
  vote.count <- function(ids) {
    max(table(as.factor(ids)))
  }
  ccs  <- apply(identifiedBy2orMore, MARGIN = 1, FUN = consensus.vote)
  ccs2 <- apply(identifiedBy2orMore, MARGIN = 1, FUN = vote.count)
  data.frame(concensus.id = ccs, votes = ccs2, stringsAsFactors = TRUE)
} ### the end of the function
### how to call the function:
### ccs <- consensus.identify(identifiedBy2orMore)
### ccs
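### --- Added illustrative sketch (not part of the original routine) ---
### The voting core above is just table() + which.max(); ties across all
### methods yield "ambiguous identification". Dormant demo with made-up
### labels:
if (FALSE) {
  ids <- c("sp1", "sp1", "sp2", "sp1")
  votes <- table(ids)
  names(which.max(votes))  # "sp1", with max(votes) == 3 supporting votes
}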
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/consensus.identify.R
#' Digitize DNAbin
#'
#' @description Digitize an object of DNAbin.
#' @param seqs an object of DNAbin.
#' @return a numeric matrix of digitized DNA sequences.
#' @keywords digitize.DNA
#' @export
#'
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA.
#' @references zhangab2008(at)mail.cnu.edu.cn
#' @examples
#'
#' data(TibetanMoth)
#' digitized.DNA<-digitize.DNA(seqs=TibetanMoth)
#' digitized.DNA
#'
######################################################
########### function: digitize.DNA start #############
######################################################
digitize.DNA <- function(seqs) {
  locus <- toupper(as.character(seqs))
  digitized.DNA <- locus
  ## the four bases get codes 1-4, gaps 5, N 6
  digitized.DNA[digitized.DNA == "A"] <- 1
  digitized.DNA[digitized.DNA == "T"] <- 2
  digitized.DNA[digitized.DNA == "G"] <- 3
  digitized.DNA[digitized.DNA == "C"] <- 4
  digitized.DNA[digitized.DNA == "-"] <- 5
  digitized.DNA[digitized.DNA == "N"] <- 6
  ## all other IUPAC ambiguity codes collapse to 0
  for (amb in c("R", "Y", "M", "K", "S", "W", "H", "B", "V", "D")) {
    digitized.DNA[digitized.DNA == amb] <- 0
  }
  digitized.DNA <- as.numeric(digitized.DNA)
  ## restore the original alignment dimensions
  digitized.DNA2 <- array(digitized.DNA, dim = dim(seqs))
  return(digitized.DNA2)
}
######################################################
########### function: digitize.DNA end ###############
######################################################
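### --- Added dormant usage sketch (assumptions noted) ---
### Toy check of the coding above (a/t/g/c -> 1/2/3/4, gap -> 5, n -> 6,
### IUPAC ambiguity codes -> 0); as.DNAbin() from ape is assumed available.
### Wrapped in if (FALSE) so nothing runs when this file is sourced.
if (FALSE) {
  toy <- as.DNAbin(matrix(c("a", "t", "g", "c", "-", "n"), nrow = 1))
  digitize.DNA(toy)  # 1 2 3 4 5 6, as a 1 x 6 numeric matrix
}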
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/digitize.DNA.R
#' Optimize kmer Length #' #' @description Optimize kmer length by trying kmers which length is in the range from 1 to max.kmer. #' The optimal kmer will have maximal species identification success rate. #' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information. #' @param max.kmer a numeric to indicate the length of maximal kmer. #' #' @return a numeric indicating the optimal kmer in the range examined. #' @keywords optimize.kmer #' @export #' @author Ai-bing ZHANG, Cai-qing YANG, Meng-di HAO, CNU, Beijing, CHINA. #' @references zhangab2008 (at) mail. cnu. edu. cn/zhangab2008 (at) gmail.com. #' @examples #' #' data(TibetanMoth) #' ref<-TibetanMoth[1:10,] #' optimial.kmer<-optimize.kmer(ref,max.kmer=5) #' #' #' optimize.kmer<-function (ref,max.kmer=max.kmer){ #require(ape) set.seed(7) NAMES<-function(seqs){ if(mode(seqs)=="raw"){ SeqNames<-attr(seqs,"dimnames")[[1]] }else{ ### mode(seqs)=="list" SeqNames<-names(seqs) } #names(SeqNames)<-NULL if(length(SeqNames)==0) { stop("the mode(seqs) is wrong!") }else{ return(SeqNames) } }### the end of the function ref.IDs<-NAMES(ref) #ref<-del.gaps(ref) #names(ref)<-ref.IDs morph.spe<-gsub(".+,","",ref.IDs) # remove sequence ID before "," Spp2<-as.factor(morph.spe) strings.equal<-function(str1,str2){ifelse(str1==str2,1,0)} DNAbin2kmerFreqMatrix<-function(ref,kmer=kmer){ ### return kmer frequency matricies for both ref and que sequences, but only based on kmers found in ref!!! ### new kmers in que will be ignored #require(seqinr) ########## c2s<-function (chars = c("m", "e", "r", "g", "e", "d")) { return(paste(chars, collapse = "")) } ########## #kmer<-1 ### 1. check the format of input arguments if (class(ref)!="DNAbin") stop("seqs should be in DNAbin format!") if (class(kmer)!="integer") kmer<-as.integer(kmer) ### 2. seek unique.kmer.vector for all seqs: u.s seqs.as.char<-as.character(ref) #seqs.as.char2<-as.character(que) ifelse(is.vector(seqs.as.char),seqs.as.str.vector<-lapply(seqs.as.char,FUN=c2s),seqs.as.str.vector<-apply(seqs.as.char, MARGIN=1,FUN=c2s)) #ifelse(is.vector(seqs.as.char2),seqs.as.str.vector2<-lapply(seqs.as.char2,FUN=c2s),seqs.as.str.vector2<-apply(seqs.as.char2, MARGIN=1,FUN=c2s)) seqs.unique.as.str.vector<-unique(seqs.as.str.vector) s<- seqs.unique.as.str.vector u.s0<-unique(substring(s, 1, kmer)) ### check along the column first! u.s<-u.s0 for (i in 2:(max(nchar(s))-kmer+1)){ # i<-2 #u.s<-u.s0 ### unique str u.s<-c(u.s,unique(substring(s, i, kmer+i-1))) u.s<-unique(u.s) n.char<-nchar(u.s) size.kmer.exact<-n.char==kmer ### logic to remove short kmer! u.s<-subset(u.s,size.kmer.exact) } ### the end of for-loop ### 3. 
calculate kmer frequency for each sequence in ref #b<-gregexpr(u.s[1],seqs.as.str.vector[1]) kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ #kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]]) ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- kmer.freq.one.seq2 for (k in 2:length(seqs.as.str.vector)){ # k<-2 kmer.freq.one.seq<-sapply(u.s,gregexpr,seqs.as.str.vector[k]) #b<-lapply(u.s,gregexpr,seqs.as.str.vector[1]) kmer.freq.one.seq2<-numeric(length(kmer.freq.one.seq)) for (i in 1:length(kmer.freq.one.seq)){ ifelse(kmer.freq.one.seq[[i]]==-1,kmer.freq.one.seq2[i]<-0,kmer.freq.one.seq2[i]<-length(kmer.freq.one.seq[[i]])) } kmer.freq.one.seq3<- c(kmer.freq.one.seq3,kmer.freq.one.seq2) } ### end of k-for-loop kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq),length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq)))) #kmer.freq.matrix<-t(array(kmer.freq.one.seq3,dim=c(length(kmer.freq.one.seq3)%/%length(kmer.freq.one.seq),length(kmer.freq.one.seq)))) ### error!? #rownames(kmer.freq.matrix)<-rownames(ref) rownames(kmer.freq.matrix)<-NAMES(ref) out<-list(unique.str=u.s,kmer.Freq.ref=kmer.freq.matrix) #cat(kmer.freq.matrix) return(kmer.freq.matrix) #return(out) }# success.rates<-numeric(max.kmer) for (i in 1:max.kmer){ # i<-1 kmer.freq.ref<-DNAbin2kmerFreqMatrix(ref,kmer=i) knn1<-knn(kmer.freq.ref, kmer.freq.ref, cl=Spp2, k = 1, l = 0, prob = FALSE, use.all = TRUE) #knn1<-knn1(kmer.freq.ref, kmer.freq.ref, cl=Spp2) #attributes(.Last.value) #attributes(knn1) #knn1 spe.morph<-as.character(Spp2) spe.Identified<-as.character(knn1) spe.morph.Identified<-data.frame(spe.morph,spe.Identified,stringsAsFactors=TRUE) matches<-apply(spe.morph.Identified,2,strings.equal,str2=spe.morph.Identified[,2]) matches<-colSums(matches,dims = 1) success.rates[i]<-matches[1]/matches[2] #cat("i:",i,"\n") #cat("spe.morph:",spe.morph,"\n") #cat("spe.Identified:",spe.Identified,"\n") #cat("success.rates[i]:",success.rates[i],"\n") } kmer.best<-which.max(success.rates) ### plot start plot(1:max.kmer,success.rates,type="h",main="Success rates of spe identification with different length of kmer (ref)", xlab="k (length of kmer)", ylab="Success rates of spe identification") axis(1,kmer.best, paste("optimum",kmer.best,sep="\n"),col="red",font=2,col.axis="red") points(kmer.best,max(success.rates),pch=16,col="red",cex=1.5) ### plot end success.rates.ref<-max(success.rates) names(success.rates.ref)<-NULL out<-c(kmer.best,success.rates.ref) return(out) #return(kmer.best) }
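### --- Added dormant usage sketch (not part of the original routine) ---
### optimize.kmer() returns a length-2 numeric vector,
### c(best kmer length, re-substitution success rate on ref), and also draws
### the success-rate profile as a side effect. Wrapped in if (FALSE) so
### nothing runs when this file is sourced.
if (FALSE) {
  data(TibetanMoth)
  ref <- TibetanMoth[1:10, ]
  opt <- optimize.kmer(ref, max.kmer = 5)
  best.k      <- opt[1]  # optimal kmer length in the range examined
  ref.success <- opt[2]  # identification success rate at that length
}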
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/optimize.kmer.R
#' Sample Random Datasets from References (DNAbin)
#'
#' @description Randomly sample reference data at different levels of taxon.
#' @param ref Object of class "DNAbin" used as a reference dataset, which contains taxon information.
#' @param sample.porp a numeric value between 0 and 1, indicating the proportion of samples to draw
#' at each level of taxon.
#' @param sample.level a character string choosing from c("full","family","genus","species").
#' @return a list containing the selected samples and the samples left, in DNAbin format stored in a matrix or a list.
#' @keywords sample.ref
#' @export
#' @note the ref must contain information on taxonomy, in a format like ">LS0909030M,Noctuidae_Himalaea_unica",
#' i.e., "seqID,family_genus_species", or ">LS0909030M,Himalaea_unica"; in case there is only one sample/individual
#' for a taxon level, this sample will be retained in ref.selected.
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA.
#' @references zhangab2008(at)mail.cnu.edu.cn
#' @examples
#'
#' data(TibetanMoth)
#' data(pineMothITS2)
#' ref<-TibetanMoth
#' ref2<-pineMothITS2
#' out<-sample.ref(ref,sample.porp=0.5,sample.level="full")
#' out
#' out2<-sample.ref(ref2,sample.porp=0.5,sample.level="full")
#' out2
#'
sample.ref <- function(ref, sample.porp = 0.5, sample.level = "full") {
  ### sample.level is one of c("full","family","genus","species")
  ### helper functions used:
  NAMES <- function(seqs) {
    if (mode(seqs) == "raw") {
      SeqNames <- attr(seqs, "dimnames")[[1]]
    } else { ### mode(seqs)=="list"
      SeqNames <- names(seqs)
    }
    if (length(SeqNames) == 0) {
      stop("the mode(seqs) is wrong!")
    } else {
      return(SeqNames)
    }
  }
  taxonInfoExtraction <- function(seqLables, returnValue = "id") {
    if (class(seqLables) != "character") stop("seqLables is not character!")
    id <- strsplit(seqLables, ",")[[1]][1]
    taxon <- strsplit(seqLables, ",")[[1]][2]
    taxa <- strsplit(taxon, "_")
    taxa <- taxa[[1]]
    if (length(taxa) == 3) { ### with family information, "LS0909030M,Noctuidae_Himalaea_unica"
      family <- taxa[1]
      genus <- paste(taxa[1], taxa[2], sep = "_")
      species <- paste(taxa[2], taxa[3], sep = "_")
    } else { ### no family information, "LS0909030M,Himalaea_unica"
      family <- "NA"
      genus <- taxa[1]
      species <- paste(taxa[1], taxa[2], sep = "_")
    }
    if (returnValue == "id") return(id)
    if (returnValue == "family") return(family)
    if (returnValue == "genus") return(genus)
    if (returnValue == "species") return(species)
  }
  if (class(ref) != "DNAbin") stop("seqs should be in DNAbin format!")

  ### 1. basic statistics for taxon information
  sampleSpeNames <- NAMES(ref)
  id <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "id")
  family <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "family") ### returns a list!
  family <- as.character(family)
  genus <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "genus")
  genus <- as.character(genus)
  species <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "species")
  species <- as.character(species)
  IDs <- id
  no.samples <- length(IDs)
  family.list <- unique(family)
  no.family <- length(unique(family))
  genera.list <- unique(genus)
  no.genera <- length(unique(genus))
  species.list <- unique(species)
  no.species <- length(unique(species))
  basic.stat <- c(no.samples, no.family, no.genera, no.species)
  names(basic.stat) <- c("no.samples", "no.family", "no.genera", "no.species")
  taxonStat <- list(basic.stat = basic.stat,
                    family.list = family.list,
                    genera.list = genera.list,
                    species.list = species.list)

  ### 2. make random samples specified by the parameter "sample.level"
  ### 2.1 case "full"
  ref.selected <- NULL
  ref.left <- NULL
  if (sample.level == "full") {
    if (mode(ref) == "raw") { ### case COI (matrix)
      sampled <- sample(dim(ref)[1], size = dim(ref)[1] * sample.porp)
      ref.selected <- ref[sampled, ]
      ref.left <- ref[-sampled, ]
    } else { ### mode(ref)=="list", case ITS
      sampled <- sample(length(ref), size = length(ref) * sample.porp)
      ref.selected <- ref[sampled]
      ref.left <- ref[-sampled]
    }
  }

  ### 2.2 case "family"
  if (sample.level == "family") {
    if (family[1] == "NA") { ### "LS0909030M,Himalaea_unica": no family information!
      stop("Your data have no family information!!!")
    } else {
      sampleSpeNames <- NAMES(ref)
      mpattern <- ".+,"
      Spp <- gsub(mpattern, "", sampleSpeNames) # remove seqs names before "," (incl. ",")
      ### loop to deal with all families:
      ### initialize ref.selected and ref.left using the first family
      ref.selected <- NULL
      ref.left <- NULL
      if (mode(ref) == "raw") { ### case COI
        taxon.name <- paste(taxonStat$family.list[1], "_", sep = "")
        each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE), ]
        if (dim(each.taxon)[1] > 1) {
          sampled <- sample(dim(each.taxon)[1], size = dim(each.taxon)[1] * sample.porp)
          ref.selected <- each.taxon[sampled, ]
          ref.left <- each.taxon[-sampled, ]
        } else {
          ref.selected <- each.taxon
        }
        if (length(taxonStat$family.list) > 1) {
          for (i in 2:length(taxonStat$family.list)) {
            cat("i:", i, "\n")
            ### 2.2.1 extract all sequences for each family, put the chosen
            ### proportion into ref.selected and the rest into ref.left
            taxon.name <- paste(taxonStat$family.list[i], "_", sep = "")
            each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE), ]
            if (dim(each.taxon)[1] > 1) {
              sampled <- sample(dim(each.taxon)[1], size = dim(each.taxon)[1] * sample.porp)
              ref.selected.tmp <- each.taxon[sampled, ]
              ref.left.tmp <- each.taxon[-sampled, ]
              ref.selected <- rbind(ref.selected, ref.selected.tmp)
              if (length(ref.left) > 0 & length(ref.left.tmp) > 0) {
                ref.left <- rbind(ref.left, ref.left.tmp)
              } else {
                ref.left <- ref.left.tmp
              }
            } else {
              ref.selected.tmp <- each.taxon
              ref.selected <- rbind(ref.selected, ref.selected.tmp)
            }
          } ### the end of the i for-loop
        } ### the end of the if-length(taxonStat$family.list) block
      } else { ### mode(ref)=="list", case ITS
        taxon.name <- paste(taxonStat$family.list[1], "_", sep = "")
        each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE)]
        if (length(each.taxon) > 1) {
          sampled <- sample(length(each.taxon), size = length(each.taxon) * sample.porp)
          ref.selected <- each.taxon[sampled]
          ref.left <- each.taxon[-sampled]
        } else {
          ref.selected <- each.taxon
        }
        if (length(taxonStat$family.list) > 1) {
          for (i in 2:length(taxonStat$family.list)) {
            taxon.name <- paste(taxonStat$family.list[i], "_", sep = "")
            each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE)]
            if (length(each.taxon) > 1) {
              sampled <- sample(length(each.taxon), size = length(each.taxon) * sample.porp)
              ref.selected.tmp <- each.taxon[sampled]
              ref.left.tmp <- each.taxon[-sampled]
              ref.selected <- c(ref.selected, ref.selected.tmp)
              if (length(ref.left) > 0 & length(ref.left.tmp) > 0) {
                ref.left <- c(ref.left, ref.left.tmp)
              } else {
                ref.left <- ref.left.tmp
              }
            } else {
              ref.selected.tmp <- each.taxon
              ref.selected <- c(ref.selected, ref.selected.tmp)
            }
          } ### the end of the i for-loop
        } ### the end of the if-length(taxonStat$family.list) block
      }
    }
  }

  ### 2.3 case "genus"
  if (sample.level == "genus") {
    sampleSpeNames <- NAMES(ref)
    mpattern <- ".+,"
    Spp <- gsub(mpattern, "", sampleSpeNames) # remove seqs names before "," (incl. ",")
    ### loop to deal with all genera:
    ### initialize ref.selected and ref.left using the first genus
    ref.selected <- NULL
    ref.left <- NULL
    if (mode(ref) == "raw") { ### case COI
      taxon.name <- paste(taxonStat$genera.list[1], "_", sep = "")
      each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE), ]
      if (dim(each.taxon)[1] > 1) {
        sampled <- sample(dim(each.taxon)[1], size = dim(each.taxon)[1] * sample.porp)
        ref.selected <- each.taxon[sampled, ]
        ref.left <- each.taxon[-sampled, ]
      } else {
        ref.selected <- each.taxon
      }
      if (length(taxonStat$genera.list) > 1) {
        for (i in 2:length(taxonStat$genera.list)) {
          cat("i:", i, "\n")
          ### extract all sequences for each genus, put the chosen proportion
          ### into ref.selected and the rest into ref.left
          taxon.name <- paste(taxonStat$genera.list[i], "_", sep = "")
          each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE), ]
          if (dim(each.taxon)[1] > 1) {
            sampled <- sample(dim(each.taxon)[1], size = dim(each.taxon)[1] * sample.porp)
            ref.selected.tmp <- each.taxon[sampled, ]
            ref.left.tmp <- each.taxon[-sampled, ]
            ref.selected <- rbind(ref.selected, ref.selected.tmp)
            if (length(ref.left) > 0 & length(ref.left.tmp) > 0) {
              ref.left <- rbind(ref.left, ref.left.tmp)
            } else {
              ref.left <- ref.left.tmp
            }
          } else {
            ref.selected.tmp <- each.taxon
            ref.selected <- rbind(ref.selected, ref.selected.tmp)
          }
        } ### the end of the i for-loop
      } ### the end of the if-length(taxonStat$genera.list) block
    } else { ### mode(ref)=="list", case ITS
      taxon.name <- paste(taxonStat$genera.list[1], "_", sep = "")
      each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE)]
      if (length(each.taxon) > 1) {
        sampled <- sample(length(each.taxon), size = length(each.taxon) * sample.porp)
        ref.selected <- each.taxon[sampled]
        ref.left <- each.taxon[-sampled]
      } else {
        ref.selected <- each.taxon
      }
      if (length(taxonStat$genera.list) > 1) {
        for (i in 2:length(taxonStat$genera.list)) {
          taxon.name <- paste(taxonStat$genera.list[i], "_", sep = "")
          each.taxon <- ref[grep(taxon.name, Spp, value = FALSE, fixed = TRUE)]
          if (length(each.taxon) > 1) {
            sampled <- sample(length(each.taxon), size = length(each.taxon) * sample.porp)
            ref.selected.tmp <- each.taxon[sampled]
            ref.left.tmp <- each.taxon[-sampled]
            ref.selected <- c(ref.selected, ref.selected.tmp)
            if (length(ref.left) > 0 & length(ref.left.tmp) > 0) {
              ref.left <- c(ref.left, ref.left.tmp)
            } else {
              ref.left <- ref.left.tmp
            }
          } else {
            ref.selected.tmp <- each.taxon
            ref.selected <- c(ref.selected, ref.selected.tmp)
          }
        } ### the end of the i for-loop
      } ### the end of the if-length(taxonStat$genera.list) block
    }
  }

  ### 2.4 case "species"
  if (sample.level == "species") {
    sampleSpeNames <- NAMES(ref)
    mpattern <- ".+,"
    Spp <- gsub(mpattern, "", sampleSpeNames) # remove seqs names before "," (incl. ",")
    Spp_tmp <- paste(Spp, "_", sep = "")
    ### loop to deal with all species:
    ### initialize ref.selected and ref.left using the first species
    ref.selected <- NULL
    ref.left <- NULL
    if (mode(ref) == "raw") { ### case COI
      taxon.name <- paste(taxonStat$species.list[1], "_", sep = "")
      each.taxon <- ref[grep(taxon.name, Spp_tmp, value = FALSE, fixed = TRUE), ]
      if (dim(each.taxon)[1] > 1) {
        sampled <- sample(dim(each.taxon)[1], size = dim(each.taxon)[1] * sample.porp)
        ref.selected <- each.taxon[sampled, ]
        ref.left <- each.taxon[-sampled, ]
      } else {
        ref.selected <- each.taxon
        cat("just one sample!\n")
      }
      if (length(taxonStat$species.list) > 1) {
        for (i in 2:length(taxonStat$species.list)) {
          cat("i:", i, "\n")
          ### extract all sequences for each species, put the chosen proportion
          ### into ref.selected and the rest into ref.left
          taxon.name <- paste(taxonStat$species.list[i], "_", sep = "")
          each.taxon <- ref[grep(taxon.name, Spp_tmp, value = FALSE, fixed = TRUE), ]
          if (dim(each.taxon)[1] > 1) {
            sampled <- sample(dim(each.taxon)[1], size = dim(each.taxon)[1] * sample.porp)
            ref.selected.tmp <- each.taxon[sampled, ]
            ref.left.tmp <- each.taxon[-sampled, ]
            ref.selected <- rbind(ref.selected, ref.selected.tmp)
            if (length(ref.left) > 0 & length(ref.left.tmp) > 0) {
              ref.left <- rbind(ref.left, ref.left.tmp)
            } else {
              ref.left <- ref.left.tmp
            }
          } else {
            ref.selected.tmp <- each.taxon
            ref.selected <- rbind(ref.selected, ref.selected.tmp)
          }
        } ### the end of the i for-loop
      } ### the end of the if-length(taxonStat$species.list) block
    } else { ### mode(ref)=="list", case ITS
      taxon.name <- paste(taxonStat$species.list[1], "_", sep = "")
      each.taxon <- ref[grep(taxon.name, Spp_tmp, value = FALSE, fixed = TRUE)]
      if (length(each.taxon) > 1) {
        sampled <- sample(length(each.taxon), size = length(each.taxon) * sample.porp)
        ref.selected <- each.taxon[sampled]
        ref.left <- each.taxon[-sampled]
      } else {
        ref.selected <- each.taxon
      }
      if (length(taxonStat$species.list) > 1) {
        for (i in 2:length(taxonStat$species.list)) {
          taxon.name <- paste(taxonStat$species.list[i], "_", sep = "")
          each.taxon <- ref[grep(taxon.name, Spp_tmp, value = FALSE, fixed = TRUE)]
          if (length(each.taxon) > 1) {
            sampled <- sample(length(each.taxon), size = length(each.taxon) * sample.porp)
            ref.selected.tmp <- each.taxon[sampled]
            ref.left.tmp <- each.taxon[-sampled]
            ref.selected <- c(ref.selected, ref.selected.tmp)
            if (length(ref.left) > 0 & length(ref.left.tmp) > 0) {
              ref.left <- c(ref.left, ref.left.tmp)
            } else {
              ref.left <- ref.left.tmp
            }
          } else {
            ref.selected.tmp <- each.taxon
            ref.selected <- c(ref.selected, ref.selected.tmp)
          }
        } ### the end of the i for-loop
      } ### the end of the if-length(taxonStat$species.list) block
    }
  }

  out <- list(ref.selected = ref.selected, ref.left = ref.left)
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/sample.ref.R
#' Save Identifications
#'
#' @description Output identified results to an outfile in the temporary
#' directory (found by the tempdir() function).
#'
#' @param outfile character string to indicate the outfile name.
#' @param ids object of class "BarcodingR", which contains identified taxon information.
#'
#' @return no value returned, but an output file is written.
#' @keywords save.ids
#' @export
#' @author Ai-bing ZHANG, PhD. CNU, Beijing, CHINA.
#' @references zhangab2008(at)mail.cnu.edu.cn
#' @seealso barcoding.spe.identify()
#' @examples
#'
#' data(TibetanMoth)
#' ref<-as.DNAbin(as.character(TibetanMoth[1:50,]))
#' que<-as.DNAbin(as.character(TibetanMoth[50:60,]))
#' bsi<-barcoding.spe.identify(ref, que, method = "fuzzyId")
#' bsi
#' save.ids(outfile="identified.txt",bsi)
save.ids <- function(outfile = "identified.txt", ids) {
  if (class(ids) != "BarcodingR") stop("A BarcodingR object is required for ids!!!")
  Rhome <- tempdir() ### 2020/4/13 21:06:46
  fileName <- outfile
  fileName <- paste(Rhome, fileName, sep = "/") ### 2020/4/13 21:06:46
  outfile <- fileName
  x <- ids$output_identified
  write.table(x, file = outfile, append = FALSE, quote = FALSE, sep = " ",
              eol = "\n", na = "NA", dec = ".", row.names = TRUE,
              col.names = TRUE, qmethod = c("escape", "double"),
              fileEncoding = "")
}
# save.ids(outfile="out.txt",bsi)
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/save.ids.R
#' Summarize Reference Data
#'
#' @description Summarize taxon information, sequence statistics, and barcode
#' numbers per species for a reference dataset.
#'
#' @param ref object of class "DNAbin" used as a reference dataset, which contains taxon information, or just an array
#' containing taxon information only.
#' @param taxonStat logical value to indicate whether taxon statistics are calculated.
#' @param seqStat logical value to indicate whether sequence statistics are calculated (only when ref is of class "DNAbin").
#' @param barcodeStat logical value to indicate whether barcoding statistics are calculated (only when ref is of class "DNAbin").
#' @return a list containing taxon statistics, sequence statistics, and barcoding statistics.
#' @keywords summarize ref
#' @export
#' @author Ai-bing ZHANG, Meng-di HAO, CNU, Beijing, CHINA.
#' @references zhangab2008(at)mail.cnu.edu.cn / zhangab2008(at)gmail.com
#' @examples
#'
#' data(TibetanMoth)
#' s.r<-summarize.ref(TibetanMoth,taxonStat=TRUE,seqStat=TRUE,barcodeStat=TRUE)
#' s.r
summarize.ref <- function(ref, taxonStat = TRUE, seqStat = TRUE, barcodeStat = TRUE) {
  NAMES <- function(seqs) {
    if (mode(seqs) == "raw") {
      SeqNames <- attr(seqs, "dimnames")[[1]]
    } else { ### mode(seqs)=="list"
      SeqNames <- names(seqs)
    }
    if (length(SeqNames) == 0) {
      stop("the mode(seqs) is wrong!")
    } else {
      return(SeqNames)
    }
  }
  if (class(ref) != "DNAbin" && class(ref) != "character")
    stop("ref should be in DNAbin format! or an array containing sequences IDs!")

  ### 1. taxon statistics:
  if (taxonStat == TRUE) {
    if (class(ref) == "DNAbin") {
      sampleSpeNames <- NAMES(ref)
    } else {
      sampleSpeNames <- ref
    }
    taxonInfoExtraction <- function(seqLables, returnValue = "id") {
      if (class(seqLables) != "character") stop("seqLables is not character!")
      id <- strsplit(seqLables, ",")[[1]][1]
      taxon <- strsplit(seqLables, ",")[[1]][2]
      taxa <- strsplit(taxon, "_")
      taxa <- taxa[[1]]
      if (length(taxa) == 3) { ### with family information, "LS0909030M,Noctuidae_Himalaea_unica"
        family <- taxa[1]
        genus <- paste(taxa[1], taxa[2], sep = "_")
        species <- paste(taxa[2], taxa[3], sep = "_")
      } else { ### no family information, "LS0909030M,Himalaea_unica"
        family <- "NA"
        genus <- taxa[1]
        species <- paste(taxa[1], taxa[2], sep = "_")
      }
      if (returnValue == "id") return(id)
      if (returnValue == "family") return(family)
      if (returnValue == "genus") return(genus)
      if (returnValue == "species") return(species)
    }
    id <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "id")
    family <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "family") ### returns a list!
    family <- as.character(family)
    genus <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "genus")
    genus <- as.character(genus)
    species <- sapply(sampleSpeNames, taxonInfoExtraction, returnValue = "species")
    species <- as.character(species)
    sample.sizes <- list(family = table(family), genus = table(genus),
                         species = table(species))
    if (family[1] != "NA") { ### "LS0909030M,Noctuidae_Himalaea_unica"
      IDs <- id
      no.samples <- length(IDs)
      family.list <- unique(family)
      no.family <- length(unique(family))
      genera.list <- unique(genus)
      no.genera <- length(unique(genus))
      species.list <- unique(species)
      no.species <- length(unique(species))
      basic.stat <- c(no.samples, no.family, no.genera, no.species)
      names(basic.stat) <- c("no.samples", "no.family", "no.genera", "no.species")
      taxonStat <- list(basic.stat = basic.stat,
                        family.list = family.list,
                        genera.list = genera.list,
                        species.list = species.list,
                        sample.sizes = sample.sizes)
    } else { ### "LS0909030M,Himalaea_unica"
      IDs <- id
      no.samples <- length(IDs)
      genera.list <- unique(genus)
      no.genera <- length(unique(genus))
      species.list <- unique(species)
      no.species <- length(unique(species))
      basic.stat <- c(no.samples, no.genera, no.species)
      names(basic.stat) <- c("no.samples", "no.genera", "no.species")
      taxonStat <- list(basic.stat = basic.stat,
                        genera.list = genera.list,
                        species.list = species.list,
                        sample.sizes = sample.sizes)
    } ### the end of else
  } else { taxonStat <- NULL }

  ### 2. sequence statistics:
  if (seqStat == TRUE && class(ref) == "DNAbin") {
    no.seqs <- dim(ref)[1]
    length.seqs <- dim(ref)[2]
    ifelse(length(no.seqs) != 0, no.seqs <- no.seqs, no.seqs <- length(ref))
    ifelse(length(length.seqs) != 0, length.seqs <- length.seqs,
           length.seqs <- mean(as.numeric(summary(ref)[, 1])))
    seqStat <- c(no.seqs, length.seqs)
    names(seqStat) <- c("no.seqs", "length.seqs")
  } else { seqStat <- NULL }

  ### 3. barcode statistics: barcodes/species (min, max, mean per species)
  if (barcodeStat == TRUE && class(ref) == "DNAbin") {
    sampleSpeNames <- NAMES(ref)
    mpattern <- ".+,"
    Spp <- gsub(mpattern, "", sampleSpeNames) # remove seqs names before "," (incl. ",")
    Spp_tmp <- paste(Spp, "_", sep = "")
    f <- factor(Spp_tmp)
    pop.size <- numeric(length(levels(f)))
    for (i in 1:length(levels(f))) {
      taxon.name <- paste(levels(f)[i], "_", sep = "")
      samplesInOneSpe <- list(grep(levels(f)[i], Spp_tmp, value = TRUE))
      pop.size[i] <- length(samplesInOneSpe[[1]])
    }
    min.popSize <- min(pop.size)
    max.popSize <- max(pop.size)
    mean.popSize <- mean(pop.size)
    basic.stat <- c(min.popSize, max.popSize, mean.popSize)
    names(basic.stat) <- c("min.popSize", "max.popSize", "mean.popSize")
    barcodeStat <- list(basic.stat = basic.stat,
                        pop.size = data.frame(species = levels(f),
                                              pop.size = pop.size,
                                              stringsAsFactors = TRUE))
  } else { barcodeStat <- NULL }

  out <- list(taxonStat = taxonStat,
              seqStat = seqStat,
              barcodeStat = barcodeStat)
  return(out)
}
/scratch/gouwar.j/cran-all/cranData/BarcodingR/R/summarize.ref.R
`barnard.test` <- function(n1, n2, n3, n4, dp = 0.001, pooled = TRUE) {
  n1 <- abs(as.integer(n1))
  n2 <- abs(as.integer(n2))
  n3 <- abs(as.integer(n3))
  n4 <- abs(as.integer(n4))
  conmat <- matrix(c(n1, n2, n3, n4), ncol = 2, byrow = TRUE,
                   dimnames = list(c("Outcome I", "Outcome II"),
                                   c("Treatment I", "Treatment II")))
  alternative <- c("One sided", "Two sided")
  if (any(rowSums(conmat) == 0) || any(colSums(conmat) == 0)) {
    warning("No observations found in at least one category")
    return(list(contingency.matrix = conmat,
                alternative = alternative,
                p.value = c(1, 1)))
  }
  vec.size <- 1.0 + 1.0 / dp
  mat.size <- 4.0 * (n1 + n3 + 1) * (n2 + n4 + 1) - 4.0 * 2.0 # correction for (0,0) and (n1+n3,n2+n4)
  statSW <- c("WaldS", "ScoreS")[1 + pooled]
  ret1 <- .C(statSW,
             as.integer(n1), as.integer(n2), as.integer(n3), as.integer(n4),
             as.numeric(dp),
             mat.size = as.integer(0),
             statistic.table = as.double(vector("double", mat.size)),
             statistic = as.double(0.0))
  xr <- seq(1, ret1$mat.size, 4) + 2
  ret1$statistic.table[xr + 1][(ret1$statistic <= 0 & ret1$statistic.table[xr] <= ret1$statistic) |
                                 (ret1$statistic >= 0 & ret1$statistic.table[xr] >= ret1$statistic)] <- 1
  ret1$statistic.table[xr + 1][(ret1$statistic <= 0 & ret1$statistic.table[xr] >= -ret1$statistic) |
                                 (ret1$statistic >= 0 & ret1$statistic.table[xr] <= -ret1$statistic)] <- 2
  ret2 <- .C("Barnard",
             as.integer(n1), as.integer(n2), as.integer(n3), as.integer(n4),
             as.numeric(dp),
             as.integer(ret1$mat.size),
             nuisance.vector.x = as.double(vector("double", vec.size)),
             nuisance.vector.y0 = as.double(vector("double", vec.size)),
             nuisance.vector.y1 = as.double(vector("double", vec.size)),
             statistic.table = as.double(ret1$statistic.table),
             NAOK = TRUE)
  np0 <- which.max(ret2$nuisance.vector.y0)
  np1 <- which.max(ret2$nuisance.vector.y1)
  nuisance.matrix <- matrix(cbind(ret2$nuisance.vector.x,
                                  ret2$nuisance.vector.y0,
                                  ret2$nuisance.vector.y1), ncol = 3)
  statistic.table <- matrix(ret1$statistic.table, ncol = 4, byrow = TRUE,
                            dimnames = list(c(), c("n1", "n2", "statistic",
                                                   "include.in.p.value")))
  cat(sprintf("\nBarnard's Unconditional Test\n"), sep = "\n")
  print(conmat)
  cat(sprintf(
    "\nNull hypothesis: Treatments have no effect on the outcomes\n%s statistic = %g\nNuisance parameter = %g (%s), %g (%s)\nP-value = %g (%s), %g (%s)\n",
    c("Wald", "Score")[1 + pooled],
    ret1$statistic,
    ret2$nuisance.vector.x[np0], alternative[1],
    ret2$nuisance.vector.x[np1], alternative[2],
    ret2$nuisance.vector.y0[np0], alternative[1],
    ret2$nuisance.vector.y1[np1], alternative[2]
  ), sep = "\n")
  return(invisible(
    list(statistic.table = statistic.table,
         nuisance.matrix = nuisance.matrix,
         dp = dp,
         contingency.matrix = conmat,
         alternative = alternative,
         statistic = ret1$statistic,
         nuisance.parameter = ret2$nuisance.vector.x[c(np0, np1)],
         p.value = c(ret2$nuisance.vector.y0[np0], ret2$nuisance.vector.y1[np1]),
         pooled = pooled)
  ))
}
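# A minimal usage sketch (kept as comments; the 2x2 counts are illustrative
# only). Counts are passed row-wise as n1, n2 (first row) and n3, n4 (second):
# barnard.test(8, 14, 1, 3)                   # pooled (Score) statistic
# barnard.test(8, 14, 1, 3, pooled = FALSE)   # unpooled (Wald) statistic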
/scratch/gouwar.j/cran-all/cranData/Barnard/R/barnard.test.R
`barnardw.test` <- function(n1, n2, n3, n4, dp = 0.001) {
  return(barnard.test(n1, n2, n3, n4, dp = dp, pooled = TRUE))
}
/scratch/gouwar.j/cran-all/cranData/Barnard/R/barnardw.test.R
#' @include validity.R
NULL

# S4 classes ###

#' A tidy class to represent a set
#'
#' A set is a group of unique elements; it can be either a fuzzy set, where
#' each relationship has a membership value between 0 and 1, or a nominal set.
#'
#' When printed, elements or sets without any relationship are not shown.
#' Sets can be created from lists, matrices or data.frames. Check the
#' [tidySet()] constructor for more information.
#' @slot relations A data.frame with elements and the sets they belong to.
#' @slot elements A data.frame of unique elements and related information.
#' @slot sets A data.frame of unique sets and related information.
#' @aliases TidySet
#' @export
#' @seealso \link{tidySet}
#' @family methods
#' @examples
#' x <- list("A" = letters[1:5], "B" = LETTERS[3:7])
#' a <- tidySet(x)
#' a
#' x <- list("A" = letters[1:5], "B" = character())
#' b <- tidySet(x)
#' b
#' name_sets(b)
setClass(
    "TidySet",
    representation(
        elements = "data.frame",
        sets = "data.frame",
        relations = "data.frame"
    ),
    validity = is.valid
)
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/AllClasses.R
#' @include AllClasses.R
#' @importFrom methods setGeneric
#' @importFrom methods as
#' @importFrom methods new
#' @importFrom methods slot
#' @importFrom methods slot<-
#' @importFrom methods validObject
#' @importFrom methods show
NULL

#' Elements of the TidySet
#'
#' Given a TidySet, retrieve the elements or substitute them.
#' @param object A TidySet object.
#' @param value Modification of the elements.
#' @return A \code{data.frame} with information about the elements.
#' @export elements
#' @seealso [nElements()]
#' @family slots
#' @family methods
#' @aliases elements<-
#' @examples
#' TS <- tidySet(list(A = letters[1:5], B = letters[2:10]))
#' elements(TS)
methods::setGeneric("elements", function(object) standardGeneric("elements"))

#' Sets of the TidySet
#'
#' Given a TidySet, retrieve the sets or substitute them.
#' @param object A \code{TidySet} object.
#' @param value Modification of the sets.
#' @return A \code{data.frame} with information from the sets.
#' @export sets
#' @aliases sets<-
#' @seealso [nSets()]
#' @family slots
#' @family methods
#' @examples
#' TS <- tidySet(list(A = letters[1:5], B = letters[2:10]))
#' sets(TS)
methods::setGeneric("sets", function(object) standardGeneric("sets"))

#' Check if a TidySet is fuzzy
#'
#' Check if there are fuzzy sets. A fuzzy set is a set where the relationship
#' between elements is given by a probability (or uncertainty).
#' @param object Object to be coerced or tested.
#' @return A logical value.
#' @export is.fuzzy
#' @family methods
#' @examples
#' TS <- tidySet(list(A = letters[1:5], B = letters[2:10]))
#' is.fuzzy(TS)
methods::setGeneric("is.fuzzy", function(object) standardGeneric("is.fuzzy"))

#' Number of sets
#'
#' Check the number of sets of the TidySet.
#' @param object Object to be coerced or tested.
#' @export nSets
#' @family count functions
#' @family methods
#' @return The number of sets present.
#' @examples
#' TS <- tidySet(list(A = letters[1:2], B = letters[5:7]))
#' nSets(TS)
methods::setGeneric("nSets", function(object) standardGeneric("nSets"))

#' Number of elements
#'
#' Check the number of elements of the TidySet.
#' @param object Object to be coerced or tested.
#' @return A numeric value with the number of elements.
#' @export nElements
#' @family count functions
#' @family methods
#' @examples
#' TS <- tidySet(list(A = letters[1:2], B = letters[5:7]))
#' nElements(TS)
methods::setGeneric("nElements", function(object) standardGeneric("nElements"))

#' Number of relations
#'
#' Check the number of relations of the TidySet.
#' @param object Object to be coerced or tested.
#' @return A numeric value with the number of relations.
#' @export nRelations
#' @family count functions
#' @family methods
#' @examples
#' TS <- tidySet(list(A = letters[1:2], B = letters[5:7]))
#' nRelations(TS)
methods::setGeneric("nRelations", function(object) standardGeneric("nRelations"))

#' Incidence
#'
#' Check which elements are in which sets.
#' @param object Object to be coerced or tested.
#' @return A matrix with the elements as rows and the sets as columns.
#' @export incidence
#' @family methods
#' @seealso [adjacency()], [tidySet()]
methods::setGeneric("incidence", function(object) standardGeneric("incidence"))

#' Relations of the TidySet
#'
#' Given a TidySet, retrieve the relations or substitute them.
#' @param object Object to be coerced or tested.
#' @param value Modification of the relations.
#' @return A \code{data.frame} with information about the relations between
#' elements and sets.
#' @family slots
#' @family methods
#' @seealso [nRelations()]
#' @export
#' @examples
#' TS <- tidySet(list(A = letters[1:2], B = letters[5:7]))
#' relations(TS)
methods::setGeneric("relations", function(object) standardGeneric("relations"))

#' @rdname relations
#' @export
methods::setGeneric("relations<-", function(object, value) standardGeneric("relations<-"))

#' @rdname elements
#' @export
methods::setGeneric("elements<-", function(object, value) standardGeneric("elements<-"))

#' @rdname sets
#' @export
methods::setGeneric("sets<-", function(object, value) standardGeneric("sets<-"))

#' Calculates the size of a set
#'
#' Assuming that the fuzzy values are probabilities,
#' calculates the probability of a given set being of different sizes.
#' @param object A TidySet object.
#' @param sets The sets from which the size is calculated.
#' @return A list with the size of the set or the probability of having that
#' size.
#' @export set_size
#' @seealso cardinality
#' @family sizes
#' @family methods
methods::setGeneric("set_size", function(object, sets = NULL) standardGeneric("set_size"))

#' Calculates the size of the elements
#'
#' Assuming that the fuzzy values are probabilities, calculates the probability
#' of being of different sizes for a given element.
#' @param object A TidySet object.
#' @param elements The elements from which the size is calculated.
#' @return A list with the size of the elements or the probability of having
#' that size.
#' @family sizes
#' @seealso cardinality
#' @export element_size
#' @family methods
methods::setGeneric("element_size", function(object, elements = NULL) standardGeneric("element_size"))

#' Rename sets
#'
#' Change the default names of sets.
#' @param object A TidySet object.
#' @param old A character vector of names to be renamed.
#' @param new A character vector with the new names.
#' @return A \code{TidySet} object.
#' @family renames
#' @family names
#' @seealso [name_sets()]
#' @export
#' @family methods
#' @examples
#' x <- list("A" = letters[1:5], "B" = letters[3:7])
#' TS <- tidySet(x)
#' name_sets(TS)
#' TS2 <- rename_set(TS, "A", "C")
#' name_sets(TS2)
methods::setGeneric("rename_set", function(object, old, new) standardGeneric("rename_set"))

#' Rename elements
#'
#' Change the default names of elements.
#' @param object A TidySet object.
#' @param old A character vector of names to be renamed.
#' @param new A character vector with the new names.
#' @return A \code{TidySet} object.
#' @family renames
#' @family names
#' @seealso [name_elements()]
#' @export
#' @family methods
#' @examples
#' x <- list("A" = letters[1:5], "B" = letters[3:7])
#' TS <- tidySet(x)
#' name_elements(TS)
#' TS2 <- rename_elements(TS, "a", "first")
#' name_elements(TS2)
methods::setGeneric("rename_elements", function(object, old, new) standardGeneric("rename_elements"))

#' Name sets
#'
#' Retrieve the names of the sets.
#' @param object A TidySet object.
#' @param value A character with the new names for the sets.
#' @return A character vector with the names of the sets.
#' @family names
#' @family methods
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' name_sets(TS)
methods::setGeneric("name_sets", function(object) standardGeneric("name_sets"))

#' Name elements
#'
#' Retrieve the names of the elements.
#' @param object A TidySet object.
#' @param value A character with the new names for the elements.
#' @return A character vector with the names of the elements.
#' @family names
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' name_elements(TS)
methods::setGeneric("name_elements", function(object) standardGeneric("name_elements"))

#' Rename elements
#'
#' Rename elements.
#' @param object A TidySet object.
#' @param value A character with the new names for the elements.
#' @return A \code{TidySet} object.
#' @family names
#' @family methods
#' @seealso [rename_elements()]
#' @export
#' @aliases name_elements<-
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' TS
#' name_elements(TS) <- letters[1:6]
methods::setGeneric("name_elements<-", function(object, value) standardGeneric("name_elements<-"))

#' Rename sets
#'
#' Rename sets.
#' @param object A TidySet object.
#' @param value A character with the new names for the sets.
#' @return A \code{TidySet} object.
#' @family names
#' @family methods
#' @seealso [rename_set()]
#' @export
#' @aliases name_sets<-
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("a", 5), "b"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' TS
#' name_sets(TS) <- LETTERS[1:2]
methods::setGeneric("name_sets<-", function(object, value) standardGeneric("name_sets<-"))

#' Add column
#'
#' Add a column to a slot of the TidySet object.
#' @param object A TidySet object.
#' @param slot A TidySet slot.
#' @param columns The columns to add.
#' @return A \code{TidySet} object.
#' @family column
#' @family methods
#' @seealso [rename_set()]
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("a", 5), "b"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' add_column(TS, "relations", data.frame(well = c(
#'     "GOOD", "BAD", "WORSE",
#'     "UGLY", "FOE", "HEY"
#' )))
methods::setGeneric("add_column", function(object, slot, columns) standardGeneric("add_column"))

#' Remove column
#'
#' Removes a column from a slot of the TidySet object.
#' @param object A TidySet object.
#' @param slot A TidySet slot.
#' @param column_names The name of the columns.
#' @return A \code{TidySet} object.
#' @family column
#' @family methods
#' @seealso [rename_set()]
#' @export
#' @examples
#' x <- data.frame(sets = c(rep("A", 5), rep("B", 5)),
#'     elements = c(letters[1:5], letters[3:7]),
#'     extra = sample(c("YES", "NO"), 10, replace = TRUE))
#' TS <- tidySet(x)
#' TS
#' remove_column(TS, "relations", "extra")
methods::setGeneric("remove_column", function(object, slot, column_names) standardGeneric("remove_column"))

#' Intersection of two or more sets
#'
#' Given a TidySet, creates a new set with the elements common to all of them,
#' following the logic defined in FUN.
#'
#' The default uses the `min` function following the [standard fuzzy
#' definition](https://en.wikipedia.org/wiki/Fuzzy_set_operations), but it can
#' be changed.
#' @param sets A character vector with the sets to be intersected.
#' @param name The name of the new set. By default joins the sets with an
#' \ifelse{latex}{\out{$\cup$}}{\ifelse{html}{\out{&cup;}}{}}.
#' @inheritParams union
#' @param FUN A function to be applied when performing the intersection.
#' The standard intersection is the "min" function, but you can provide any
#' other function that given a numeric vector returns a single number.
#' @param keep A logical value if you want to keep the original sets.
#' @param ... Other named arguments passed to `FUN`.
#' @return A \code{TidySet} object.
#' @export
#' @family methods that create new sets
#' @family methods
#' @aliases intersect
#' @examples
#' rel <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = c("a", "b", "c", "d", "f", "f")
#' )
#' TS <- tidySet(rel)
#' intersection(TS, c("A", "B")) # Default Name
#' intersection(TS, c("A", "B"), "C") # Set the name
#' # Fuzzy set
#' rel <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = c("a", "b", "c", "d", "f", "f"),
#'     fuzzy = runif(6)
#' )
#' TS2 <- tidySet(rel)
#' intersection(TS2, c("A", "B"), "C")
#' intersection(TS2, c("A", "B"), "C", FUN = function(x){max(sqrt(x))})
methods::setGeneric("intersection", function(object, sets, ...) standardGeneric("intersection"))

#' Add relations
#'
#' Given a TidySet, adds new relations between elements and sets.
#' @param object A TidySet object.
#' @param relations A data.frame object.
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @return A \code{TidySet} object.
#' @family add functions
#' @export
#' @family methods
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' relations <- data.frame(
#'     sets = c(rep("A2", 5), "B2"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6),
#'     new = runif(6)
#' )
#' add_relation(TS, relations)
methods::setGeneric("add_relation", function(object, relations, ...) standardGeneric("add_relation"))

#' Remove a relation
#'
#' Given a TidySet, removes relations between elements and sets.
#' @param object A TidySet object.
#' @param elements The elements of the relations.
#' @param sets The sets of the relations to be removed.
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @return A \code{TidySet} object.
#' @family remove functions
#' @family methods
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' remove_relation(TS, "a", "A")
methods::setGeneric("remove_relation", function(object, elements, sets, ...) standardGeneric("remove_relation"))

#' Remove elements
#'
#' Given a TidySet, removes elements and the related relations and, if
#' required, also the sets.
#' @param object A TidySet object.
#' @param elements The elements to be removed.
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @return A \code{TidySet} object.
#' @export
#' @family remove functions
#' @family methods
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' remove_element(TS, "c")
methods::setGeneric("remove_element", function(object, elements, ...) standardGeneric("remove_element"))

#' Remove sets
#'
#' Given a TidySet, removes sets and the related relations and, if
#' required, also the elements.
#' @param object A TidySet object.
#' @param sets The sets to be removed.
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @return A \code{TidySet} object.
#' @export
#' @family remove functions
#' @family methods
#' @examples
#' relations <- data.frame(
#'     sets = c("A", "A", "B", "B", "C", "C"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' remove_set(TS, "B")
methods::setGeneric("remove_set", function(object, sets, ...) standardGeneric("remove_set"))

#' Complement of a set
#'
#' Return the complement for a set.
#' @param object A TidySet object.
#' @param sets The name of the set to look for the complement.
#' @param keep Logical value to keep all the other sets.
#' @param name Name of the new set. By default it adds a "C".
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @inheritParams union
#' @return A \code{TidySet} object.
#' @family complements
#' @family methods that create new sets
#' @family methods
#' @seealso [filter()]
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c("A", "A", "B", "B", "C", "C"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' complement_set(TS, "A")
methods::setGeneric("complement_set", function(object, sets, ...) standardGeneric("complement_set"))

#' Cardinality or membership of sets
#'
#' Calculates the membership of sets according to the logic defined in FUN.
#' @param object A TidySet object.
#' @param sets Character vector with the name of the sets.
#' @param ... Other arguments passed to the method.
#' @return A \code{data.frame} with the cardinality of the selected sets.
#' @export
#' @seealso [size()]
#' @examples
#' rel <- list(A = letters[1:3], B = letters[1:2])
#' TS <- tidySet(rel)
#' cardinality(TS, "A")
methods::setGeneric("cardinality", function(object, sets = NULL, ...) standardGeneric("cardinality"))

#' Complement of elements
#'
#' Return the objects without the elements listed.
#' @param object A TidySet object.
#' @param elements The elements to look for the complement.
#' @inheritParams complement_set
#' @return A \code{TidySet} object.
#' @family complements
#' @family methods that create new sets
#' @family methods
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c("A", "A", "B", "B", "C", "C"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' complement_element(TS, "a", "C_a")
#' complement_element(TS, "a", "C_a", keep = FALSE)
methods::setGeneric("complement_element", function(object, elements, ...) standardGeneric("complement_element"))

#' Subtract
#'
#' Elements in a set not present in the other set. Equivalent to
#' [setdiff()].
#' @param object A TidySet object.
#' @param set_in Name of the sets where the elements should be present.
#' @param not_in Name of the sets where the elements should not be present.
#' @inheritParams complement_set
#' @return A \code{TidySet} object.
#' @family complements
#' @family methods that create new sets
#' @family methods
#' @seealso [setdiff()]
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c("A", "A", "B", "B", "C", "C"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(relations)
#' subtract(TS, "A", "B")
#' subtract(TS, "A", "B", keep = FALSE)
methods::setGeneric("subtract", function(object, set_in, not_in, ...) standardGeneric("subtract"))

#' Move columns between slots
#'
#' Moves information from one slot to other slots.
#' For instance from the sets to the relations.
#' @param object A TidySet object.
#' @param from The name of the slot where the content is.
#' @param to The name of the slot to move the content to.
#' @param columns The name of the columns that should be moved.
#' @return A TidySet object where the content is moved from one slot to another.
#' @family methods
#' @examples
#' x <- list("A" = c("a" = 0.1, "b" = 0.5), "B" = c("a" = 0.2, "b" = 1))
#' TS <- tidySet(x)
#' TS <- mutate_element(TS, b = runif(2))
#' TS2 <- move_to(TS, from = "elements", to = "relations", "b")
#' # Note that apparently we haven't changed anything:
#' TS2
methods::setGeneric("move_to", function(object, from, to, columns) standardGeneric("move_to"))
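# Commented sketch of the fuzzy-size semantics assumed by set_size() above:
# reading fuzzy memberships as independent probabilities, a set with two
# members at fuzzy = 0.5 has P(size = 0) = 0.25, P(1) = 0.5, P(2) = 0.25.
# relations <- data.frame(sets = "A", elements = c("a", "b"), fuzzy = c(0.5, 0.5))
# TS <- tidySet(relations)
# set_size(TS)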
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/AllGenerics.R
#' @docType package
#' @keywords internal
#' @details
#' It provides a class [`TidySet`] with methods to do operations with sets.
#' @examples
#' set <- list("A" = letters[1:5], "B" = letters[4:7])
#' TS <- tidySet(set)
#' cardinality(TS)
#' intersection(TS, c("A", "B"))
"_PACKAGE"

## usethis namespace: start
## usethis namespace: end
NULL
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/BaseSet-package.R
#' @include AllClasses.R
NULL

#' Import GMT (Gene Matrix Transposed) files
#'
#' The GMT (Gene Matrix Transposed) file format is a tab-delimited file format
#' that describes groups of genes. In this format, each row represents a group.
#' Each group is described by a name, a description, and the genes in it.
#' @param con File name of the GMT file.
#' @param sep GMT file field separator, by default tabs.
#' @param ... Other arguments passed to `readLines`.
#' @return A TidySet object.
#' @family IO functions
#' @references
#' The file format is defined by the Broad Institute [here](https://software.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats#GMT:_Gene_Matrix_Transposed_file_format_.28.2A.gmt.29)
#' @export
#' @examples
#' gmtFile <- system.file(
#'     package = "BaseSet", "extdata",
#'     "hallmark.gene.symbol.gmt"
#' )
#' gs <- getGMT(gmtFile)
#' nRelations(gs)
#' nElements(gs)
#' nSets(gs)
getGMT <- function(con, sep = "\t", ...) {
    lines <- strsplit(readLines(con, ...), sep)
    if (any(lengths(lines) < 2)) {
        stop("all records in the GMT file must have >= 2 fields", call. = FALSE)
    }
    dups <- new.env(parent = emptyenv())
    lines <- lapply(lines, function(elt, dups) {
        if (any(d <- duplicated(elt[-c(1, 2)]))) {
            dups[[elt[[1]]]] <- unique(elt[-c(1, 2)][d])
            elt <- c(elt[c(1, 2)], unique(elt[-c(1, 2)]))
        }
        elt
    }, dups)
    if (length(dups)) {
        stop("The file contains duplicate ids for the same set", call. = FALSE)
    }
    names(lines) <- vapply(lines, "[", i = 1, character(1L))
    links <- vapply(lines, "[", i = 2, character(1L))
    lines <- lapply(lines, function(x) {
        x[seq(from = 3, to = length(x))]
    })
    TS <- tidySet(lines)
    df <- data.frame(links = links, stringsAsFactors = FALSE)
    TS <- add_column(TS, "sets", df)
    TS
}
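# Commented sketch of the expected input (hypothetical file content): each
# line is "<set name>\t<description or link>\t<gene1>\t<gene2>...".
# tmp <- tempfile(fileext = ".gmt")
# writeLines(c("SET1\thttp://example.org\tGENE1\tGENE2\tGENE3",
#              "SET2\tNA\tGENE2\tGENE4"), tmp)
# TS <- getGMT(tmp)
# nSets(TS) # 2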
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/GMT.R
#' @include AllClasses.R AllGenerics.R
NULL

#' Convert GSEABase classes to a TidySet
#' @param object A GeneSetCollection or a GeneSet derived object.
#' @return A TidySet object.
#' @export
tidy <- function(object) {
    UseMethod("tidy")
}

#' @export
tidy.default <- function(object) {
    tidySet(object)
}

#' @describeIn tidy Converts to a tidySet given a GeneSetCollection
#' @export
#' @method tidy GeneSetCollection
#' @examples
#' # Needs the GSEABase package from Bioconductor
#' if (requireNamespace("GSEABase", quietly = TRUE)) {
#'     library("GSEABase")
#'     gs <- GeneSet()
#'     gs
#'     tidy(gs)
#'     fl <- system.file("extdata", "Broad.xml", package = "GSEABase")
#'     gs2 <- getBroadSets(fl) # actually, a list of two gene sets
#'     TS <- tidy(gs2)
#'     dim(TS)
#'     sets(TS)
#' }
tidy.GeneSetCollection <- function(object) {
    data <- slot(object, ".Data")
    sets <- lapply(data, tidy)
    TS <- Reduce(merge_tidySets, sets)
    validObject(TS)
    TS
}

#' @describeIn tidy Converts to a tidySet given a GeneSet
#' @export
#' @method tidy GeneSet
tidy.GeneSet <- function(object) {
    if (length(object@geneIds) == 0) {
        elements <- character(length = 1)
    } else {
        elements <- object@geneIds
    }
    if (is.na(object@setName)) {
        sets <- "set1"
    } else {
        sets <- object@setName
    }
    relations <- data.frame(
        elements = elements,
        sets = sets
    )
    TS <- tidySet(relations)
    TS <- filter_element(TS, elements != "")
    new_sets <- c(
        sets = as.character(sets(TS)$sets[1]),
        Identifier = slot(object, "setIdentifier"),
        shortDescription = slot(object, "shortDescription"),
        longDescription = slot(object, "longDescription"),
        organism = slot(object, "organism"),
        pubMedIds = slot(object, "pubMedIds"),
        urls = slot(object, "urls"),
        contributor = slot(object, "contributor")
    )
    new_sets <- c(new_sets, tidy(object@collectionType))
    sets(TS) <- as.data.frame(t(new_sets))
    validObject(TS)
    TS
}

helper_tidy <- function(object) {
    name <- object@type
    if (name != "Null") {
        c("type" = name)
    }
}

#' @export
#' @method tidy CollectionType
tidy.CollectionType <- function(object) {
    helper_tidy(object)
}
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/GeneSetCollection.R
#' @include AllClasses.R AllGenerics.R
NULL

#' Determine the context of subsequent manipulations
#'
#' Functions to restrict an action to just one type of data: elements,
#' sets or relations.
#' \code{activate}: To set the focus of future manipulations on elements, sets
#' or relations.
#' \code{active}: To check the current focus of the \code{TidySet}.
#' \code{deactivate}: To remove the focus from a specific \code{TidySet}.
#' @param .data A \code{TidySet} object.
#' @param what Either "elements", "sets" or "relations".
#' @return A \code{TidySet} object.
#' @family methods
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("a", 5), "b", rep("a2", 5), "b2"),
#'     elements = rep(letters[seq_len(6)], 2),
#'     fuzzy = runif(12)
#' )
#' a <- tidySet(relations)
#' elements(a) <- cbind(elements(a),
#'     type = c(rep("Gene", 4), rep("lncRNA", 2))
#' )
#' # Filter in the whole TidySet
#' filter(a, elements == "a")
#' filter(a, elements == "a", type == "Gene")
#' # Equivalent to filter_element
#' filter_element(a, type == "Gene")
#' a <- activate(a, "elements")
#' active(a)
#' filter(a, type == "Gene")
#' a <- deactivate(a)
#' active(a)
#' filter(a, type == "Gene")
activate <- function(.data, what) {
    UseMethod("activate")
}

#' @export
#' @importFrom rlang enquo quo_text
activate.TidySet <- function(.data, what) {
    active(.data) <- quo_text(enquo(what))
    .data
}

#' @rdname activate
#' @export
active <- function(.data) {
    attr(.data, "active")
}

`active<-` <- function(x, value) {
    if (is.null(value)) {
        attr(x, "active") <- value
    } else {
        value <- gsub('"', "", value)
        value <- switch(
            value,
            element = ,
            elements = "elements",
            set = ,
            sets = "sets",
            relation = ,
            relations = "relations",
            stop("Only possible to activate elements, sets and relations",
                call. = FALSE)
        )
        attr(x, "active") <- value
    }
    x
}
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/activate.R
add_elements_internal <- function(object, elements) {
    original_elements <- name_elements(object)
    final_elements <- unique(c(original_elements, elements))
    new_elements <- setdiff(final_elements, original_elements)
    if (length(new_elements) == 0) {
        return(object)
    }
    df_elements <- data.frame(elements = new_elements)
    column_names <- setdiff(colnames(object@elements), "elements")
    df_elements[, column_names] <- NA
    object@elements <- rbind(object@elements, df_elements)
    rownames(object@elements) <- NULL
    object@elements <- droplevels(object@elements)
    object
}

add_sets_internal <- function(object, set) {
    original_sets <- name_sets(object)
    final_sets <- unique(c(original_sets, set))
    new_sets <- setdiff(final_sets, original_sets)
    if (length(new_sets) == 0) {
        return(object)
    }
    df_sets <- data.frame(sets = new_sets)
    column_names <- setdiff(colnames(object@sets), "sets")
    df_sets[, column_names] <- NA
    object@sets <- rbind(object@sets, df_sets)
    rownames(object@sets) <- NULL
    object@sets <- droplevels(object@sets)
    object
}

add_relations_internal <- function(object, elements, sets, fuzzy) {
    nElements <- length(elements)
    if (length(sets) != nElements && length(sets) == 1) {
        sets <- rep(sets, nElements)
    } else if (length(sets) != nElements && length(sets) > 1) {
        stop("The number of sets must be either one ",
            "or equal to the number of elements.",
            call. = FALSE
        )
    }
    original_relations <- elements_sets(object)
    relations <- paste(elements, sets)
    new_relations <- setdiff(relations, original_relations)
    if (length(fuzzy) > length(new_relations)) {
        stop("Redefining the same relations with a different fuzzy number",
            call. = FALSE
        )
    } else if (length(fuzzy) <= length(new_relations) && length(fuzzy) == 1) {
        fuzzy <- rep(fuzzy, length(new_relations))
    } else if (length(fuzzy) != length(elements)) {
        stop("Recycling fuzzy is not allowed", call. = FALSE)
    }
    # Split the remaining elements and sets
    elements_sets <- strsplit(new_relations, split = " ")
    elements <- vapply(elements_sets, "[", i = 1, character(1L))
    sets <- vapply(elements_sets, "[", i = 2, character(1L))
    df_relations <- data.frame(
        elements = elements,
        sets = sets,
        fuzzy = fuzzy
    )
    column_names <- setdiff(
        colnames(object@relations),
        c("sets", "elements", "fuzzy")
    )
    df_relations[, column_names] <- NA
    object@relations <- rbind.data.frame(object@relations, df_relations)
    rownames(object@relations) <- NULL
    object@relations <- droplevels(object@relations)
    object
}

#' Add elements to a TidySet
#'
#' Functions to add elements. If the elements are new they are added,
#' otherwise they are omitted.
#' @note `add_elements` doesn't set up any other information about the elements.
#' Remember to add/modify them if needed with [`mutate`] or [`mutate_element`]
#' @param object A [`TidySet`] object.
#' @param elements A character vector of the elements.
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @return A [`TidySet`] object with the new elements.
#' @family add_*
#' @examples
#' x <- list("a" = letters[1:5], "b" = LETTERS[3:7])
#' a <- tidySet(x)
#' b <- add_elements(a, "fg")
#' elements(b)
#' @export
add_elements <- function(object, elements, ...) {
    UseMethod("add_elements")
}

#' @export
#' @method add_elements TidySet
add_elements.TidySet <- function(object, elements, ...) {
    object <- add_elements_internal(object, elements)
    validObject(object)
    object
}

#' Add sets to a TidySet
#'
#' Functions to add sets. If the sets are new they are added,
#' otherwise they are omitted.
#' @note `add_sets` doesn't set up any other information about the sets.
#' Remember to add/modify them if needed with [`mutate`] or [`mutate_set`]
#' @inheritParams add_elements
#' @param sets A character vector of sets to be added.
#' @return A [`TidySet`] object with the new sets.
#' @family add_*
#' @examples
#' x <- list("a" = letters[1:5], "b" = LETTERS[3:7])
#' a <- tidySet(x)
#' b <- add_sets(a, "fg")
#' sets(b)
#' @export
add_sets <- function(object, sets, ...) {
    UseMethod("add_sets")
}

#' @export
#' @method add_sets TidySet
add_sets.TidySet <- function(object, sets, ...) {
    object <- add_sets_internal(object, sets)
    validObject(object)
    object
}

#' Add relations to a TidySet
#'
#' Adds new relations to existing or new sets and elements.
#' If the sets or elements do not exist they are added.
#' @note `add_relations` doesn't set up any other information about the
#' relationship.
#' Remember to add/modify them if needed with [`mutate`] or [`mutate_relation`]
#' @inheritParams add_sets
#' @inheritParams add_elements
#' @param fuzzy The strength of the membership.
#' @return A [`TidySet`] object with the new relations.
#' @family add_*
#' @seealso [add_relation()] to add relations with new sets or/and
#' new elements.
#' @examples
#' x <- list("a" = letters[1:5], "b" = LETTERS[3:7])
#' a <- tidySet(x)
#' add_relations(a, elements = c("a", "b", "g"), sets = "d")
#' add_relations(a, elements = c("a", "b"), sets = c("d", "g"))
#' add_relations(a, elements = c("a", "b"), sets = c("d", "g"), fuzzy = 0.5)
#' add_relations(a,
#'     elements = c("a", "b"), sets = c("d", "g"),
#'     fuzzy = c(0.5, 0.7)
#' )
#' @export
add_relations <- function(object, elements, sets, fuzzy, ...) {
    UseMethod("add_relations")
}

#' @export
#' @method add_relations TidySet
add_relations.TidySet <- function(object, elements, sets, fuzzy = 1, ...) {
    object <- add_elements(object, elements)
    object <- add_sets(object, sets)
    object <- add_relations_internal(object, elements, sets, 1)
    if (length(fuzzy) != length(elements) && length(fuzzy) != 1) {
        stop("Fuzzy values do not match with the number of relations",
            call. = FALSE
        )
    }
    relations <- relations(object)
    e_s <- paste(relations$elements, relations$sets)
    eo_so <- paste(elements, sets)
    m <- match(e_s, eo_so)
    m <- m[!is.na(m)]
    if (length(fuzzy) != length(elements)) {
        fuzzy <- rep(fuzzy, length(elements))
    } else {
        fuzzy <- fuzzy[m]
    }
    relations$fuzzy[relations$elements %in% elements &
        relations$sets %in% sets] <- fuzzy
    relations(object) <- relations
    validObject(object)
    object
}
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/add.R
#' @include AllGenerics.R AllClasses.R
NULL

#' @describeIn add_column Add a column to any slot
#' @export
setMethod("add_column",
    signature = signature(
        object = "TidySet",
        slot = "character",
        columns = "ANY"
    ),
    function(object, slot, columns) {
        slot <- match.arg(slot, c("sets", "elements", "relations"))
        original <- slot(object, slot)
        if (nrow(columns) != nrow(original)) {
            stop("The new columns must have the same number of rows ",
                "as the slot.",
                call. = FALSE
            )
        }
        out <- cbind(original, columns)
        slot(object, slot) <- out
        validObject(object)
        object
    }
)
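# Hedged example of the method above: `columns` must have as many rows as
# the chosen slot. Wrapped in `if (FALSE)` so it is not evaluated at load.
if (FALSE) {
    TS <- tidySet(list(A = letters[1:2], B = letters[3]))
    # The sets slot has two rows (A and B), so two descriptions are needed.
    TS <- add_column(TS, "sets", data.frame(description = c("first", "second")))
    sets(TS)
}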
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/add_column.R
#' @include AllClasses.R
NULL

#' @describeIn add_relation Adds relations
#' @export
setMethod("add_relation",
    signature = signature(
        object = "TidySet",
        relations = "data.frame"
    ),
    function(object, relations) {
        relations_columns <- colnames(relations)
        # check_colnames() returns TRUE when the column is missing.
        missing_fuzzy <- check_colnames(relations_columns, "fuzzy")
        if (missing_fuzzy) {
            relations <- cbind(relations, fuzzy = 1)
        }
        missing_columns <- check_colnames(
            colnames(relations),
            c("elements", "sets")
        )
        if (missing_columns) {
            stop("Relations must have both 'elements' and 'sets' columns.")
        }
        object <- add_sets(object, as.character(relations$sets))
        object <- add_elements(object, as.character(relations$elements))
        new_r <- merge(object@relations, relations,
            all = TRUE, sort = FALSE,
            suffixes = c(".old", ".new")
        )
        object@relations <- new_r
        validObject(object)
        object
    }
)
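# Sketch of add_relation(): a data.frame with `elements` and `sets` columns
# (and optionally `fuzzy`) adds relations, creating any missing sets or
# elements on the fly. Wrapped in `if (FALSE)` so it is not run at load.
if (FALSE) {
    TS <- tidySet(list(A = letters[1:2]))
    df <- data.frame(elements = c("c", "d"), sets = "B", fuzzy = c(0.5, 1))
    relations(add_relation(TS, df))
}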
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/add_relation.R
#' Adjacency
#'
#' Are two elements connected?
#' @param object A TidySet object
#' @return A square matrix, 1 if two nodes are connected, 0 otherwise.
#' @export
#' @method adjacency TidySet
#' @seealso [incidence()]
#' @examples
#' x <- list("SET1" = letters[1:5], "SET2" = LETTERS[3:7])
#' a <- tidySet(x)
#' adjacency_element(a)
#' adjacency_set(a)
adjacency <- function(object) {
    UseMethod("adjacency")
}

#' @rdname adjacency
#' @export
adjacency_element <- function(object) {
    UseMethod("adjacency_element")
}

#' @rdname adjacency
#' @export
adjacency_set <- function(object) {
    UseMethod("adjacency_set")
}

#' @rdname adjacency
#' @export
#' @method adjacency TidySet
adjacency.TidySet <- function(object) {
    activated <- active(object)
    if (is.null(activated)) {
        warning(
            "You must specify what you want the adjacency of.",
            "\n\tUse activate() first or call adjacency_element() or ",
            "adjacency_set() directly."
        )
        NULL
    } else if (activated == "elements") {
        adjacency_element(object)
    } else if (activated == "sets") {
        adjacency_set(object)
    }
}

#' @export
#' @method adjacency_element TidySet
adjacency_element.TidySet <- function(object) {
    adj <- tcrossprod(incidence(object))
    adj[adj != 0] <- 1
    adj
}

#' @export
#' @method adjacency_set TidySet
adjacency_set.TidySet <- function(object) {
    adj <- crossprod(incidence(object))
    adj[adj != 0] <- 1
    adj
}
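# The activate() route sketched: after activating a dimension, adjacency()
# dispatches to adjacency_element() or adjacency_set(). Not run at load.
if (FALSE) {
    a <- tidySet(list(SET1 = letters[1:5], SET2 = LETTERS[3:7]))
    adjacency(activate(a, "elements"))
}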
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/adjacency.R
#' @include AllClasses.R AllGenerics.R
#' @importFrom dplyr arrange
#' @importFrom rlang !!
#' @export
dplyr::arrange

#' Arrange the order of a TidySet
#'
#' Use arrange to reorder the rows of a TidySet object. You can use activate
#' with arrange or use the specific function. The S3 method arranges using
#' all the information on the TidySet.
#' @param .data The TidySet object
#' @param ... Comma-separated list of variable names, expressions, or
#' integer column positions to be used to reorder the TidySet.
#' @return A TidySet object
#' @export
#' @seealso [dplyr::arrange()] and [activate()]
#' @family methods
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B", rep("A2", 5), "B2"),
#'     elements = rep(letters[seq_len(6)], 2),
#'     fuzzy = runif(12)
#' )
#' a <- tidySet(relations)
#' a <- mutate_element(a,
#'     type = c(rep("Gene", 4), rep("lncRNA", 2))
#' )
#'
#' b <- arrange(a, desc(type))
#' elements(b)
#' b <- arrange_element(a, elements)
#' elements(b)
#' # Arrange sets
#' arrange_set(a, sets)
#' @rdname arrange_
#' @export
#' @method arrange TidySet
arrange.TidySet <- function(.data, ...) {
    if (is.null(active(.data))) {
        out <- dplyr::arrange(as.data.frame(.data), ...)
        df2TS(.data = .data, df = out)
    } else {
        switch(
            active(.data),
            elements = arrange_element(.data, ...),
            sets = arrange_set(.data, ...),
            relations = arrange_relation(.data, ...)
        )
    }
}

#' @rdname arrange_
#' @export
arrange_set <- function(.data, ...) {
    UseMethod("arrange_set")
}

#' @rdname arrange_
#' @export
arrange_element <- function(.data, ...) {
    UseMethod("arrange_element")
}

#' @rdname arrange_
#' @export
arrange_relation <- function(.data, ...) {
    UseMethod("arrange_relation")
}

#' @export
#' @method arrange_set TidySet
arrange_set.TidySet <- function(.data, ...) {
    sets <- sets(.data)
    out <- dplyr::arrange(sets, ...)
    .data@sets <- out
    validObject(.data)
    .data
}

#' @export
#' @method arrange_element TidySet
arrange_element.TidySet <- function(.data, ...) {
    elements <- elements(.data)
    out <- dplyr::arrange(elements, ...)
    .data@elements <- out
    validObject(.data)
    .data
}

#' @export
#' @method arrange_relation TidySet
arrange_relation.TidySet <- function(.data, ...) {
    relations <- relations(.data)
    out <- dplyr::arrange(relations, ...)
    .data@relations <- out
    validObject(.data)
    .data
}
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/arrange.R
# Allow to merge TidySets

#' Combine Values into a Vector or List
#'
#' This method combines TidySets.
#' It only works if the first element is a TidySet.
#' @param x A TidySet object.
#' @param ... Objects to be concatenated. All NULL entries are dropped.
#' @export
#' @examples
#' TS <- tidySet(list(A = letters[1:5], B = letters[6]))
#' TS2 <- c(TS, data.frame(sets = "C", elements = "gg"))
setMethod("c", "TidySet", function(x, ...) {
    l <- list(x, ...)
    null <- vapply(l, is.null, FUN.VALUE = logical(1))
    dc <- lapply(l[!null], function(x) {
        as.data.frame(tidySet(x))
    })
    m <- function(x, y) {
        merge(x, y, all = TRUE, sort = FALSE)
    }
    r <- Reduce(m, dc)
    missing_fuzzy <- is.na(r$fuzzy)
    if (any(missing_fuzzy)) {
        new_sets <- r$sets[missing_fuzzy]
        new_elements <- r$elements[missing_fuzzy]
        if (all(is.na(new_sets)) && all(is.na(new_elements))) {
            warning("Some data might be lost.")
        }
        TS <- tidySet(r[!missing_fuzzy, , drop = FALSE])
        TS <- add_sets_internal(TS, new_sets)
        TS <- add_elements_internal(TS, new_elements)
        validObject(TS)
        return(TS)
    }
    tidySet(r)
})
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/c.R
#' @param FUN Function that returns a single numeric value given a vector of #' fuzzy values. #' @param ... Other arguments passed to `FUN`. #' @describeIn cardinality Cardinality of sets #' @export cardinality setMethod("cardinality", signature = signature(object = "TidySet"), function(object, sets, FUN = "sum", ...) { FUN <- match.fun(FUN) rel <- relations(object) if (is.null(sets)) { sets <- name_sets(object) } rel <- rel[rel$sets %in% sets, , drop = FALSE] fuzzy <- split(rel$fuzzy, rel$sets) card <- vapply(fuzzy, FUN, FUN.VALUE = numeric(1L), ... = ...) df <- data.frame(sets = names(fuzzy), cardinality = card) rownames(df) <- NULL df })
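# Illustrative call of the method above: with fuzzy memberships the default
# FUN = "sum" returns the expected (fuzzy) cardinality of each set. Wrapped
# in `if (FALSE)` so it is not evaluated at package load.
if (FALSE) {
    rel <- data.frame(
        sets = c("A", "A", "B"),
        elements = c("a", "b", "a"),
        fuzzy = c(0.5, 1, 0.3)
    )
    cardinality(tidySet(rel), sets = c("A", "B"))
    # A: 0.5 + 1 = 1.5; B: 0.3
}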
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/cardinality.R
#' @include AllClasses.R AllGenerics.R NULL #' Create the cartesian product of two sets #' #' Given two sets creates new sets with one element of each set #' @param object A TidySet object. #' @param set1,set2 The name of the sets to be used for the cartesian product #' @param name The name of the new set. #' @inheritParams union #' @param ... Placeholder for other arguments that could be passed to the #' method. Currently not used. #' @return A TidySet object with the new set #' @family methods #' @export #' @examples #' relations <- data.frame( #' sets = c(rep("a", 5), "b"), #' elements = letters[seq_len(6)] #' ) #' TS <- tidySet(relations) #' cartesian(TS, "a", "b") cartesian <- function(object, set1, set2, name = NULL, ...) { UseMethod("cartesian") } #' @rdname cartesian #' @method cartesian TidySet #' @export cartesian.TidySet <- function(object, set1, set2, name = NULL, keep = TRUE, keep_relations = keep, keep_elements = keep, keep_sets = keep, ...) { if (!is.logical(keep)) { stop("keep must be a logical value.", call. = FALSE) } if (any(!c(set1, set2) %in% name_sets(object))) { stop("Sets must be on the object", call. = FALSE) } if (length(set1) > 1 || length(set2) > 1) { stop("Sets must be of length 1", call. = FALSE) } if (is.null(name)) { name <- naming(sets1 = set1, sets2 = set2, middle = "product") } relations <- relations(object) elements1 <- relations$elements[relations$sets %in% set1] elements2 <- relations$elements[relations$sets %in% set2] new_sets <- base::expand.grid(elements1, elements2, stringsAsFactors = FALSE ) l <- vector("list", nrow(new_sets)) for (i in seq_len(nrow(new_sets))) { l[[i]] <- unique(as.character(simplify2array(new_sets[i, ]))) } new_sets <- l[lengths(l) == 2] new_names <- paste0(name, "_", seq_along(new_sets)) object <- add_sets(object, name) object <- add_sets(object, new_names) relation <- data.frame( elements = unlist(new_sets, FALSE, FALSE), sets = rep(new_names, lengths(new_sets)) ) object <- add_relation(object, relation) relations <- relations(object) cart <- relations[relations$sets %in% new_names, , drop = FALSE] cart$sets <- name if (keep_relations) { relations(object) <- unique(rbind(relations, cart)) } else { relations(object) <- cart } droplevels(object) }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/cartesian.R
#' @include AllClasses.R AllGenerics.R
NULL

#' @describeIn complement_set Complement of the sets.
#' @export
setMethod("complement_set",
    signature = signature(
        object = "TidySet",
        sets = "characterORfactor"
    ),
    function(object, sets, name = NULL, FUN = NULL, keep = TRUE,
             keep_relations = keep,
             keep_elements = keep,
             keep_sets = keep) {
        if (!is.logical(keep)) {
            stop("keep must be a logical value.", call. = FALSE)
        }
        old_relations <- relations(object)
        involved_relations <- old_relations$sets %in% sets
        # Elements present on sets
        complement <- old_relations[involved_relations, , drop = FALSE]
        if (is.null(FUN)) {
            complement$fuzzy <- 1 - complement$fuzzy
        } else {
            fun <- match.fun(FUN)
            complement$fuzzy <- fun(complement$fuzzy)
        }
        if (is.null(name)) {
            name <- naming("complement", sets)
        }
        object <- add_sets(object, name)
        complement$sets <- name
        object <- replace_interactions(object, complement, keep_relations)
        droplevels(object, !keep_elements, !keep_sets, !keep_relations)
    }
)

#' @describeIn complement_element Complement of the elements.
#' @export
setMethod("complement_element",
    signature = signature(
        object = "TidySet",
        elements = "characterORfactor"
    ),
    function(object, elements, name = NULL, FUN = NULL, keep = TRUE,
             keep_relations = keep,
             keep_elements = keep,
             keep_sets = keep) {
        if (!is.logical(keep)) {
            stop("keep must be a logical value.", call. = FALSE)
        }
        old_relations <- relations(object)
        complement <- old_relations[old_relations$elements %in% elements, ,
            drop = FALSE
        ]
        if (is.null(FUN)) {
            complement$fuzzy <- 1 - complement$fuzzy
        } else {
            fun <- match.fun(FUN)
            complement$fuzzy <- fun(complement$fuzzy)
        }
        if (is.null(name)) {
            name <- naming("complement", elements)
        }
        complement$sets <- name
        complement <- complement[complement$fuzzy != 0, , drop = FALSE]
        object <- replace_interactions(object, complement, keep_relations)
        object <- add_sets(object, name)
        droplevels(object, !keep_elements, !keep_sets, !keep_relations)
    }
)

#' Complement TidySet
#'
#' Use complement to compute the complement of elements or sets of a TidySet
#' object. You can use activate with complement or use the specific function.
#' You must specify if you want the complements of sets or elements.
#' @param .data The TidySet object
#' @param ... Other arguments passed to either [complement_set()] or
#' [complement_element()].
#' @return A TidySet object
#' @export
#' @family complements
#' @family methods
#' @seealso [activate()]
#' @examples
#' rel <- data.frame(
#'     sets = c("A", "A", "B", "B", "C", "C"),
#'     elements = letters[seq_len(6)],
#'     fuzzy = runif(6)
#' )
#' TS <- tidySet(rel)
#' TS %>%
#'     activate("elements") %>%
#'     complement("a")
#' TS %>%
#'     activate("elements") %>%
#'     complement("a", "C_a", keep = FALSE)
#' TS %>%
#'     activate("set") %>%
#'     complement("A")
#' TS %>%
#'     activate("set") %>%
#'     complement("A", keep = FALSE)
#' TS %>%
#'     activate("set") %>%
#'     complement("A", FUN = function(x){abs(x - 0.2)}, keep = FALSE)
#' @export
complement <- function(.data, ...) {
    UseMethod("complement")
}

#' @export
#' @method complement TidySet
complement.TidySet <- function(.data, ...) {
    if (is.null(active(.data))) {
        stop("Specify what you want the complement of: ",
            "activate sets or elements first.")
    } else {
        switch(
            active(.data),
            elements = complement_element(.data, ...),
            sets = complement_set(.data, ...),
            relations = stop("Select either elements or sets")
        )
    }
}
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/complement.R
#' @include AllClasses.R AllGenerics.R
NULL

setAs("TidySet", "data.frame", function(from) {
    r <- from@relations
    s <- merge(r, from@sets,
        by.x = "sets", by.y = "sets", sort = FALSE, all.x = TRUE,
        suffixes = c(".relations", ".sets")
    )
    o <- merge(s, from@elements,
        by.x = "elements", by.y = "elements",
        sort = FALSE, all.x = TRUE, suffixes = c("", ".elements")
    )
    # To keep the order of the data.frame
    new_ord <- paste0(o$elements, o$sets)
    old_ord <- paste0(r$elements, r$sets)
    o <- o[match(old_ord, new_ord), , drop = FALSE]
    rownames(o) <- seq_len(nrow(o))
    o
})

#' Transforms a TidySet to a data.frame
#'
#' Flattens the three slots to a single big table
#' @param x The \code{TidySet} object.
#' @param ... Placeholder for other arguments that could be passed to the
#' method. Currently not used.
#' @return A \code{data.frame} table.
#' @method as.data.frame TidySet
#' @export
as.data.frame.TidySet <- function(x, ...) {
    as(x, "data.frame")
}

#' The opposite of as.data.frame
#'
#' Converts a flattened data.frame back to a TidySet, starting from the
#' relations. It uses the original TidySet to restore the columns to the
#' slots they came from.
#' @param .data The original TidySet
#' @param df The flattened data.frame
#' @seealso [tidySet.data.frame()]
#' @return A TidySet object
#' @keywords internal
df2TS <- function(.data = NULL, df) {
    if (!is.null(.data)) {
        colnames_sets <- colnames(sets(.data))
        colnames_elements <- colnames(elements(.data))
    }
    sets <- c("sets")
    elements <- c("elements")
    if (!"fuzzy" %in% colnames(df)) {
        df$fuzzy <- 1
    }
    final_colnames <- colnames(df)
    TS <- tidySet(df)
    # Move just the columns that need to be moved.
    move_sets <- setdiff(colnames_sets, sets)
    move_sets <- move_sets[move_sets %in% final_colnames]
    move_elements <- setdiff(colnames_elements, elements)
    move_elements <- move_elements[move_elements %in% final_colnames]
    TS <- move_to(TS, "relations", "sets", move_sets)
    TS <- move_to(TS, "relations", "elements", move_elements)
    validObject(TS)
    TS
}
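# Round-trip sketch: flatten a TidySet to a data.frame and rebuild it with
# tidySet(); df2TS() is the internal variant that also restores column
# placement from an existing TidySet. Not evaluated at package load.
if (FALSE) {
    TS <- tidySet(list(A = letters[1:3], B = letters[3:4]))
    df <- as.data.frame(TS)
    identical(nRelations(tidySet(df)), nRelations(TS)) # TRUE
}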
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/data_frame.R
#' @include AllClasses.R AllGenerics.R NULL #' @rdname activate #' @export deactivate <- function(.data) { UseMethod("deactivate") } #' @export deactivate.TidySet <- function(.data) { active(.data) <- NULL .data }
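# Quick sketch (assuming active() is exported alongside activate()):
# deactivate() clears the active slot, so the dplyr verbs fall back to
# operating on the whole TidySet again. Not evaluated at load.
if (FALSE) {
    TS <- activate(tidySet(list(A = letters[1:3])), "sets")
    active(TS) # "sets"
    active(deactivate(TS)) # NULL
}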
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/deactivate.R
#' @include AllClasses.R AllGenerics.R NULL drop_elements <- function(object) { remaining <- unique(relations(object)$elements) elements <- name_elements(object) remove_elements(object, elements[!elements %in% remaining]) } drop_sets <- function(object) { remaining <- unique(relations(object)$sets) sets <- name_sets(object) remove_sets(object, sets[!sets %in% remaining]) } drop_relations <- function(object) { sets <- name_sets(object) elements <- name_elements(object) relations <- object@relations if (nrow(relations) != 0) { keep_sets <- relations$sets %in% sets keep_elements <- relations$elements %in% elements object@relations <- relations[keep_sets & keep_elements, , drop = FALSE] } rownames(object@relations) <- NULL object } #' Drop unused elements and sets #' #' Drop elements and sets without any relation. #' @param x A TidySet object. #' @param elements Logical value: Should elements be dropped? #' @param sets Logical value: Should sets be dropped? #' @param relations Logical value: Should sets be dropped? #' @param ... Other arguments, currently ignored. #' @return A TidySet object. #' @export #' @examples #' rel <- list(A = letters[1:3], B = character()) #' TS <- tidySet(rel) #' TS #' sets(TS) #' TS2 <- droplevels(TS) #' TS2 #' sets(TS2) droplevels.TidySet <- function(x, elements = TRUE, sets = TRUE, relations = TRUE, ...) { stopifnot(is.logical(elements)) stopifnot(is.logical(sets)) stopifnot(is.logical(relations)) if (relations) { x <- drop_relations(x) } if (elements) { x <- drop_elements(x) } if (sets) { x <- drop_sets(x) } validObject(x) x }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/droplevels.R
#' @include AllClasses.R AllGenerics.R NULL #' @describeIn elements Retrieve the elements #' @export setMethod("elements", signature = signature(object = "TidySet"), function(object) { slot(object, "elements") } ) #' @describeIn elements Modify the elements #' @export #' @examples #' elements(TS) <- data.frame(elements = letters[10:1]) setMethod("elements<-", signature = signature(object = "TidySet"), function(object, value) { slot(object, "elements") <- value validObject(object) object } ) #' @rdname elements #' @export #' @examples #' TS2 <- replace_elements(TS, data.frame(elements = letters[1:11])) replace_elements <- function(object, value) { UseMethod("replace_elements") } #' @export #' @method replace_elements TidySet replace_elements.TidySet <- function(object, value) { elements(object) <- value object } #' @describeIn elements Return the number of elements #' @export #' @examples #' nElements(TS) #' nElements(TS2) setMethod("nElements", signature = signature(object = "TidySet"), function(object) { nrow(slot(object, "elements")) } )
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/elements.R
#' @include AllClasses.R AllGenerics.R NULL #' Extract #' #' Operators acting on TidySet to extract or replace parts. #' They are designed to resemble the basic operators. #' @param x A TidySet object. #' @param name The data about the TidysSet object to extract. #' @param value The value to overwrite #' @param i Which rows do you want to keep? By default all. #' @param j Which slot do you want to extract? One of "sets", "elements" or #' "relations". #' @param k Which columns do you want to extract. By default all. #' @param ... Other arguments currently ignored. #' @param drop Remove remaining elements, sets and relations? Passed to all #' arguments of [droplevels()]. #' @param exact A logical value. FALSE if fuzzy matching is wanted. #' Add values to the TidySet. Allows to control to which slot is it added. #' @return Always returns a valid [TidySet]. #' #' @examples #' TS <- tidySet(list(A = letters[1:5], B = letters[6])) #' TS[, "sets", "origin"] <- sample(c("random", "non-random"), 2, replace = TRUE) #' TS[, "sets", "type"] <- c("Fantastic", "Wonderful") #' # This produces a warning # TS$description <- c("What", "can", "I", "say", "now", "?") #' # Better to be explicit: #' TS[, "relations", "description"] <- c("What", "can", "I", "say", "now", "?") #' relations(TS) #' TS[, "elements", "description"] <- rev(c("What", "can", "I", "say", "now", "?")) #' elements(TS) #' # Which will be deleted? #' # TS$description <- NULL #' TS$type #' TS$origin <- c("BCN", "BDN") #' # Different subsets #' TS[1, "elements"] #' TS[1, "sets"] #' # Always print #' TS #' TS[, "sets", c("type", "origin")] # Same #' TS[, "sets", "origin"] # Drop column type #' is(TS[, "sets", "origin"]) #' TS[, "sets"] #' TS[["A"]] #' TS[["B"]] #' TS[["C"]] # Any other set is the empty set #' @rdname extract-TidySet #' @name extract-TidySet NULL # From: https://stackoverflow.com/a/10961998 # $ #### #' @rdname extract-TidySet #' @export setMethod("$", "TidySet", function(x, name) { if (name %in% colnames(relations(x))) { return(slot(x, "relations")[[name]]) } if (name %in% colnames(sets(x))) { return(slot(x, "sets")[[name]]) } if (name %in% colnames(elements(x))) { return(slot(x, "elements")[[name]]) } NULL }) #' @rdname extract-TidySet #' @export setMethod("$<-", "TidySet", function(x, name, value) { p_length <- which(length(value) == dim(x)) # As per dim named output p_named <- switch(name, elements = 1, fuzzy = 2, sets = 3, NA) if (is.na(p_named)) { p_named <- in_slots(x, function(x, y){ y %in% colnames(x)}, y = name) p_named <- which(p_named) } # unknown new column to relations if (length(p_named) == 0 && length(p_length) == 0) { pos <- 2 value <- rep(value, nRelations(x)) } # Not tested with a relation documented multiple times! if (length(p_named) > 1 && length(p_length) > 1) { p_named <- intersect(p_named, p_length) } else if (length(p_named) == 0 && length(p_length) > 1) { p_named <- p_length } if (length(p_named) > 1) { if (2 %in% p_named) { warning("Matching multiple slots. Assigning value to relations.", call. = FALSE) pos <- 2 } else { warning("Matching multiple slots. Randomly assigning the value.", call. 
= FALSE) pos <- sample(p_named, 1) } } else { pos <- p_named } if (pos == 1) { elements(x)[[name]] <- value } else if (pos == 2) { relations(x)[[name]] <- value } else if (pos == 3) { sets(x)[[name]] <- value } droplevels(x) }) # [ #### #' @rdname extract-TidySet #' @export setMethod("[", "TidySet", function(x, i, j, k, ..., drop = TRUE) { if (!missing(i) && is.character(i)) { stop("TidySet does not accept characters as `i` index for `[`.", "\nDid you meant to use [[ instead?", call. = FALSE) } stopifnot(is.logical(drop)) if (missing(j)) { j <- "relations" } if (length(j) > 1 || is.na(j)) { stop("j only accepts: 'elements', 'sets' and ' relations'") } j <- match.arg(j, c("elements", "sets", "relations")) s <- slot(x, j) if (missing(k)) { k <- seq_len(ncol(s)) } k <- keep_columns(j, k) if (missing(i)) { s2 <- s[, k, ..., drop = FALSE] } else { s2 <- s[i, k, ..., drop = FALSE] rownames(s2) <- NULL } slot(x, j) <- s2 if (drop) { x <- switch(j, "sets" = drop_sets(x), "elements" = drop_elements(x), x) x <- drop_relations(x) } validObject(x) x }) #' @export #' @rdname extract-TidySet setMethod("[<-", "TidySet", function(x, i, j, k, ..., value) { if (missing(j)) { j <- "relations" } j <- match.arg(j, c("elements", "sets", "relations")) s <- slot(x, j) if (missing(k)) { k <- 1 } if (length(k) == 1 && NCOL(value) > 1) { if (missing(i)) { i <- "" } msg <- paste0("TS[", i, ", '", j, "', ", "c('column1', 'column2')] <- value") stop("Assigning multiple columns to a single position!\nUse one of:\n", "add_column(TS, '", j, "', value) or ",msg) } s[i, k, ...] <- value slot(x, j) <- s validObject(x) x }) # [[ #### #' @export #' @rdname extract-TidySet setMethod("[[", "TidySet", function(x, i, j, ..., exact =TRUE) { if (missing(i)) { stop("missing subscript") } i <- unique(i) i <- i[!is.na(i)] if (length(i) > 1) { stop("Trying to extract more than one set.") } stopifnot(isTRUE(exact) || isFALSE(exact)) if (missing(j)) { j <- seq_len(ncol(sets(x))) } j <- keep_columns("sets", j) ns <- nSets(x) logical_i <- is.logical(i) && length(i) > ns numeric_i <- is.numeric(i) && max(i, na.rm = TRUE) > ns if ( logical_i | numeric_i) { stop("Sets requested not available.") } nams <- name_sets(x) if (is.character(i)) { nsi <- i } else if (is.character(i) && !exact) { nsi <- pmatch(i, table = nams) } else { nsi <- nams[i] } namsi <- match(nsi, nams) x[namsi, "sets", j, drop = TRUE] }) #' @export #' @rdname extract-TidySet setMethod("[[<-", "TidySet", function(x, i, value) { if (missing(i)) { stop("missing subscript") } if (is.null(value)) { errors <- character() } else { errors <- valid_sets(value) } if (length(errors) != 0) { stop(paste(errors, collapse = "\n")) } i <- unique(i) i <- i[!is.na(i)] if (length(i) > 1) { stop("Trying to extract more than one set.") } ns <- nSets(x) logical_i <- is.logical(i) && length(i) > ns numeric_i <- is.numeric(i) && max(i, na.rm = TRUE) > ns if ( logical_i | numeric_i) { stop("Sets requested not available.") } nams <- name_sets(x) if (is.character(i)) { nsi <- i } else { nsi <- nams[i] } y <- remove_set(x, nsi) if (is.null(value)) { return(y) } new_sets <- merge(sets(y), value, all = TRUE, sort = FALSE) sets(y) <- new_sets y <- drop_relations(y) validObject(y) y }) keep_columns <- function(j, k) { if (is.numeric(k) && j == "relations") { return(unique(c(1:3, k))) } else if (is.numeric(k)) { return(unique(c(1, k))) } cc <- character_columns(j, k) if (!is.null(cc)) { return(cc) } if (is.logical(k) && j == "relations") { return(c(TRUE, TRUE, TRUE, k)) } else { return(c(TRUE, k)) } } 
character_columns <- function(j, k) { if (!is.character(k)) { return(NULL) } if (j == "relations") { return(unique(c("elements", "sets", "fuzzy", k))) } else if (j == "sets") { return(unique(c("sets", k))) } else if (j == "elements") { return(unique(c("elements", k))) } }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/extract.R
#' @include AllClasses.R AllGenerics.R #' @importFrom dplyr filter #' @export dplyr::filter #' Filter TidySet #' #' Use filter to subset the TidySet object. You can use activate with filter or #' use the specific function. The S3 method filters using all the information #' on the TidySet. #' @param .data The TidySet object. #' @param ... The logical predicates in terms of the variables of the sets. #' @return A TidySet object. #' @export #' @family methods #' @seealso [dplyr::filter()] and [activate()] #' @examples #' relations <- data.frame( #' sets = c(rep("a", 5), "b", rep("a2", 5), "b2"), #' elements = rep(letters[seq_len(6)], 2), #' fuzzy = runif(12), #' type = c(rep("Gene", 4), rep("lncRNA", 2)) #' ) #' TS <- tidySet(relations) #' TS <- move_to(TS, from = "relations", to = "elements", column = "type") #' filter(TS, elements == "a") #' # Equivalent to filter_relation #' filter(TS, elements == "a", sets == "a") #' filter_relation(TS, elements == "a", sets == "a") #' # Filter element #' filter_element(TS, type == "Gene") #' # Filter sets and by property of elements simultaneously #' filter(TS, sets == "b", type == "lncRNA") #' # Filter sets #' filter_set(TS, sets == "b") #' @rdname filter_ #' @export #' @method filter TidySet filter.TidySet <- function(.data, ...) { if (is.null(active(.data))) { df <- dplyr::filter(as.data.frame(.data), ...) df2TS(.data, df) } else { switch( active(.data), elements = filter_element(.data, ...), sets = filter_set(.data, ...), relations = filter_relation(.data, ...) ) } } #' @rdname filter_ #' @export filter_set <- function(.data, ...) { UseMethod("filter_set") } #' @rdname filter_ #' @export filter_element <- function(.data, ...) { UseMethod("filter_element") } #' @rdname filter_ #' @export filter_relation <- function(.data, ...) { UseMethod("filter_relation") } #' @export #' @method filter_set TidySet filter_set.TidySet <- function(.data, ...) { sets <- sets(.data) out <- dplyr::filter(sets, ...) if (nrow(out) == 0) { .data@sets <- out[0, , drop = FALSE] } else { .data@sets <- droplevels(out) } # Keep elements without sets, drop relations droplevels(.data, elements = FALSE, relations = TRUE) } #' @export #' @method filter_element TidySet filter_element.TidySet <- function(.data, ...) { elements <- elements(.data) out <- dplyr::filter(elements, ...) if (nrow(out) == 0) { .data@elements <- out[0, , drop = FALSE] } else { .data@elements <- droplevels(out) } # Keep empty sets, drop relations droplevels(.data, sets = FALSE, relations = TRUE) } #' @export #' @method filter_relation TidySet filter_relation.TidySet <- function(.data, ...) { relations <- relations(.data) out <- dplyr::filter(relations, ...) if (nrow(out) == 0) { .data@relations <- out[0, , drop = FALSE] } else { .data@relations <- droplevels(out) } # Keep empty sets and elements droplevels(.data, sets = FALSE, elements = FALSE) }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/filter.R
#' @include AllClasses.R AllGenerics.R NULL #' Create a new set from existing elements #' #' It allows to create a new set given some condition. If no element meet the #' condition an empty set is created. #' @param object A TidySet object. #' @param name The name of the new set. #' @param ... A logical condition to subset some elements. #' @return A TidySet object with the new set. #' @family methods #' @export #' @examples #' x <- list("A" = c("a" = 0.1, "b" = 0.5), "B" = c("a" = 0.2, "b" = 1)) #' TS <- tidySet(x) #' TS1 <- group(TS, "C", fuzzy < 0.5) #' TS1 #' sets(TS1) #' TS2 <- group(TS, "D", fuzzy < 0) #' sets(TS2) #' r <- data.frame( #' sets = c(rep("A", 5), "B", rep("A2", 5), "B2"), #' elements = rep(letters[seq_len(6)], 2), #' fuzzy = runif(12), #' type = c(rep("Gene", 2), rep("Protein", 2), rep("lncRNA", 2)) #' ) #' TS3 <- tidySet(r) #' group(TS3, "D", sets %in% c("A", "A2")) group <- function(object, name, ...) { UseMethod("group") } #' @rdname group #' @export group.TidySet <- function(object, name, ...) { object <- tryCatch({ out <- filter(object, ...) out <- elements(out)[, "elements", drop = FALSE] out$sets <- name out$fuzzy <- 1 new_colnames <- setdiff(colnames(object@relations), colnames(out)) out[, new_colnames] <- NA object@relations <- rbind(object@relations, out) new_colnames <- setdiff(colnames(object@sets), "sets") sets <- data.frame(sets = name) sets[, new_colnames] <- NA object@sets <- rbind(object@sets, sets) object }, error = function(x){ add_sets(object, name) }) validObject(object) object }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/group.R
#' @include AllClasses.R AllGenerics.R #' @importFrom dplyr group_by #' @export dplyr::group_by #' group_by TidySet #' #' Use group_by to group the TidySet object. You can use activate with #' group_by or with the whole data. #' @param .data The TidySet object #' @param ... The logical predicates in terms of the variables of the sets #' @return A grouped data.frame (See The dplyr help page) #' @export #' @family methods #' @seealso [dplyr::group_by()] and [activate()] #' @examples #' relations <- data.frame( #' sets = c(rep("a", 5), "b", rep("a2", 5), "b2"), #' elements = rep(letters[seq_len(6)], 2), #' fuzzy = runif(12) #' ) #' a <- tidySet(relations) #' elements(a) <- cbind(elements(a), #' type = c(rep("Gene", 4), rep("lncRNA", 2)) #' ) #' group_by(a, elements) #' @rdname group_by_ #' @export #' @method group_by TidySet group_by.TidySet <- function(.data, ...) { if (is.null(active(.data))) { dplyr::group_by(as.data.frame(.data), ...) } else { dplyr::group_by(slot(.data, active(.data)), ...) } }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/group_by.R
#' @importFrom utils head #' @export #' @method head TidySet head.TidySet <- function(x, n = 6L, ...) { head(as(x, "data.frame"), n = n) } #' @importFrom utils tail #' @export #' @method tail TidySet tail.TidySet <- function(x, n = 6L, ...) { tail(as(x, "data.frame"), n = n) } #' @export #' @method dim TidySet dim.TidySet <- function(x) { c(Elements = nElements(x), Relations = nRelations(x), Sets = nSets(x)) }
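# Small demonstration of the base-style helpers defined above. Wrapped in
# `if (FALSE)` so it is not evaluated at package load.
if (FALSE) {
    TS <- tidySet(list(A = letters[1:5], B = letters[6]))
    head(TS, 3) # first rows of the flattened data.frame
    dim(TS) # named counts of Elements, Relations and Sets
}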
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/head.R
#' @include AllClasses.R AllGenerics.R NULL #' @describeIn incidence Incidence of the TidySet #' @aliases incidence #' @return A matrix with elements in rows and sets in columns where the values #' indicate the relationship between the element and the set. #' @export #' @examples #' x <- list("a" = letters[1:5], "b" = LETTERS[3:7]) #' a <- tidySet(x) #' incidence(a) setMethod("incidence", signature = signature(object = "TidySet"), function(object) { incidence <- matrix(0, nrow = nElements(object), ncol = nSets(object), dimnames = list( elements(object)$elements, sets(object)$sets ) ) rel <- unique(relations(object)[, c("sets", "elements", "fuzzy")]) elements <- as.character(rel$elements) sets <- as.character(rel$sets) fuzziness <- rel$fuzzy for (p in seq_along(rel$fuzzy)) { incidence[elements[p], sets[p]] <- fuzziness[p] } incidence } )
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/incidence.R
#' Independence of the sets #' #' Checks if the elements of the sets are present in more than one set. #' @param object A [`TidySet`] object. #' @param sets A character vector with the names of the sets to analyze. #' @return A logical value indicating if the sets are independent (TRUE) or not. #' @export #' @examples #' x <- list("A" = letters[1:5], "B" = letters[3:7], "C" = letters[6:10]) #' TS <- tidySet(x) #' independent(TS) #' independent(TS, c("A", "B")) #' independent(TS, c("A", "C")) #' independent(TS, c("B", "C")) independent <- function(object, sets) { UseMethod("independent") } #' @export #' @method independent TidySet independent.TidySet <- function(object, sets = NULL) { if (is.null(sets)) { sets <- name_sets(object) } else if (any(!sets %in% name_sets(object))) { warning("Some sets provided are not present") } relations <- relations(object) flag <- anyDuplicated(relations$elements[relations$sets %in% sets]) flag == 0 }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/independent.R
#' @include AllClasses.R AllGenerics.R operations.R
NULL

.intersection <- function(object, sets, name, FUN, keep_relations,
                          keep_elements, keep_sets, ...) {
    if (length(name) > 1) {
        stop("The name of the new set must be of length 1", call. = FALSE)
    }
    if (is.null(name)) {
        name <- collapse_sets(sets, "intersection")
    }
    inclusion <- check_sets(object, sets)
    if (!all(inclusion)) {
        stop("Sets ", paste(sets[!inclusion], collapse = ", "),
            " could not be found in the relations.",
            call. = FALSE
        )
    }
    old_relations <- relations(object)
    relevant_relations <- old_relations$sets %in% sets

    intersection <- old_relations[relevant_relations, , drop = FALSE]
    intersection <- droplevels(intersection)
    intersection$sets <- as.character(intersection$sets)
    intersection$sets <- name

    relations <- paste(intersection$elements, intersection$sets)
    t_relations <- table(relations)
    # An element belongs to the intersection only if it appears in every set.
    k_relations <- t_relations >= length(sets)
    dup_relations <- names(t_relations)[k_relations]
    duplicate_rel <- relations %in% dup_relations
    # Keep only the shared relations; previously, when no element was shared,
    # all relations were kept, which wrongly returned the union.
    intersection <- intersection[duplicate_rel, , drop = FALSE]
    if (nrow(intersection) > 0) {
        intersection <- fapply(intersection, FUN, ... = ...)
    }
    object <- replace_interactions(object, intersection, keep_relations)
    object <- add_sets(object, name)
    object <- droplevels(object, !keep_elements, !keep_sets, !keep_relations)
    validObject(object)
    object
}

#' @describeIn intersection Applies the standard intersection
#' @export
setMethod("intersection",
    signature = signature(
        object = "TidySet",
        sets = "character"
    ),
    function(object, sets, name = NULL, FUN = "min", keep = FALSE,
             keep_relations = keep,
             keep_elements = keep,
             keep_sets = keep, ...) {
        .intersection(
            object, sets, name, match.fun(FUN), keep_relations,
            keep_elements, keep_sets, ...
        )
    }
)
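# Hedged example: the fuzzy intersection keeps, for each element present in
# every set, the fuzzy value returned by FUN (min by default, as in fuzzy
# set theory). Wrapped in `if (FALSE)` so it is not evaluated at load.
if (FALSE) {
    rel <- data.frame(
        sets = c("A", "A", "B", "B"),
        elements = c("a", "b", "a", "c"),
        fuzzy = c(0.8, 0.5, 0.4, 1)
    )
    TS2 <- intersection(tidySet(rel), sets = c("A", "B"), name = "AandB")
    relations(TS2) # only "a", with fuzzy = min(0.8, 0.4) = 0.4
}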
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/intersection.R
#' @include AllClasses.R AllGenerics.R NULL #' Length of the TidySet #' #' Returns the number of sets in the object. #' @param x A TidySet object. #' #' No replacement function is available, either delete sets or add them. #' @return A numeric value. #' @seealso [dim()], [ncol()] and [nrow()]. #' Also look at [lengths()] for the number of relations of sets. #' @export #' @examples #' TS <- tidySet(list(A = letters[1:5], B = letters[6])) #' length(TS) length.TidySet <- function(x) { nSets(x) } #' Lengths of the TidySet #' #' Returns the number of relations of each set in the object. #' @param x A TidySet object. #' @param use.names A logical value whether to inherit names or not. #' #' @return A vector with the number of different relations for each set. #' @seealso [length()], Use [set_size()] if you are using fuzzy sets. #' @export #' @examples #' TS <- tidySet(list(A = letters[1:5], B = letters[6])) #' lengths(TS) setMethod("lengths", "TidySet", function(x, use.names = TRUE) { r <- relations(x) sets_elements <- paste0(r$sets, r$elements) names(sets_elements) <- r$sets d <- duplicated(sets_elements) td <- table(names(sets_elements)[!d]) if (!use.names) { names(td) <- NULL } # To convert the table to a named integer c(td) } ) #' Probability of a vector of probabilities #' #' Calculates the probability that all probabilities happened simultaneously. #' `independent_probabilities()` just multiply the probabilities of the index #' passed. #' @param p Numeric vector of probabilities. #' @param i Numeric integer index of the complementary probability. #' @return A numeric value of the probability. #' @seealso [length_probability()] #' @export #' @examples #' multiply_probabilities(c(0.5, 0.1, 0.3, 0.5, 0.25, 0.23), c(1, 3)) #' independent_probabilities(c(0.5, 0.1, 0.3, 0.5, 0.25, 0.23), c(1, 3)) multiply_probabilities <- function(p, i) { stopifnot(all(i > 0)) stopifnot(all(p >= 0)) if (length(i) == length(p)) { return(prod(p)) } else if (length(i) == 0) { i <- seq_along(p) } prod(p[i], (1 - p)[-i]) } #' @rdname multiply_probabilities #' @export independent_probabilities <- function(p, i) { stopifnot(all(i > 0)) stopifnot(all(p >= 0)) if (length(i) == length(p)) { return(prod(p)) } else if (length(i) == 0) { i <- seq_along(p) } prod(p[i]) } #' @rdname length_probability #' @export union_probability <- function(p) { l <- length(p) if (l == 1) { return(p) } n <- vapply(seq_len(l)[-1], function(x){ sum(combn(seq_along(p), x, FUN = independent_probabilities, p = p)) }, numeric(1L)) sum(p) + sum(rep(c(-1, 1), length.out = length(n))*n) } #' Calculates the probability of a single length #' #' Creates all the possibilities and then add them up. #' `union_probability` Assumes independence between the probabilities to #' calculate the final size. #' @param p Numeric vector of probabilities. #' @param size Integer value of the size of the selected values. #' @return A numeric value of the probability of the given size. #' @seealso [multiply_probabilities()] and [length_set()] #' @export #' @examples #' length_probability(c(0.5, 0.75), 2) #' length_probability(c(0.5, 0.75, 0.66), 1) #' length_probability(c(0.5, 0.1, 0.3, 0.5, 0.25, 0.23), 2) #' union_probability(c(0.5, 0.1, 0.3)) length_probability <- function(p, size) { sum(combn(seq_along(p), size, FUN = multiply_probabilities, p = p)) } #' Calculates the probability #' #' Given several probabilities it looks for how probable is to have a vector of #' each length #' @param probability A numeric vector of probabilities. 
#' @return A vector with the probability of each set.
#' @seealso [length_probability()] to calculate the probability of a specific
#' length.
#' @export
#' @examples
#' length_set(c(0.5, 0.1, 0.3, 0.5, 0.25, 0.23))
length_set <- function(probability) {
    p1 <- probability == 1
    if (all(p1)) {
        out <- c(1)
        names(out) <- as.character(sum(probability))
        return(out) # Non fuzzy sets
    }
    if (all(probability == 0)) {
        max_length <- 0
    } else {
        max_length <- length(probability)
    }
    l <- seq(from = sum(p1), to = max_length)
    # Exclude those cases that are obvious
    l2 <- l - sum(p1)
    l2 <- l2[l2 != 0]
    v <- vapply(l2, length_probability, p = probability[!p1], numeric(1L))
    # Substitute in the original possibilities
    names(l) <- as.character(l)
    l[] <- 0
    l[as.character(l2 + sum(p1))] <- v
    l[as.character(sum(p1))] <- 1 - sum(v)
    l
}

# TODO Use matrix operations to simplify the process for large objects

#' @describeIn set_size Calculates the size of a set using [length_set()]
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B", "C"),
#'     elements = c(letters[seq_len(6)], letters[6]),
#'     fuzzy = runif(7)
#' )
#' a <- tidySet(relations)
#' set_size(a)
setMethod("set_size",
    signature = signature(object = "TidySet"),
    function(object, sets = NULL) {
        if (!all(sets %in% name_sets(object)) && !is.null(sets)) {
            stop("Please introduce valid set names. See name_sets",
                call. = FALSE
            )
        }
        if (is.null(sets)) {
            names_sets <- name_sets(object)
        } else {
            names_sets <- sets
        }
        rel <- relations(object)
        rel <- rel[rel$sets %in% names_sets, , drop = FALSE]
        missing <- names_sets[!names_sets %in% rel$sets]
        rel <- rel[, c("fuzzy", "elements", "sets")]
        if (length(missing) != 0) {
            missing <- data.frame(
                sets = missing,
                elements = NA,
                fuzzy = 0
            )
            rel <- rbind(rel, missing)
        }
        # Duplicate relationships with different information...
        # To filter to unique relationships
        if (anyDuplicated(rel) != 0) {
            rel <- unique(rel)
            rel <- droplevels(rel)
        }
        if (!all(rel$fuzzy == 1)) {
            fuzzy_values <- split(rel$fuzzy, rel$sets)
            sizes <- lapply(fuzzy_values, length_set)
            sets <- rep(names(fuzzy_values), lengths(sizes))
            lengths_set <- unlist(lapply(sizes, names), FALSE, FALSE)
            probability_length <- unlist(sizes, FALSE, FALSE)
        } else {
            sets <- names_sets
            lengths_set <- table(rel$sets)[names_sets]
            probability_length <- 1
        }
        # Empty set
        if (any(is.na(lengths_set))) {
            lengths_set[is.na(lengths_set)] <- 0
        }
        # Nothing is present
        if (is.null(lengths_set) && is.null(probability_length)) {
            sets <- names_sets
            lengths_set <- rep(0, length(sets))
            probability_length <- rep(1, length(sets))
        }
        out <- data.frame(
            sets = sets,
            size = as.numeric(lengths_set),
            probability = probability_length,
            stringsAsFactors = FALSE
        )
        out <- merge(out, sets(object), sort = FALSE)
        # `sets` is reused above, so filter on the requested names; the
        # previous `sets %in% sets` comparison was always TRUE.
        out[out$sets %in% names_sets, , drop = FALSE]
    }
)

#' @describeIn element_size Calculates the number of sets an element
#' appears in, using [length_set()]
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("A", 5), "B", "C"),
#'     elements = c(letters[seq_len(6)], letters[6]),
#'     fuzzy = runif(7)
#' )
#' a <- tidySet(relations)
#' element_size(a)
setMethod("element_size",
    signature = signature(object = "TidySet"),
    function(object, elements = NULL) {
        if (!all(elements %in% name_elements(object)) && !is.null(elements)) {
            msg <- paste0(
                "Please introduce valid ",
                "element names. See name_elements"
            )
            stop(msg, call. = FALSE)
        }
        rel <- relations(object)
        if (is.null(elements)) {
            names_elements <- name_elements(object)
        } else {
            names_elements <- elements
        }
        rel <- rel[rel$elements %in% names_elements, , drop = FALSE]
        rel <- rel[, c("fuzzy", "elements", "sets")]
        missing <- names_elements[!names_elements %in% rel$elements]
        if (length(missing) != 0) {
            missing <- data.frame(
                sets = NA,
                elements = missing,
                fuzzy = 0
            )
            rel <- rbind(rel, missing)
        }
        # To filter to unique relationships
        if (anyDuplicated(rel) != 0) {
            rel <- unique(rel)
            rel <- droplevels(rel)
        }
        if (!all(rel$fuzzy == 1)) {
            fuzzy_values <- split(rel$fuzzy, rel$elements)
            sizes <- lapply(fuzzy_values, length_set)
            elements <- rep(names(fuzzy_values), lengths(sizes))
            lengths_set <- unlist(lapply(sizes, names), FALSE, FALSE)
            probability_length <- unlist(sizes, FALSE, FALSE)
        } else {
            elements <- names_elements
            lengths_set <- table(rel$elements)[names_elements]
            probability_length <- 1
        }
        # Empty group
        if (any(is.na(lengths_set))) {
            lengths_set[is.na(lengths_set)] <- 0
        }
        # Nothing is present
        if (is.null(lengths_set) && is.null(probability_length)) {
            elements <- names_elements
            lengths_set <- rep(0, length(elements))
            probability_length <- rep(1, length(elements))
        }
        out <- data.frame(
            elements = elements,
            size = as.numeric(lengths_set),
            probability = probability_length,
            stringsAsFactors = FALSE
        )
        out <- merge(out, elements(object), sort = FALSE)
        # `elements` is reused above, so filter on the requested names; the
        # previous `elements %in% elements` comparison was always TRUE.
        out[out$elements %in% names_elements, , drop = FALSE]
    }
)
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/length.R
setAs("TidySet", "list", function(from) { r <- relations(from) if (ncol(r) > 3) { warning("Dropping information on the coercion.", call. = FALSE) } r <- r[, c("elements", "sets", "fuzzy")] r <- unique(r) out <- split(seq_len(nrow(r)), r$sets) lapply(out, function(x, relations) { out <- relations$fuzzy[x] names(out) <- relations$elements[x] out }, relations = r) }) #' Convert to list #' #' Converts a TidySet to a list. #' @param x A TidySet object to be coerced to a list. #' @param ... Placeholder for other arguments that could be passed to the #' method. Currently not used. #' @return A list. #' @method as.list TidySet #' @export #' @examples #' r <- data.frame(sets = c("A", "A", "A", "B", "C"), #' elements = c(letters[1:3], letters[2:3]), #' fuzzy = runif(5), #' info = rep_len(c("important", "very important"), 5)) #' TS <- tidySet(r) #' TS #' as.list(TS) as.list.TidySet <- function(x, ...) { as(x, "list") }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/list.R
#' @include AllClasses.R AllGenerics.R NULL #' @describeIn move_to Move columns #' @export setMethod("move_to", signature = signature( object = "TidySet", from = "characterORfactor", to = "characterORfactor", columns = "character" ), function(object, from, to, columns) { if (from == to) { return(object) } from <- match.arg(from, c("sets", "elements", "relations")) to <- match.arg(to, c("sets", "elements", "relations")) from_df <- slot(object, from) to_df <- slot(object, to) if (!all(columns %in% colnames(from_df))) { stop("All columns must come from the same table.", call. = FALSE ) } df <- as.data.frame(object) to_colnames <- colnames(to_df) from_colnames <- colnames(from_df) new_to <- unique(df[, c(to_colnames, columns), drop = FALSE]) new_from <- unique(from_df[, !from_colnames %in% columns, drop = FALSE ]) slot(object, to, check = FALSE) <- new_to slot(object, from, check = FALSE) <- new_from validObject(object) object } )
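# Sketch of move_to(): relocate a column that describes elements from the
# relations slot to the elements slot. The column must be consistent per
# element for the move to validate. Not evaluated at package load.
if (FALSE) {
    rel <- data.frame(
        sets = c("A", "A", "B"),
        elements = c("a", "b", "a"),
        fuzzy = 1,
        type = "gene"
    )
    TS <- move_to(tidySet(rel), "relations", "elements", "type")
    elements(TS)
}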
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/move_to.R
#' @importFrom dplyr mutate #' @importFrom rlang exprs #' @export dplyr::mutate #' Mutate #' #' Use mutate to alter the TidySet object. You can use activate with mutate or #' use the specific function. The S3 method filters using all the information #' on the TidySet. #' @param .data The TidySet object. #' @param ... The logical predicates in terms of the variables of the sets. #' @return A TidySet object #' @export #' @seealso [dplyr::mutate()] and [activate()] #' @family methods #' @examples #' relations <- data.frame( #' sets = c(rep("a", 5), "b", rep("a2", 5), "b2"), #' elements = rep(letters[seq_len(6)], 2), #' fuzzy = runif(12) #' ) #' a <- tidySet(relations) #' a <- mutate_element(a, Type = c(rep("Gene", 4), rep("lncRNA", 2))) #' a #' b <- mutate_relation(a, Type = sample(c("PPI", "PF", "MP"), 12, #' replace = TRUE #' )) #' @rdname mutate_ #' @export #' @method mutate TidySet mutate.TidySet <- function(.data, ...) { if (is.null(active(.data))) { df <- dplyr::mutate(as.data.frame(.data), ...) df2TS(.data, df) } else { switch( active(.data), elements = mutate_element(.data, ...), sets = mutate_set(.data, ...), relations = mutate_relation(.data, ...) ) } } #' @rdname mutate_ #' @export mutate_set <- function(.data, ...) { UseMethod("mutate_set") } #' @rdname mutate_ #' @export mutate_element <- function(.data, ...) { UseMethod("mutate_element") } #' @rdname mutate_ #' @export mutate_relation <- function(.data, ...) { UseMethod("mutate_relation") } #' @export #' @method mutate_element TidySet mutate_element.TidySet <- function(.data, ...) { elements <- elements(.data) out <- dplyr::mutate(elements, ...) if ("elements" %in% names(exprs(...))) { old_names <- name_elements(.data) new_names <- out$elements order <- match(.data@relations$elements, old_names) .data@relations$elements <- new_names[order] } elements(.data) <- unique(out) droplevels(.data) } #' @export #' @method mutate_set TidySet mutate_set.TidySet <- function(.data, ...) { sets <- sets(.data) out <- dplyr::mutate(sets, ...) if ("sets" %in% names(exprs(...))) { old_names <- name_sets(.data) new_names <- out$sets order <- match(.data@relations$sets, old_names) .data@relations$sets <- new_names[order] } sets(.data) <- unique(out) droplevels(.data) } #' @export #' @method mutate_relation TidySet mutate_relation.TidySet <- function(.data, ...) { relations <- relations(.data) out <- dplyr::mutate(relations, ...) relations(.data) <- out droplevels(.data) }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/mutate.R
#' @include AllClasses.R AllGenerics.R NULL #' @describeIn name_sets Name sets #' @export name_sets setMethod("name_sets", signature = signature(object = "TidySet"), function(object) { s <- sets(object)$sets if (is.factor(s)) { levels(s) } else if (is.character(s)) { s } } ) #' @describeIn name_elements Name elements #' @export name_elements setMethod("name_elements", signature = signature(object = "TidySet"), function(object) { e <- elements(object)$elements if (is.factor(e)) { levels(e) } else if (is.character(e)) { e } } ) #' @describeIn name_elements Rename elements #' @export name_elements<- setMethod("name_elements<-", signature = signature( object = "TidySet", value = "characterORfactor" ), function(object, value) { old <- name_elements(object) if (is.factor(value)) { value <- as.character(value) } elements <- elements(object) if (is.factor(elements$elements)) { levels(elements$elements) <- value } if (length(value) == length(old)) { elements$elements <- value } else if (length(value) > length(old)) { stop("More elements provided than existing.\n\t", "Use add_elements() if you want to add elements.", call. = FALSE) } else { stop("Less names provided than existing.\n\t", "Use filter() if you want to remove some elements", call. = FALSE) } object@elements <- unique(elements) if (anyDuplicated(object@elements$elements) > 0) { stop("Duplicated elements but with different information", call. = FALSE ) } old_relations <- object@relations$elements if (is.factor(old_relations)) { old_relations <- levels(old_relations) replace <- match(old_relations, old) levels(object@relations$elements) <- value[replace] } else { replace <- match(old_relations, old) object@relations$elements <- value[replace] } validObject(object) object } ) #' @describeIn name_sets Rename sets #' @export name_sets<- setMethod("name_sets<-", signature = signature( object = "TidySet", value = "characterORfactor" ), function(object, value) { old <- name_sets(object) if (is.factor(value)) { value <- as.character(value) } sets <- sets(object) if (is.factor(sets$sets)) { levels(sets$sets) <- value } if (length(value) == length(old)) { sets$sets <- value } else if (length(value) > length(old)) { stop("More sets provided than existing.\n\t", "Use add_sets() if you want to add sets.", call. = FALSE) } else { stop("Less names provided than existing.\n\t", "Use filter() if you want to remove some sets.", call. = FALSE) } object@sets <- unique(sets) if (anyDuplicated(object@sets$sets) > 0) { stop("Duplicated sets but with different information", call. = FALSE ) } old_relations <- object@relations$sets if (is.factor(old_relations)) { old_relations <- levels(old_relations) replace <- match(old_relations, old) levels(object@relations$sets) <- value[replace] } else { replace <- match(old_relations, old) object@relations$sets <- value[replace] } validObject(object) object } )
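# Usage sketch for the accessors above: read and rename sets in place; the
# replacement value must have as many names as existing sets. Not run at load.
if (FALSE) {
    TS <- tidySet(list(A = letters[1:2], B = letters[3]))
    name_sets(TS)
    name_sets(TS) <- c("C1", "C2")
    name_elements(TS)
}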
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/names.R
# Null doesn't use a symbol # Other it is a symbol collapse_sets <- function(sets, symbol = "union") { symbol <- check_symbol(symbol) if (length(symbol) == 0) { stop("Unrecognized set symbol. See set_symbols") } if (length(sets) > 1) { paste0(sets, collapse = set_symbols[symbol]) } else { sets } } check_symbol <- function(symbol) { p <- pmatch(symbol, names(set_symbols)) names(set_symbols)[p] } #' Name an operation #' #' Helps setting up the name of an operation. #' @param start,middle Character used as a start symbol or to divide #' \code{sets1} and \code{sets2}. #' @param sets1,sets2 Character of sets #' @param collapse_symbol Name of the symbol that joins the sets on #' \code{sets1} and \code{sets2}. #' @seealso [set_symbols()] #' @return A character vector combining the sets #' @export #' @examples #' naming(sets1 = c("a", "b")) #' naming(sets1 = "a", middle = "union", sets2 = "b") #' naming(sets1 = "a", middle = "intersection", sets2 = c("b", "c")) #' naming(sets1 = "a", middle = "intersection", sets2 = c("b", "c")) #' naming( #' start = "complement", sets1 = "a", middle = "intersection", #' sets2 = c("b", "c"), collapse_symbol = "intersection" #' ) naming <- function(start = NULL, sets1, middle = NULL, sets2 = NULL, collapse_symbol = "union") { msg <- "Symbol should be of length 1" longer <- any(c(length(collapse_symbol), length(start), length(middle)) > 1) if (longer) { stop(msg) } start <- check_symbol(start) if (!is.null(sets2) && is.null(middle)) { stop("sets1 and sets2 should be separated by a symbol") } middle <- check_symbol(middle) nSets1 <- length(sets1) nSets2 <- length(sets2) # Join the sets sets1 <- collapse_sets(sets1, collapse_symbol) sets2 <- collapse_sets(sets2, collapse_symbol) # Add parenthesis if (length(middle) != 0 && middle == check_symbol(collapse_symbol)) { return(paste0(set_symbols[start], sets1, set_symbols[middle], sets2)) } if (!is.null(sets2) && nSets1 > 1) { sets1 <- paste0("(", sets1, ")") } if (nSets2 > 1) { sets2 <- paste0("(", sets2, ")") } paste0(set_symbols[start], sets1, set_symbols[middle], sets2) }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/naming.R
#' Are some sets as elements of other sets? #' #' Check if some elements are also sets of others. This is also known as #' hierarchical sets. #' @param object A TidySet object. #' @return A logical value: TRUE if there are some sets included as elements of #' others. #' @family methods #' @export #' @seealso adjacency #' @examples #' relations <- list(A = letters[1:3], B = c(letters[4:5])) #' TS <- tidySet(relations) #' is_nested(TS) #' TS2 <- add_relation(TS, data.frame(elements = "A", sets = "B")) #' # Note that A is both a set and an element of B #' TS2 #' is_nested(TS2) is_nested <- function(object) { UseMethod("is_nested") } #' @rdname is_nested #' @export is_nested.TidySet <- function(object) { any(name_elements(object) %in% name_sets(object)) }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/nested.R
#' Read an OBO file #' #' Read an Open Biological and Biomedical Ontologies (OBO) formatted file #' @param x Path to a file in OBO format. #' @return A TidySet object. #' @family IO functions #' @references The format is described [here]( #' https://owlcollab.github.io/oboformat/doc/GO.format.obo-1_4.html) #' @export #' @examples #' oboFile <- system.file( #' package = "BaseSet", "extdata", #' "go-basic_subset.obo" #' ) #' gs <- getOBO(oboFile) #' head(gs) getOBO <- function(x) { data <- readLines(x) # Remove empty lines n <- vapply(data, nchar, numeric(1L)) data <- data[n != 0] # Look for terms kv0 <- strsplit(data, ": ", fixed = TRUE) kv <- kv0[lengths(kv0) == 2] k <- vapply(kv, "[", character(1L), i = 1) # Keys v <- vapply(kv, "[", character(1L), i = 2) # Values d <- which(k == "id") # Which position indicate a beginning of description keys <- k[d[1]:length(k)] df <- data.frame(matrix(ncol = length(unique(keys)), nrow = 0), stringsAsFactors = FALSE ) colnames(df) <- unique(keys) # For each term parse it in a tidy data frame for (i in seq_along(d)) { if (i == length(d)) { l <- seq(from = d[i], to = length(kv), by = 1) } else { l <- seq(from = d[i], to = d[i + 1] - 1, by = 1) } ch <- v[l] names(ch) <- k[l] keys <- unique(k[l]) m <- max(table(k[l])) lr <- lapply(keys, function(a, y) { rep_len(y[names(y) == a], m) }, y = ch) names(lr) <- keys not_pres <- setdiff(colnames(df), keys) sub_df <- as.data.frame(lr, stringsAsFactors = FALSE) sub_df[, not_pres] <- NA df <- rbind(df, sub_df) } # Clean the data a bit if ("is_obsolete" %in% colnames(df)) { df <- df[is.na(df[, "is_obsolete"]), , drop = FALSE] } strs <- strsplit(df$is_a, " ! ") df$sets <- vapply(strs, "[", character(1L), i = 1) df$set_name <- vapply(strs, "[", character(1L), i = 2) strs <- strsplit(df$xref, ":") df$ref_origin <- vapply(strs, "[", character(1L), i = 1) df$ref_code <- vapply(strs, "[", character(1L), i = 2) df$fuzzy <- 1 colnames(df)[colnames(df) == "id"] <- "elements" df <- df[!is.na(df$sets), , drop = FALSE] keep_columns <- setdiff(colnames(df), c("xref", "is_obsolete", "is_a")) df <- df[, keep_columns] tidySet.data.frame(df) } # Using data downloaded from # https://geneontology.org/gene-associations/goa_human_rna.gaf.gz on 20190711 # About the format: # https://geneontology.org/docs/go-annotation-file-gaf-format-2.1/ #' Read a GAF file #' #' Read a GO Annotation File (GAF) formatted file #' #' @references The format is defined [here]( #' https://geneontology.org/docs/go-annotation-file-gaf-format-2.1/). 
#' @param x A file in GAF format #' @return A TidySet object #' @export #' @family IO functions #' @importFrom utils read.delim #' @examples #' gafFile <- system.file( #' package = "BaseSet", "extdata", #' "go_human_rna_valid_subset.gaf" #' ) #' gs <- getGAF(gafFile) #' head(gs) getGAF <- function(x) { df <- read.delim(x, header = FALSE, comment.char = "!", stringsAsFactors = FALSE ) gaf_columns <- c( "DB", "DB_Object_ID", "DB_Object_Symbol", "Qualifier", "O_ID", "DB_Reference", "Evidence_Code", "With_From", "Aspect", "DB_Object_Name", "DB_Object_Synonym", "DB_Object_Type", "Taxon", "Date", "Assigned_By", "Annotation_Extension", "Gene_Product_Form_ID" ) colnames(df) <- gaf_columns # Check which optional columns are missing optional_columns <- c(4, 8, 10, 11, 16, 17) remove <- apply(df[, optional_columns], 2, function(x) { all(is.na(x)) }) df <- df[, -optional_columns[remove]] # Modify if they are GeneOntolgoy GO <- grepl("^GO:", df$O_ID) df$Aspect[GO] <- gsub("P", "BP", df$Aspect[GO]) df$Aspect[GO] <- gsub("C", "CC", df$Aspect[GO]) df$Aspect[GO] <- gsub("F", "MF", df$Aspect[GO]) # Classification of the columns according to where do they belong elements <- c(1, 2, 3, 10, 11, 12, 13, 17) sets <- c(5, 6, 9, 16) # Change the name of the columns to be ready to use tidySet.data.frame colnames(df) <- gsub("O_ID", "sets", colnames(df)) colnames(df) <- gsub("DB_Object_Symbol", "elements", colnames(df)) TS <- tidySet(df) # Check that the columns really have information that allows them to be # moved to the new slot. columns_gaf <- function(names, originals) { # If there is a missing column names[names %in% originals] } sets_columns <- columns_gaf(gaf_columns[sets], colnames(df)) nColm <- vapply(sets_columns, function(x) { nrow(unique(df[, c("sets", x)])) }, numeric(1)) sets_columns <- sets_columns[nColm <= length(unique(df$sets))] elements_columns <- columns_gaf(gaf_columns[elements], colnames(df)) nColm <- vapply(sets_columns, function(x) { nrow(unique(df[, c("elements", x)])) }, numeric(1)) elements_columns <- elements_columns[nColm <= length(unique(df$elements))] TS <- move_to(TS, "relations", "sets", sets_columns) TS <- move_to(TS, "relations", "elements", elements_columns) TS }
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/obo.R
#' @include AllClasses.R AllGenerics.R
NULL

remove_elements <- function(object, elements) {
    if (length(elements) == 0) {
        return(object)
    }
    keep_at_elements <- !object@elements$elements %in% elements
    new_elements <- object@elements[keep_at_elements, , drop = FALSE]
    object@elements <- droplevels(new_elements)
    rownames(object@elements) <- NULL
    object
}

remove_sets <- function(object, sets) {
    if (length(sets) == 0) {
        return(object)
    }
    keep_at_set <- !object@sets$sets %in% sets
    new_set <- object@sets[keep_at_set, , drop = FALSE]
    object@sets <- droplevels(new_set)
    rownames(object@sets) <- NULL
    object
}

remove_relations <- function(object, elements, sets,
                             relations = paste(elements, sets)) {
    if (length(sets) != length(elements)) {
        stop("sets and elements should be of the same length", call. = FALSE)
    }
    if (length(sets) == 0) {
        return(object)
    }
    old_relations <- elements_sets(object)
    # TRUE for the relations to keep, i.e. those not listed for removal
    keep_relation <- !old_relations %in% relations
    object@relations <- object@relations[keep_relation, , drop = FALSE]
    rownames(object@relations) <- NULL
    object@relations <- droplevels(object@relations)
    object
}

rm_relations_with_sets <- function(object, sets) {
    if (length(sets) == 0) {
        return(object)
    }
    keep_at_relations <- !object@relations$sets %in% sets
    new_relations <- object@relations[keep_at_relations, , drop = FALSE]
    object@relations <- droplevels(new_relations)
    rownames(object@relations) <- NULL
    object
}

rm_relations_with_elements <- function(object, elements) {
    if (length(elements) == 0) {
        return(object)
    }
    keep_at_relations <- !object@relations$elements %in% elements
    new_relations <- object@relations[keep_at_relations, , drop = FALSE]
    object@relations <- droplevels(new_relations)
    rownames(object@relations) <- NULL
    object
}

# Paste the elements and sets of the relations to identify each relation
elements_sets <- function(object) {
    paste(object@relations$elements, object@relations$sets)
}

# Elements of the relations present in object1 but not in object2
`%e-e%` <- function(object1, object2) {
    setdiff(object1@relations$elements, object2@relations$elements)
}

# Sets of the relations present in object1 but not in object2
`%s-s%` <- function(object1, object2) {
    setdiff(object1@relations$sets, object2@relations$sets)
}

# Relations present in object1 but not in object2
`%r-r%` <- function(object1, object2) {
    relations1 <- elements_sets(object1)
    relations2 <- elements_sets(object2)
    setdiff(relations1, relations2)
}

#' Apply to fuzzy
#'
#' Simplifies and returns the unique relations of the object.
#' @param relations A data.frame or similar with fuzzy, sets and elements
#' columns.
#' @param FUN A function to perform on the fuzzy numbers.
#' @param ... Other named arguments passed to `FUN`.
#' @return A data.frame of unique relations with the combined fuzzy values.
#' @keywords internal
fapply <- function(relations, FUN, ...) {
    if (ncol(relations) > 3) {
        warning("Dropping columns. Consider using `move_to`")
    }
    # Handle the duplicate cases
    relations <- unique(relations[, c("sets", "elements", "fuzzy")])
    basic <- paste(relations$elements, relations$sets)
    fuzzy <- split(relations$fuzzy, basic)

    # Helper function probably useful for intersection too
    iterate <- function(fuzzy, fun, ...) {
        fun(fuzzy, ...)
    }
    FUN <- match.fun(FUN)
    fuzzy <- vapply(fuzzy, iterate, fun = FUN, numeric(1L), ... = ...)
    relations2 <- unique(relations[, c("sets", "elements")])
    basic2 <- paste(relations2$elements, relations2$sets)
    # Reorder the combined fuzzy values to match the unique relations
    cbind(relations2, fuzzy = fuzzy[match(basic2, names(fuzzy))])
}

merge_tidySets <- function(object1, object2) {
    new_relations <- merge(object1@relations, object2@relations,
        all = TRUE, sort = FALSE)
    new_sets <- merge(object1@sets, object2@sets, all = TRUE, sort = FALSE)
    new_elements <- merge(object1@elements, object2@elements,
        all = TRUE, sort = FALSE)
    object2@relations <- unique(new_relations)
    object2@sets <- unique(new_sets)
    object2@elements <- unique(new_elements)
    rownames(object2@relations) <- NULL
    rownames(object2@sets) <- NULL
    rownames(object2@elements) <- NULL
    object2
}

elements_in_set <- function(object, sets) {
    as.character(object@relations$elements[object@relations$sets %in% sets])
}

sets_for_elements <- function(object, elements) {
    as.character(object@relations$sets[object@relations$elements %in% elements])
}

replace_interactions <- function(object, new_relations, keep) {
    stopifnot(is.logical(keep))
    old_relations <- object@relations
    if (keep) {
        # Ensure that the numbers of columns match before rbinding
        new_columns <- setdiff(colnames(old_relations), colnames(new_relations))
        new_relations[, new_columns] <- NA
        new_relations <- rbind(old_relations, new_relations)
    }
    object@relations <- unique(new_relations)
    object
}

check_sets <- function(object, sets) {
    sets %in% object@relations$sets
}

#' @importFrom dplyr n_distinct
check_fuzziness <- function(object) {
    # TRUE when every element-set pair has a single fuzzy value
    r <- relations(object)
    fuzziness <- tapply(r$fuzzy, paste(r$elements, r$sets), FUN = n_distinct)
    all(fuzziness == 1)
}
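
# A minimal sketch (not run) of how the internal fapply() helper collapses
# duplicated element-set pairs; the data.frame below is a made-up example
# with the standard sets/elements/fuzzy columns used throughout the package:
# rel <- data.frame(
#     sets = c("A", "A", "B"),
#     elements = c("x", "x", "y"),
#     fuzzy = c(0.2, 0.8, 0.5)
# )
# fapply(rel, max)
# # one row per element-set pair: (A, x, 0.8) and (B, y, 0.5)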
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/operations.R
#' Create the power set
#'
#' Create the power set of the object: all the combinations of the elements of
#' the sets.
#' @param object A TidySet object.
#' @param set The name of the set to be used for the power set.
#' @param name The root name of the new set, if not provided the standard
#' notation "P()" is used.
#' @param keep Should the original relations and sets be kept?
#' @param keep_relations,keep_elements,keep_sets Finer control over what is
#' kept; they default to the value of `keep`.
#' @param ... Other arguments passed down if possible.
#' @return A TidySet object with the new set.
#' @family methods
#' @export
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("a", 5), "b"),
#'     elements = letters[seq_len(6)]
#' )
#' TS <- tidySet(relations)
#' power_set(TS, "a", name = "power_set")
power_set <- function(object, set, name, ...) {
    UseMethod("power_set")
}

#' @export
#' @method power_set TidySet
#' @importFrom utils combn
power_set.TidySet <- function(object, set, name = NULL, keep = TRUE,
                              keep_relations = keep,
                              keep_elements = keep,
                              keep_sets = keep, ...) {
    if (!is.logical(keep)) {
        stop("keep must be a logical value.", call. = FALSE)
    }

    if (!all(set %in% name_sets(object))) {
        stop("The set must be present in the object", call. = FALSE)
    }
    elements_orig <- name_elements(filter(object, sets == !!set))
    # Subsets of size 1 to n - 1: the empty set is skipped and the full set
    # is already present in the original object.
    length_sets <- seq(1, length(elements_orig) - 1)
    new_sets <- lapply(length_sets, function(x) {
        combn(elements_orig, x, simplify = FALSE)
    })
    # Power set naming following the standard notation:
    # https://en.wikipedia.org/wiki/Power_set
    if (is.null(name)) {
        name <- paste0("P(", set, ")")
    }
    names(new_sets) <- paste0(name, "_", length_sets, "_")
    list_sets <- unlist(new_sets, recursive = FALSE)
    # TODO: improve the naming convention (currently <name>_<size>_<index>)
    new_object <- tidySet(list_sets[lengths(list_sets) >= 1])

    if (keep_relations) {
        out <- relations(new_object)
        new_colnames <- setdiff(colnames(object@relations), colnames(out))
        out[, new_colnames] <- NA
        object@relations <- rbind(object@relations, out)

        new_colnames <- setdiff(colnames(object@sets), "sets")
        sets <- data.frame(sets = new_object@sets$sets)
        sets[, new_colnames] <- NA
        object@sets <- rbind(object@sets, sets)
    } else {
        object <- new_object
    }
    droplevels(object)
}
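
# A quick illustration (not run) of the naming scheme produced by
# power_set(); the exact names are an assumption derived from the code above:
# relations <- data.frame(sets = rep("a", 3), elements = letters[1:3])
# TS <- tidySet(relations)
# name_sets(power_set(TS, "a"))
# # "a" plus subsets named P(a)_<size>_<index>, e.g. "P(a)_1_1", "P(a)_2_3"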
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/power_set.R
#' @include AllClasses.R
NULL

#' Method to show the TidySet object
#'
#' Prints the resulting table of a TidySet object. Does not show elements or
#' sets without any relationship (empty sets). To see them use [sets()] or
#' [elements()].
#' @param object A TidySet
#'
#' @return A table with the information of the relationships.
#' @export
setMethod("show",
    signature = signature(object = "TidySet"),
    function(object) {
        validObject(object)
        o <- as.data.frame(object)
        print(o)
    }
)
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/print.R
#' @include AllClasses.R AllGenerics.R
#' @importFrom dplyr pull
#' @importFrom rlang !! enquo
#' @export
dplyr::pull

#' Pull from a TidySet
#'
#' Use pull to extract a column of a TidySet object. You can use activate
#' with pull or use the specific functions. When nothing is active the S3
#' method pulls using all the information on the TidySet.
#' @param .data The TidySet object.
#' @param var The literal variable name, a positive integer or a negative
#' integer column position.
#' @param name Column used to name the output.
#' @param ... Currently not used.
#' @return A vector with the values of the selected column.
#' @export
#' @seealso [dplyr::pull()] and [activate()]
#' @family methods
#' @examples
#' relations <- data.frame(
#'     sets = c(rep("a", 5), "b", rep("a2", 5), "b2"),
#'     elements = rep(letters[seq_len(6)], 2),
#'     fuzzy = runif(12)
#' )
#' a <- tidySet(relations)
#' a <- mutate_element(a, type = c(rep("Gene", 4), rep("lncRNA", 2)))
#' pull(a, type)
#' # Equivalent to pull_relation
#' b <- activate(a, "relations")
#' pull_relation(b, elements)
#' pull_element(b, elements)
#' # Pull element
#' pull_element(a, type)
#' # Pull sets
#' pull_set(a, sets)
#' @rdname pull_
#' @export
#' @method pull TidySet
pull.TidySet <- function(.data, var = -1, name = NULL, ...) {
    a <- active(.data)
    if (is.null(a)) {
        return(dplyr::pull(as.data.frame(.data), !!enquo(var), ...))
    }
    switch(a,
        elements = pull_element(.data, !!enquo(var), !!enquo(name), ...),
        sets = pull_set(.data, !!enquo(var), !!enquo(name), ...),
        relations = pull_relation(.data, !!enquo(var), !!enquo(name), ...))
}

#' @rdname pull_
#' @export
pull_set <- function(.data, var = -1, name = NULL, ...) {
    UseMethod("pull_set")
}

#' @rdname pull_
#' @export
pull_element <- function(.data, var = -1, name = NULL, ...) {
    UseMethod("pull_element")
}

#' @rdname pull_
#' @export
pull_relation <- function(.data, var = -1, name = NULL, ...) {
    UseMethod("pull_relation")
}

#' @export
#' @method pull_set TidySet
pull_set.TidySet <- function(.data, var = -1, name = NULL, ...) {
    sets <- sets(.data)
    dplyr::pull(sets, !!enquo(var), !!enquo(name), ...)
}

#' @export
#' @method pull_element TidySet
pull_element.TidySet <- function(.data, var = -1, name = NULL, ...) {
    elements <- elements(.data)
    dplyr::pull(elements, !!enquo(var), !!enquo(name), ...)
}

#' @export
#' @method pull_relation TidySet
pull_relation.TidySet <- function(.data, var = -1, name = NULL, ...) {
    relations <- relations(.data)
    dplyr::pull(relations, !!enquo(var), !!enquo(name), ...)
}
/scratch/gouwar.j/cran-all/cranData/BaseSet/R/pull.R