#' Genotype Combination Maker #' #' Make data frame of possible genotype combinations #' @param x Number of alleles. #' @note This function is for internal BIGDAWG use only. makeComb <- function(x) { if(x >= 2) { tmp <- t(combn(x,2)) tmp <- rbind(tmp,t(matrix(rep(1:x,2),byrow=T,ncol=x))) tmp <- tmp[do.call(order, as.data.frame(tmp)),] return(tmp) } else ( return(NA) ) } #' Observed Frequency #' #' Get observed frequency of genotypes #' @param x Single genotype. #' @param genos.locus Locus genotypes. #' @note This function is for internal BIGDAWG use only. getObsFreq <- function(x,genos.locus) { if(x[1]==x[2]) { length(which(genos.locus[,1]==x[1] & genos.locus[,2]==x[2])) } else { return(sum(length(which(genos.locus[,1]==x[1] & genos.locus[,2]==x[2])), length(which(genos.locus[,1]==x[2] & genos.locus[,2]==x[1])))) } } #' Chi square matrices #' #' Chi Square contingency matrix builder with rare cell binning #' @param Locus Locus of interest. #' @param genos.sub Genotypes for locus of interest. #' @param Allele.Freq Allele frequencies. #' @param Allele.Combn Allele combinations. #' @note This function is for internal BIGDAWG use only. getCS.Mat <- function(Locus,genos.sub,Allele.Freq,Allele.Combn) { GTYPES <- Allele.Combn[[Locus]] if(!is.na(GTYPES)[1]) { nSID <- nrow(genos.sub) ColNames <- gsub(".1","",colnames(genos.sub),fixed=T) # column names genos.locus <- genos.sub[,which(ColNames==Locus)] freq <- Allele.Freq[[Locus]] GTYPES <- Allele.Combn[[Locus]] GTYPES <- lapply(seq_len(nrow(GTYPES)), function(x) GTYPES[x,]) #Expected Counts freq.Exp <- lapply(GTYPES,FUN=function(x) ifelse(x[1]==x[2],prod(freq[x])*nSID,2*prod(freq[x])*nSID)) #Observed Counts GTYPES <- lapply(GTYPES,FUN=function(x) names(freq[x])) freq.Obs <- lapply(GTYPES,getObsFreq,genos.locus=genos.locus) freq.mat <- cbind(do.call(rbind,GTYPES), do.call(rbind,freq.Obs), do.call(rbind,freq.Exp)) colnames(freq.mat) <- c("Allele.1","Allele.2","Obs","Exp") #bin rare cells freq.bin <- freq.mat[which(as.numeric(freq.mat[,'Exp'])<5),] freq.bin <- matrix(data=freq.bin,ncol=ncol(freq.mat),dimnames=dimnames(freq.mat)) if(nrow(freq.bin)>0) { freq.bin <- matrix(c("binned.1","binned.2",sum(as.numeric(freq.bin[,'Obs'])),sum(as.numeric(freq.bin[,'Exp']))), ncol=ncol(freq.bin), dimnames=dimnames(freq.bin)) } #Final Matrix for ChiSq if(nrow(freq.bin)>0) { freq.final <- rbind(freq.mat[which(as.numeric(freq.mat[,'Exp'])>=5),],freq.bin) } else { freq.final <- freq.mat } #Calculate (Obs - Exp)^2 / Exp if(nrow(freq.final)>1) { freq.final <- cbind(freq.final, apply(freq.final[,c('Obs','Exp')], MARGIN=1, FUN=function(x) ((as.numeric(x['Obs']) - as.numeric(x['Exp']))^2)/as.numeric(x['Exp']))) } else { freq.final <- cbind(freq.final,0) } colnames(freq.final)[ncol(freq.final)] <- "O-E2|E" return(freq.final) } else { return(NA) } } #' Chi square test statistic #' #' Calculate chi square test statistic #' @param Locus Locus of interest. #' @param Freq.Final Contingency Matrix getCS.Mat output. #' @note This function is for internal BIGDAWG use only. getCS.stat <- function(Locus,Freq.Final) { df <- Freq.Final[[Locus]] if(!is.null(nrow(df))) { return(sum(as.numeric(df[,'O-E2|E']))) } else { return(NA) } } #' Recompute number of alleles #' #' Using Freq.Final, recompute number of alleles #' @param x Locus specific contingency matrix getCS.Mat output. #' @note This function is for internal BIGDAWG use only. 
getAllele.Count <- function(x) { if(!is.null(nrow(x))) { return( length(unique(c(x[,'Allele.1'],x[,'Allele.2']))) ) } else { return(NA) } } #' Hardy Weinbergy Equilibrium Function #' #' This is the workhorse function for each group analysis. #' @param genos.sub data frame of genotype files post processing. #' @param loci list of loci. #' @param nloci number of loci in list #' @note This function is for internal BIGDAWG use only. HWE.ChiSq <- function(genos.sub,loci,nloci) { #Format genotypes df.1 <- data.frame(genos.sub[,seq(1,nloci*2,2)]) df.2 <- data.frame(genos.sub[,seq(2,nloci*2,2)]) colnames(df.2) <- colnames(df.1) df <- rbind(df.1,df.2) colnames(df) <- do.call(rbind,loci) rm(df.1,df.2) #Allele info Alleles <- lapply(loci,FUN=function(x) sort(unique(df[,x]))); names(Alleles) <- loci # unique allele names nAlleles <- lapply(loci,FUN=function(x) length(na.omit(unique(df[,x])))); names(nAlleles) <- loci # no. unique alleles nAlleles.tot <- lapply(loci,FUN=function(x) length(df[,x])); names(nAlleles.tot) <- loci # total no. alleles #Possible Genotypes Allele.Combn <- lapply(nAlleles,makeComb); names(Allele.Combn) <- loci #Get Allele Counts and Frequencies Allele.cnts <- lapply(loci,FUN=function(x) table(df[,x])); names(Allele.cnts) <- loci Allele.Freq <- lapply(loci,FUN=function(x) Allele.cnts[[x]]/nAlleles.tot[[x]]); names(Allele.Freq) <- loci #Get Observed and Expected Frequencies Matrix for genotypes Freq.Final <- lapply(loci,FUN=getCS.Mat,genos.sub=genos.sub,Allele.Freq=Allele.Freq,Allele.Combn=Allele.Combn) names(Freq.Final) <- loci #Calculate Chi Square Statistic for each Locus Freq.chisq <- lapply(loci,FUN=getCS.stat,Freq.Final=Freq.Final) names(Freq.chisq) <- loci #Recompute number of alleles and genotypes at each locus from binned contingency matrices (Freq.Final) nAlleles.bin <- lapply(Freq.Final,FUN=getAllele.Count) names(nAlleles.bin) <- loci nGenotypes.bin <- lapply(Freq.Final,nrow) names(nGenotypes.bin) <- loci nGenotypes.bin[which(as.numeric(lapply(nGenotypes.bin,FUN=is.null))==1)] <- NA #Get degrees of freedom for each locus #Alleles = a ; possible genotypes =g ; df = g - (a - 1) Allele.dof <- lapply(loci,FUN=function(x) nGenotypes.bin[[x]] - (nAlleles.bin[[x]] - 1) ); names(Allele.dof) <- loci #Get P.values from Chi Square distribution Freq.pvals <- lapply(loci,FUN=function(x) 1-pchisq(as.numeric(Freq.chisq[[x]]), as.numeric(Allele.dof[[x]]))) names(Freq.pvals) <- loci #Format Output Test.out <- cbind(loci, do.call(rbind,Freq.chisq), do.call(rbind,Allele.dof), do.call(rbind,Freq.pvals), rep('NS',nloci)) colnames(Test.out) <- c("Locus","X.square","df","p.value","sig") rownames(Test.out) <- NULL Test.out[which(as.numeric(Test.out[,'p.value'])<0.05),"sig"] <- "*" Test.out <- matrix(unlist(Test.out),ncol=ncol(Test.out),dimnames=dimnames(Test.out)) rownames(Test.out) <- NULL Test.out[,'X.square'] <- sapply(as.numeric(Test.out[,'X.square']),FUN=round,digits=4) Test.out[,'p.value'] <- sapply(as.numeric(Test.out[,'p.value']),FUN=function(x) format.pval(x)) #Flag for invalid degrees of freedom flagLoci <- which(as.numeric(Test.out[,'df'])<1) #Flag for invalid chi square matrices Freq.Flag <- lapply(loci,FUN=function(x) ifelse(nrow(Freq.Final[[x]])>2,0,1)) flagLoci <- unique(c(flagLoci,which(Freq.Flag==1))) #Flag for invalid chi square matrices flagLoci <- unique(c(flagLoci,which(is.na(Test.out[,'X.square'])))) if(length(flagLoci)>0){ Test.out[Test.out[,'Locus'] %in% unlist(loci[flagLoci]),2:ncol(Test.out)] <- "NCalc" } return(Test.out) }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/HWE_support_functions.R
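A minimal usage sketch for the helpers above (values are illustrative, not from the package): makeComb enumerates every genotype an x-allele locus can form, and getObsFreq counts one genotype in a two-column genotype table regardless of allele order.

# Illustrative only: 3 alleles give 6 possible genotypes (3 heterozygous + 3 homozygous).
makeComb(3)   # rows (after ordering): 1-1, 1-2, 1-3, 2-2, 2-3, 3-3
genos.locus <- cbind(c("01","01","02"), c("02","01","01"))
getObsFreq(c("01","02"), genos.locus)   # 2 (counts both 01/02 and 02/01)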
#' Hardy-Weinberg Wrapper
#'
#' Wrapper for main HWE function
#' @param Tab Data frame of genotype files post processing.
#' @param Output Data return carryover from main BIGDAWG function.
#' @param Verbose Summary display carryover from main BIGDAWG function.
#' @note This function is for internal BIGDAWG use only.
HWE.wrapper <- function(Tab,Output,Verbose) {

  HWE <- HWE(Tab)

  if(Output) {
    sink("HWE.txt")
    print(HWE,quote=F)
    sink()
  }

  cat("\n> HARDY-WEINBERG ANALYSIS COMPLETED\n")

  if(Verbose) {
    cat("\nControls (Group 0):\n")
    HWE.con <- as.data.frame(HWE[['controls']])
    print(HWE.con,row.names=F,quote=F)
    cat("\nCases (Group 1):\n")
    HWE.cas <- as.data.frame(HWE[['cases']])
    print(HWE.cas,row.names=F,quote=F)
    cat("\n")
  }

  return(HWE)
}
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/HWE_wrapper.R
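A hedged calling sketch (assumes Tab is a BIGDAWG-processed genotype data frame and the package's HWE() workhorse is available): the wrapper returns the per-group Hardy-Weinberg tables it prints.

# Sketch only; 'Tab' is assumed to already be in BIGDAWG's post-processing format.
res <- HWE.wrapper(Tab, Output = FALSE, Verbose = TRUE)
names(res)          # expected to include "controls" and "cases"
res[['controls']]   # per-locus X.square, df, p.value, sig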
#' Haplotype Analysis Function for Multicore #' #' This is the workhorse function for the haplotype analysis. #' @param genos.sub The genotype columns of the loci(locus) set being analyzed. #' @param grp Case/Control or Phenotype groupings. #' @param Strict.Bin Logical specify if strict rare cell binning should be used in ChiSq test #' @param Verbose Summary display carryover from main BIGDAWG function #' @note This function is for internal BIGDAWG use only. H.MC <- function(genos.sub,grp,Strict.Bin,Verbose) { loci.sub <- unique(gsub(".1","",colnames(genos.sub),fixed=T)) nloci.sub <- as.numeric(length(loci.sub)) Haplotype <- paste(loci.sub,collapse="~") cat("Estimating Haplotypes ...",Haplotype,"\n") ### estimate haplotypes Tab.out <- haplo.stats::haplo.em(geno=genos.sub,locus.label=loci.sub) ## extract haplotype freqs for cases and controls Subjects <- as.list(seq_len(nrow(genos.sub))) Tab.Haps <- lapply(Subjects,FUN=getHap,HaploEM=Tab.out) Tab.Haps <- cbind(grp,do.call(rbind,Tab.Haps)) colnames(Tab.Haps)[2:3] <-c("Haplotype.1","Haplotype.2") ## Build Contingency Matrix of Counts haps <- sort(unique(c(Tab.Haps[,'Haplotype.1'],Tab.Haps[,'Haplotype.2']))) haps_counts <- mat.or.vec(nr=length(haps),nc=2) rownames(haps_counts) <- haps colnames(haps_counts) <- c('Group.0','Group.1') #Group 0 Tab.Haps.grp0 <- Tab.Haps[which(Tab.Haps[,'grp']==0),c('Haplotype.1','Haplotype.2')] haps_grp0 <- table(Tab.Haps.grp0) haps_counts[match(names(haps_grp0),haps),'Group.0'] <- haps_grp0 #Group 1 Tab.Haps.grp1 <- Tab.Haps[which(Tab.Haps[,'grp']==1),c('Haplotype.1','Haplotype.2')] haps_grp1 <- table(Tab.Haps.grp1) haps_counts[match(names(haps_grp1),haps),'Group.1'] <- haps_grp1 ### get expected values for cells, bin small cells, and run chi square if(Strict.Bin) { Result <- RunChiSq(haps_counts) } else { Result <- RunChiSq_c(haps_counts) } if( !(Result$Flag) ) { haps_binned <- NULL Final_binned <- haps_counts overall.chisq <- NULL ## Convert counts to frequencies haps_freq <- haps_counts haps_freq[,'Group.0'] <- haps_freq[,'Group.0']/(nrow(Tab.Haps.grp0)*2) haps_freq[,'Group.1'] <- haps_freq[,'Group.1']/(nrow(Tab.Haps.grp1)*2) ## make a nice table of ORs, ci, p values ccdat <-TableMaker(haps_counts) ORout <- lapply(ccdat, cci.pval) #OR list ORout <- do.call(rbind,ORout) #OR matrix rmRows <- which(ORout[,'sig']=="NA") if( length(rmRows > 0) ) { ORout <- ORout[-rmRows,,drop=F] } } else { haps_binned <- Result$Binned Final_binned <- Result$Matrix overall.chisq <- Result$Test ## Convert counts to frequencies haps_freq <- haps_counts haps_freq[,'Group.0'] <- haps_freq[,'Group.0']/(nrow(Tab.Haps.grp0)*2) haps_freq[,'Group.1'] <- haps_freq[,'Group.1']/(nrow(Tab.Haps.grp1)*2) ## make a nice table of ORs, ci, p values ccdat <-TableMaker(Final_binned) ORout <- lapply(ccdat, cci.pval) #OR list ORout <- do.call(rbind,ORout) #OR rmRows <- which(ORout[,'sig']=="NA") if( length(rmRows > 0) ) { ORout <- ORout[-rmRows,,drop=F] } } ####################################################### Build Output List #haps_binned - Binned Haplotypes if( is.null(row.names(haps_binned)) ) { names <- "Nothing.binned" } else { names <- rownames(haps_binned) } haps_binned_fix <- cbind(names,haps_binned) colnames(haps_binned_fix) <- c(Haplotype,colnames(haps_binned)) rownames(haps_binned_fix) <- NULL if( sum(grepl("\\^",haps_binned_fix[,Haplotype]))>0 ) { haps_binned_fix[,Haplotype] <- gsub("\\^","Abs",haps_binned_fix[,Haplotype]) } #final_binned - Contingency Table for ChiSq Final_binned_fix <- cbind(rownames(Final_binned),Final_binned) 
colnames(Final_binned_fix) <- c(Haplotype,colnames(Final_binned)) rownames(Final_binned_fix) <- NULL if( sum(grepl("^",Final_binned_fix[,Haplotype]))>0 ) { Final_binned_fix[,Haplotype] <- gsub("\\^","Abs",Final_binned_fix[,Haplotype]) } #haps_freq - Frequencies haps_freq_fix <- cbind(rownames(haps_freq),haps_freq) colnames(haps_freq_fix) <- c(Haplotype,colnames(haps_freq)) rownames(haps_freq_fix) <- NULL if(sum(grepl("\\^",haps_freq_fix[,Haplotype]))>0) { haps_freq_fix[,Haplotype] <- gsub("\\^","Abs",haps_freq_fix[,Haplotype]) } #ORout - ODDs Ratios ORout_fix <- cbind(rownames(ORout),ORout) colnames(ORout_fix) <- c(Haplotype,colnames(ORout)) rownames(ORout_fix) <- NULL if(sum(grepl("\\^",ORout_fix[,Haplotype]))>0) { ORout_fix[,Haplotype] <- gsub("\\^","Abs",ORout_fix[,Haplotype]) } #Haplotype - Replace Abs symbols if(sum(grepl("\\^",Tab.Haps[,'Haplotype.1'])) + sum(grepl("\\^",Tab.Haps[,'Haplotype.2'])) >0) { Tab.Haps[,'Haplotype.1'] <- gsub("\\^","Abs",Tab.Haps[,'Haplotype.1']) Tab.Haps[,'Haplotype.2'] <- gsub("\\^","Abs",Tab.Haps[,'Haplotype.2']) } colnames(Tab.Haps)[2:3] <- c(paste(Haplotype,".Hap1",sep=""),paste(Haplotype,".Hap2",sep="")) #Overall ChiSq NULL if( is.null(overall.chisq) ) { overall.chisq <- data.frame(rbind(rep("NCalc",4))) colnames(overall.chisq) <- c("X.square", "df", "p.value", "sig") } H.tmp <- list() H.tmp[['Haplotypes']] <- Tab.Haps[,2:3] # table of subject haplotypes H.tmp[['freq']] <- haps_freq_fix # rounded frequencies H.tmp[['binned']] <- haps_binned_fix # binned haplotypes H.tmp[['OR']] <- ORout_fix # odd ratio table H.tmp[['chisq']] <- overall.chisq # chi sq test statistic H.tmp[['table']] <- Final_binned_fix # final table for chi sq #if(Verbose) { # overall.chisq$X.square <- round(as.numeric(levels(overall.chisq$X.square)),digits=5) # print(overall.chisq, row.names=F) # cat("\n") #} return(H.tmp) }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/H_MC.R
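A hedged sketch of how a single haplotype set is analyzed (assumes genos.sub holds two columns per locus and grp is the 0/1 grouping vector, as documented above): the result is the named list that H.MC.wrapper later merges.

# Sketch only; inputs are assumed to match the roxygen descriptions above.
res <- H.MC(genos.sub, grp, Strict.Bin = TRUE, Verbose = FALSE)
names(res)   # "Haplotypes" "freq" "binned" "OR" "chisq" "table"
res$chisq    # overall chi-square for the haplotype contingency table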
#' Haplotype List Builder
#'
#' Builds table of haplotypes from combinations
#' @param Combn Combination of loci to extract from genos.
#' @param genos The genotype columns of the loci set being analyzed.
#' @param loci Character vector of unique loci being analyzed.
#' @param loci.ColNames Character vector of genos column names.
#' @note This function is for internal BIGDAWG use only.
buildHAPsets <- function(Combn,genos,loci,loci.ColNames) {
  # Subset the genotype columns belonging to the requested locus combination
  Set.H <- loci.ColNames %in% loci[Combn]
  return(genos[,Set.H])
}

#' Haplotype Name Builder
#'
#' Builds table of names for HAPsets
#' @param Combn Combination of loci to extract from genos.
#' @param loci Character vector of unique loci being analyzed.
#' @note This function is for internal BIGDAWG use only.
buildHAPnames <- function(Combn,loci) {
  return(paste(loci[Combn],collapse="~"))
}

#' Haplotype Table Maker
#'
#' Builds table of haplotypes
#' @param HaploEM Haplotype output object from the haplo.stats::haplo.em function.
#' @param SID Index number (i.e., row number) of sample ID from genotype matrix.
#' @note This function is for internal BIGDAWG use only.
getHap <- function(SID,HaploEM) {
  # Rows of the EM output belonging to this subject
  Range <- which(HaploEM$indx.subj==SID)
  # Most probable haplotype pair (when more than one possibility)
  HapGet <- which.max(HaploEM$post[Range])
  Hap1.no <- HaploEM$hap1code[Range][HapGet]
  Hap2.no <- HaploEM$hap2code[Range][HapGet]
  # Combine each haplotype's alleles into a "~"-delimited string
  Hap1 <- paste(HaploEM$haplotype[Hap1.no,],collapse="~")
  Hap2 <- paste(HaploEM$haplotype[Hap2.no,],collapse="~")
  return(c(Hap1,Hap2))
}
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/H_support_functions.R
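A hedged sketch of getHap on a haplo.stats EM fit (the genotype matrix genos.sub and the locus labels are placeholders, not package objects): the pair of "~"-joined haplotype strings with the highest posterior probability is returned for the requested subject.

# Sketch only; 'genos.sub' (two columns per locus) and the labels are assumed.
library(haplo.stats)
em <- haplo.em(geno = genos.sub, locus.label = c("A", "B"))
getHap(1, em)   # e.g. c("01:01~08:01", "02:01~44:02")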
#' Haplotype Wrapper for Multicore #' #' Wrapper for main H function #' @param SID Character vector of subject IDs. #' @param Tabsub Data frame of genotype calls for set being analyzed. #' @param loci Character vector of unique loci being analyzed. #' @param loci.ColNames Character vector of genos column names. #' @param genos The genotype columns of the loci set being analyzed. #' @param grp Case/Control or Phenotype groupings. #' @param All.Pairwise Haplotype argument carryover from main BIGDAWG function #' @param Strict.Bin Logical specify if strict rare cell binning should be used in ChiSq test #' @param Output Data return carryover from main BIGDAWG function #' @param Verbose Summary display carryover from main BIGDAWG function #' @param Cores Cores carryover from main BIGDAWG function #' @note This function is for internal BIGDAWG use only. H.MC.wrapper <- function(SID,Tabsub,loci,loci.ColNames,genos,grp,All.Pairwise,Strict.Bin,Output,Verbose,Cores) { cat(">>>> STARTING HAPLOTYPE ANALYSIS...","\n") # Define Pairwise Combinations to Run When Selected if(All.Pairwise) { # Define Combinations Combos <- t(combn(length(loci),2)) Combos <- lapply(seq_len(nrow(Combos)),FUN=function(x) Combos[x,]) cat("\nYou have opted to run all pairwise combinations for the haplotype analysis.\n") cat("There are", length(Combos), "possible locus combinations to run.\n" ) # Define Pairwise Sets HAPsets <- parallel::mclapply(Combos,FUN=buildHAPsets,genos=genos,loci=loci,loci.ColNames=loci.ColNames,mc.cores=Cores) HAPnames <- unlist(parallel::mclapply(Combos,FUN=buildHAPnames,loci=loci,mc.cores=Cores)) names(HAPsets) <- HAPnames } else { HAPsets <- list() HAPsets[[paste(loci,collapse='~')]] <- genos } # Run H #Start <- Sys.time() H.list <- parallel::mclapply(HAPsets,H.MC,grp=grp,Strict.Bin=Strict.Bin,Verbose=Verbose,mc.cores=Cores) #Sys.time() - Start if(All.Pairwise) { #chisq tmp.cs <- parallel::mclapply(H.list,"[[","chisq",mc.cores=Cores) tmp.cs <- do.call(rbind,tmp.cs) tmp.cs <- cbind(rownames(tmp.cs),tmp.cs) colnames(tmp.cs)[1] <- "Pairwise.Loci" rownames(tmp.cs) <- NULL } if(Output) { # Gathering List Elements H.out <- list() if(All.Pairwise) { #Frequencies tmp <- parallel::mclapply(H.list,"[[","freq",mc.cores=Cores) nrow.tmp <- lapply(tmp,nrow) haps.tmp <- rep(names(nrow.tmp),nrow.tmp) tmp <- cbind(haps.tmp,do.call(rbind,tmp)) colnames(tmp)[1:2] <- c("Pairwise.Loci","Haplotype") H.out[['freq']] <- tmp #binned tmp <- parallel::mclapply(H.list,"[[","binned",mc.cores=Cores) nrow.tmp <- lapply(tmp,nrow) haps.tmp <- rep(names(nrow.tmp),nrow.tmp) tmp <- cbind(haps.tmp,do.call(rbind,tmp)) colnames(tmp)[1:2] <- c("Pairwise.Loci","Haplotype") H.out[['binned']] <- tmp #OR tmp <- parallel::mclapply(H.list,"[[","OR",mc.cores=Cores) nrow.tmp <- lapply(tmp,nrow) haps.tmp <- rep(names(nrow.tmp),nrow.tmp) tmp <- cbind(haps.tmp,do.call(rbind,tmp)) colnames(tmp)[1:2] <- c("Pairwise.Loci","Haplotype") H.out[['OR']] <- tmp #chisq H.out[['chisq']] <- tmp.cs #table tmp <- parallel::mclapply(H.list,"[[","table",mc.cores=Cores) nrow.tmp <- lapply(tmp,nrow) haps.tmp <- rep(names(nrow.tmp),nrow.tmp) tmp <- cbind(haps.tmp,do.call(rbind,tmp)) colnames(tmp)[1:2] <- c("Pairwise.Loci","Haplotype") H.out[['table']] <- tmp #Haplotypes tmp <- parallel::mclapply(H.list,"[[","Haplotypes",mc.cores=Cores) nrow.tmp <- lapply(tmp,nrow) haps.tmp <- rep(names(nrow.tmp),nrow.tmp) tmp <- cbind(SID,haps.tmp,do.call(rbind,tmp)) colnames(tmp) <- c("SAMPLE.ID","Pairwise.Loci","Haplotype.1","Haplotype.2") H.out[['Haplotypes']] <- tmp } else { 
#Frequencies-binned-OR-chisq-table H.out[['freq']] <- H.list[[1]][['freq']] H.out[['binned']] <- H.list[[1]][['binned']] H.out[['OR']] <- H.list[[1]][['OR']] H.out[['chisq']] <- H.list[[1]][['chisq']] H.out[['table']] <- H.list[[1]][['table']] #Haplotyeps tmp <- H.list[[1]][['Haplotypes']] tmp <- cbind(SID,tmp) colnames(tmp)[1] <- "SAMPLE.ID" H.out[['Haplotypes']] <- tmp } ## write to file write.table(H.out[['freq']], "haplotype_freqs.txt", sep="\t", quote = F, row.names=F, col.names=T) write.table(H.out[['binned']], "haplotype_binned.txt", sep="\t", quote = F, row.names=F, col.names=T) write.table(H.out[['OR']], "haplotype_OR.txt", sep="\t", quote = F, row.names=F, col.names=T) write.table(H.out[['chisq']], "haplotype_chisq.txt", sep="\t", row.names = F, quote = F) write.table(H.out[['table']], "haplotype_table.txt", sep="\t", row.names = F, quote = F) write.table(H.out[['Haplotypes']], "haplotype_HapsbySubject.txt", sep="\t", row.names = F, quote = F) } if(All.Pairwise) { PairSetName <- paste("Pairwise.Set",seq_len(length(H.list)),sep="") # Hapset Legend File if(Output) { tmp <- cbind(PairSetName,names(HAPsets)) colnames(tmp) <- c("Set_Name","Loci") write.table(tmp,file="haplotype_PairwiseSets.txt",sep="\t",quote=F,col.names=T,row.names=F) } names(H.list) <- PairSetName } else { H.list <- H.list[[1]] } cat("> HAPLOTYPE ANALYSIS COMPLETED","\n") if(Verbose) { if(All.Pairwise) { overall.chisq <- tmp.cs } else { overall.chisq <- H.list[['chisq']] } print(as.data.frame(overall.chisq), row.names=F) cat("\n") } return(H.list) }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/H_wrapper_MC.R
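A small sketch of the All.Pairwise branch above: for three loci the wrapper builds one haplotype set per locus pair, named with buildHAPnames.

# Illustrative only: three loci yield choose(3,2) = 3 pairwise sets.
loci <- c("A", "B", "DRB1")
Combos <- t(combn(length(loci), 2))
Combos <- lapply(seq_len(nrow(Combos)), function(x) Combos[x, ])
unlist(lapply(Combos, buildHAPnames, loci = loci))
# "A~B" "A~DRB1" "B~DRB1"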
#' Locus Analysis Function #' #' This is the workhorse function for the locus level analysis. #' @param loci.ColNames The column names of the loci being analyzed. #' @param Locus Locus being analyzed. #' @param genos Genotype table #' @param grp Case/Control or Phenotype groupings. #' @param Strict.Bin Logical specify if strict rare cell binning should be used in ChiSq test #' @note This function is for internal BIGDAWG use only. L <- function(loci.ColNames,Locus,genos,grp,Strict.Bin) { # pull out locus specific columns getCol <- seq(1,length(loci.ColNames),1)[loci.ColNames %in% Locus] HLA_grp <- cbind(grp,genos[,getCol]) rownames(HLA_grp) <- NULL nAllele <- length(na.omit(HLA_grp[,2])) + length(na.omit(HLA_grp[,3])) ## extract alleles and counts for Grp1 and Grp0 Alleles <- sort(unique(c(HLA_grp[,2],HLA_grp[,3]))) # Build Contingency Matrix of Counts Allele.df <- list() for(i in 1:length(Alleles)) { Allele.df[[i]] <- cbind(Locus, Alleles[i], sum(length(which(subset(HLA_grp, grp==0)[,2]==Alleles[i])), length(which(subset(HLA_grp, grp==0)[,3]==Alleles[i]))), sum(length(which(subset(HLA_grp, grp==1)[,2]==Alleles[i])), length(which(subset(HLA_grp, grp==1)[,3]==Alleles[i])))) }; rm(i) Allele.df <- do.call(rbind,Allele.df) colnames(Allele.df) <- c("Locus","Allele","Group.0","Group.1") Allele.con <- matrix(as.numeric(Allele.df[,3:4]), ncol=2, dimnames=list(Allele.df[,'Allele'],c("Group.0", "Group.1"))) if(nrow(Allele.con)>1) { ### get expected values for cells, bin small cells, and run chi square if(Strict.Bin) { Result <- RunChiSq(Allele.con) } else { Result <- RunChiSq_c(Allele.con) } if( !Result$Flag ) { alleles_binned <- NA Final_binned <- NA overall.chisq <- NA ORout <- NA } else { alleles_binned <- Result$Binned Final_binned <- Result$Matrix overall.chisq <- Result$Test ## make a nice table of ORs, ci, p values ccdat <-TableMaker(Final_binned) ORout <- lapply(ccdat, cci.pval) #OR ORout <- do.call(rbind,ORout) colnames(ORout) <- c("OR","CI.lower","CI.upper","p.value","sig") rmRows <- which(ORout[,'sig']=="NA") if( length(rmRows > 0) ) { ORout <- ORout[-rmRows,,drop=F] } } } else { alleles_binned <- NA Final_binned <- NA overall.chisq <- NA ORout <- NA } ####################################################### Build Output List L.tmp <- list() ## Alleles.binned_out if(sum(is.na(alleles_binned))==2) { alleles_binned <- NA } if( !is.na(alleles_binned) ) { Allele.binned.tmp <- cbind(rep(Locus,nrow(alleles_binned)), rownames(alleles_binned), alleles_binned) rownames(Allele.binned.tmp) <- NULL colnames(Allele.binned.tmp) <- c("Locus","Allele","Group.0","Group.1") if(sum(grepl("\\^",Allele.binned.tmp[,'Allele']))>0) { Allele.binned.tmp[,'Allele'] <- gsub("\\^","Abs",Allele.binned.tmp[,'Allele']) } L.tmp[['binned']] <- Allele.binned.tmp } else { binned.out <- cbind(Locus,'Nothing.binned',NA,NA) colnames(binned.out) <- c("Locus","Allele","Group.0","Group.1") rownames(binned.out) <- NULL L.tmp[['binned']] <- binned.out } ## Allele.freq_out Allele.freq.out <- cbind(rep(Locus,nrow(Allele.con)), rownames(Allele.con), round(Allele.con[,'Group.0']/sum(Allele.con[,'Group.0']),digits=5), round(Allele.con[,'Group.1']/sum(Allele.con[,'Group.1']),digits=5)) rownames(Allele.freq.out) <- NULL colnames(Allele.freq.out) <- c("Locus","Allele","Group.0","Group.1") if(sum(grepl("\\^",Allele.freq.out[,'Allele']))>0) { Allele.freq.out[,'Allele'] <- gsub("\\^","Abs",Allele.freq.out[,'Allele']) } L.tmp[['freq']] <- Allele.freq.out ## ORtable_out if(!is.na(ORout)) { ORtable_out.tmp <- cbind(rep(Locus,nrow(ORout)), 
rownames(ORout), ORout) rownames(ORtable_out.tmp) <- NULL colnames(ORtable_out.tmp) <- c("Locus","Allele","OR","CI.lower","CI.upper","p.value","sig") if(sum(grepl("\\^",ORtable_out.tmp[,'Allele']))>0) { ORtable_out.tmp[,'Allele'] <- gsub("\\^","Abs",ORtable_out.tmp[,'Allele']) } L.tmp[['OR']] <- ORtable_out.tmp } else { ORtable_out.tmp <- cbind(Locus,Alleles,"NCalc","NCalc","NCalc","NCalc","NCalc") rownames(ORtable_out.tmp) <- NULL colnames(ORtable_out.tmp) <- c("Locus","Allele","OR","CI.lower","CI.upper","p.value","sig") if(sum(grepl("\\^",ORtable_out.tmp[,'Allele']))>0) { ORtable_out.tmp[,'Allele'] <- gsub("\\^","Abs",ORtable_out.tmp[,'Allele']) } L.tmp[['OR']] <- ORtable_out.tmp } ## overall.chisq_out if(!is.na(overall.chisq)) { overall.chisq.tmp <- cbind(Locus, overall.chisq) rownames(overall.chisq.tmp) <- NULL colnames(overall.chisq.tmp) <- c("Locus","X.square","df","p.value","sig") L.tmp[['chisq']] <- overall.chisq.tmp } else { overall.chisq.tmp <- cbind(Locus,"NCalc","NCalc","NCalc","NCalc") rownames(overall.chisq.tmp) <- NULL colnames(overall.chisq.tmp) <- c("Locus","X.square","df","p.value","sig") L.tmp[['chisq']] <- overall.chisq.tmp } ## Final_binned_out if(!is.na(Final_binned)) { Final_binned.tmp <- cbind(rep(Locus,nrow(Final_binned)), rownames(Final_binned), Final_binned) rownames(Final_binned.tmp) <- NULL colnames(Final_binned.tmp) <- c("Locus","Allele","Group.0","Group.1") if(sum(grepl("\\^",Final_binned.tmp[,'Allele']))>0) { Final_binned.tmp[,'Allele'] <- gsub("\\^","Abs",Final_binned.tmp[,'Allele']) } L.tmp[['table']] <- Final_binned.tmp } else { Final_binned.tmp <- cbind(Locus,Alleles,"NCalc","NCalc") rownames(Final_binned.tmp) <- NULL colnames(Final_binned.tmp) <- c("Locus","Allele","Group.0","Group.1") if(sum(grepl("\\^",Final_binned.tmp[,'Allele']))>0) { Final_binned.tmp[,'Allele'] <- gsub("\\^","Abs",Final_binned.tmp[,'Allele']) } L.tmp[['table']] <- Final_binned.tmp } return(L.tmp) }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/L.R
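A hedged sketch of the per-locus contingency step: L() builds an allele-by-group count matrix and hands it to RunChiSq (or RunChiSq_c when Strict.Bin is FALSE); the counts below are illustrative.

# Illustrative counts only; RunChiSq is the package's binning chi-square helper.
Allele.con <- matrix(c(40, 60, 55, 45), ncol = 2,
                     dimnames = list(c("01:01", "02:01"), c("Group.0", "Group.1")))
Result <- RunChiSq(Allele.con)
Result$Test   # X.square, df, p.value, sig (as used in the chisq output above)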
#' Locus Wrapper #' #' Wrapper for main L function #' @param nloci Number of loci being analyzed. #' @param loci Loci being analyzed. #' @param loci.ColNames The column names of the loci being analyzed. #' @param genos Genotype table #' @param grp Case/Control or Phenotype groupings. #' @param Strict.Bin Logical specify if strict rare cell binning should be used in ChiSq test #' @param Output Data return carryover from main BIGDAWG function #' @param Verbose Summary display carryover from main BIGDAWG function #' @note This function is for internal BIGDAWG use only. L.wrapper <- function(nloci,loci,loci.ColNames,genos,grp,Strict.Bin,Output,Verbose) { cat("\n>>>> STARTING LOCUS LEVEL ANALYSIS...\n") Allele.binned <- list() # Alleles binned during chi-square test Allele.freq <- list() # Alleles Frequencies overall.chisq <- list() # Chi-Square Table ORtable <- list() # Odds Ratio Table Final_binned <- list() # Contingency Table for(j in 1:nloci) { # Get Locus Locus <- loci[j] # Run Locus Level Analysis L.list <- L(loci.ColNames,Locus,genos,grp,Strict.Bin) # Build Output Lists Allele.binned[[Locus]] <- L.list[['binned']] Allele.freq[[Locus]] <- L.list[['freq']] overall.chisq[[Locus]] <- L.list[['chisq']] ORtable[[Locus]] <- L.list[['OR']] Final_binned[[Locus]] <- L.list[['table']] }# END locus loop Out <- list() Out[['AB']] <- do.call(rbind,Allele.binned) Out[['AF']] <- do.call(rbind,Allele.freq) Out[['CS']] <- do.call(rbind,overall.chisq) Out[['OR']] <- do.call(rbind,ORtable) Out[['FB']] <- do.call(rbind,Final_binned) if(Output) { ## write to file write.table(Out[['AF']], file = paste("Locus_freqs.txt",sep=""), sep="\t", row.names = F, col.names=T, quote = F) write.table(Out[['FB']], file = paste("Locus_table.txt",sep=""), sep="\t", row.names = F, col.names=T, quote = F) write.table(Out[['AB']], file = paste("Locus_binned.txt",sep=""), sep="\t", row.names = F, col.names=T, quote = F) write.table(Out[['OR']], file = paste("Locus_OR.txt",sep=""), sep="\t", row.names = F, col.names=T, quote = F) write.table(Out[['CS']], file = paste("Locus_chisq.txt",sep=""), sep="\t", row.names = F, col.names=T, quote = F) } cat("> LOCUS LEVEL ANALYSIS COMPLETED","\n") if(Verbose) { print(as.data.frame(Out[['CS']]),row.names=F) cat("\n") } return(Out) }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/L_wrapper.R
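A hedged calling sketch (inputs assumed as documented above): the wrapper returns five merged tables under short keys, which are also what the optional Locus_*.txt files contain.

# Sketch only; inputs are assumed to match the roxygen descriptions above.
Out <- L.wrapper(nloci, loci, loci.ColNames, genos, grp,
                 Strict.Bin = TRUE, Output = FALSE, Verbose = FALSE)
names(Out)   # "AB" "AF" "CS" "OR" "FB"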
#' Genotype List String to Tabular Data Conversion #' #' Expands GL strings to columns of adjacent locus pairs. #' @param df Data frame containing GL strings #' @param System Character Genetic system HLA or KIR #' @param HZY.Red Logical Should homozygote genotypes be a single allele for non-DRB345. #' @param Abs.Fill Logical Should absent loci special designations be used #' @param Cores Integer How many cores can be used. #' @note This function is for internal use only. Tab2GL.wrapper <- function(df,System,HZY.Red,Abs.Fill,Cores) { # Define data locus columns assuming Locus columns come in pairs colnames(df) <- sapply(colnames(df),FUN=gsub,pattern="\\.1|\\.2|\\_1|\\_2",replacement="") DataCol <- which(colnames(df) %in% names(which(table(colnames(df))==2))==TRUE) MiscCol <- setdiff(1:ncol(df),DataCol) # Check for identical rows of miscellanous information from non-data columns. Ambiguous Data Flag. Misc.tmp <- apply(df[,MiscCol],MARGIN=1,FUN=paste,collapse=":") if( length(which(table(Misc.tmp)>1))>0 ) { Err.Log(FALSE,"Table.Amb") ; stop("Conversion stopped.",call.=F) }; rm(Misc.tmp) # Remove empty data rows rmRows <- which(rowSums(apply(df[,DataCol],MARGIN=c(1,2),FUN=nchar))==0) if( length(rmRows)!=0 ) { df <- df[-rmRows,] ; rownames(df) <- NULL } # Pre-format data to SystemLoci*Allele if necessary if( sum(grepl(System,colnames(df)[DataCol]))==0 ) { colnames(df)[DataCol] <- paste(System,colnames(df)[DataCol],sep="") } for(i in DataCol) { if(sum(grepl(colnames(df)[i],df[,i]))==0) { df[,i] <- sapply(df[,i], FUN = Append.System, df.name=colnames(df)[i] ) } } # Pad Absent Calls for DRBx? if( System=="HLA-") { getCol <- grep("DRB3|DRB4|DRB5",colnames(df)) if(length(getCol)>0) { if(Abs.Fill) { df[,getCol] <- sapply(getCol,FUN=function(i) Filler(df[,i], colnames(df)[i], Type="Fill")) } else { df[,getCol] <- sapply(getCol,FUN=function(i) Filler(df[,i], Type="Remove")) } } } # Run Conversion df.list <- lapply(seq(1,nrow(df)), FUN=function(k) df[k,DataCol]) GL <- parallel::mclapply(df.list,FUN=Tab2GL.Sub,System=System,HZY.Red=HZY.Red,mc.cores=Cores) GL <- do.call(rbind,GL) if(ncol(GL)==1) { colnames(GL) <- "GL.String" } else if(ncol(GL)==2) { colnames(GL) <- c("GL.String","DR.HapFlag") } GL <- cbind(df[,MiscCol],GL) return(GL) } #' Genotype List String Condenser #' #' Condenses column of loci into a GL string using "^" #' @param x Row of loci to condense #' @param System Character Genetic system HLA or KIR #' @param HZY.Red Logical Should homozygote genotypes be a single allele for non-DRB345. #' @note This function is for internal use only. 
Tab2GL.Sub <- function(x,System,HZY.Red) { # Identify Loci in data and for HLA-DRB1 expected DRB345 Loci x <- x[which(x!="")] colnames(x) <- sapply(colnames(x),FUN=gsub,pattern="\\.1|\\.2|\\_1|\\_2",replacement="") Loci <- unique(colnames(x)) if(System=="HLA-") { if( sum(grepl("DRB1",Loci))>0 ) { Loci <- c(Loci,DRB345.Exp(x[grep("DRB1",colnames(x))])) } if( sum(grepl("\\^",Loci))>0 ) { Loci <- Loci[-grep("\\^",Loci)] } Loci <- unique(Loci) } # Append Locus to ambiguous Allele/Allele calls x[] <- sapply(x,Format.Allele,Type="on") # Condense Alleles (+) GLS <- lapply(Loci,Tab2GL.Loci,Genotype=x,System=System,HZY.Red=HZY.Red) GLS <- do.call(rbind,GLS) GLS[,1] <- sapply(GLS[,1],FUN=gsub,pattern="NA+NA|\\+NA|NA\\+",replacement="") GLS[GLS=="OK"] <- "" # Condense Chromosomes (^) Out <- paste(as.character(na.omit(GLS[,1])),collapse="^") Flag <- paste(GLS[which(GLS[,2]!=""),2],collapse="") Out <- c(Out,Flag) } #' Locus Condenser for Tab2GL #' #' Condenses alleles calls of a single locus string using "+" #' @param Locus Locus to condense #' @param Genotype Row of loci to condense #' @param System Character Genetic system HLA or KIR #' @param HZY.Red Logical Should homozygote genotypes be a single allele for non-DRB345. #' @note This function is for internal use only. Tab2GL.Loci <- function(Locus,Genotype,System,HZY.Red) { Alleles <- Genotype[grep(Locus,Genotype)] if(System=="HLA-") { if(Locus=="HLA-DRB3" || Locus=="HLA-DRB4" || Locus=="HLA-DRB5") { if( sum(grepl("DRB1",Genotype))>0 ) { # Assumptions for DRB345 DRB.GTYPE <- DRB345.Check.Zygosity(Locus, Genotype[grep("DRB",Genotype)] ) DRB.GTYPE[1,grepl("\\^",DRB.GTYPE)] <- NA Alleles <- c(DRB.GTYPE[,'Locus_1'],DRB.GTYPE[,'Locus_2']) # for inconsistent DR haplotypes DRB345.Flag <- DRB.GTYPE[,'Flag'] } else if( sum(grepl(grep("DRB",Genotype)))>0 ) { # DRB345 but no DRB1 (ZYgosity Check Not Determined) DRB345.Flag <- "DRB345_ND" } # fi no DRB1 but DRB345 } else { DRB345.Flag <- NULL } # fi DRB345 } # fi HLA if( sum(is.na(Alleles))==0 && HZY.Red && Alleles[1]==Alleles[2] ) { # Homozygous Reduction GLS <- Alleles[1] } else { # Remove NA Strings and Collapse Alleles <- Alleles[!is.na(Alleles)] GLS <- paste(Alleles,collapse="+") } if(System=="HLA-") { DR.HapFlag <- ifelse(!is.null(DRB345.Flag), paste(unlist(DRB345.Flag),collapse=",") , "") Out <- c(GLS,DR.HapFlag) } else { Out <- GLS } return(Out) }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/TAB2GL.R
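A small sketch of the GL-string grammar these converters target: "+" joins the two allele calls at a locus (Tab2GL.Loci) and "^" joins the per-locus blocks into one string (Tab2GL.Sub); the alleles here are illustrative.

# Illustrative only: compose a GL string the same way Tab2GL.Loci/Tab2GL.Sub do.
locusA <- paste(c("HLA-A*01:01", "HLA-A*02:01"), collapse = "+")
locusB <- paste(c("HLA-B*08:01", "HLA-B*44:02"), collapse = "+")
paste(c(locusA, locusB), collapse = "^")
# "HLA-A*01:01+HLA-A*02:01^HLA-B*08:01+HLA-B*44:02"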
#' Check Input Parameters #' #' Check input parameters for invalid entries. #' @param HLA Logical indicating whether data is HLA class I/II genotyping data only. #' @param Loci.Set Input list defining which loci to use for analyses (combinations permitted). #' @param Exon Numeric Exon(s) for targeted amino acid analysis. #' @param All.Pairwise Logical indicating whether all pairwise loci should be analyzed in haplotype analysis. #' @param Trim Logical indicating if HLA alleles should be trimmed to a set resolution. #' @param Res Numeric setting what desired resolution to trim HLA alleles. #' @param EVS.rm Logical indicating if expression variant suffixes should be removed. #' @param Missing Numeric setting allowable missing data for running analysis (may use "ignore"). #' @param Cores.Lim Integer setting the number of cores accessible to BIGDAWG (Windows limit is 1 core). #' @param Return Logical Should analysis results be returned as list. #' @param Output Logical Should analysis results be written to output directory. #' @param Merge.Output Logical Should analysis results be merged into a single file for easy access. #' @param Verbose Logical Should a summary of each analysis be displayed in console. #' @note This function is for internal use only. Check.Params <- function (HLA,Loci.Set,Exon,All.Pairwise,Trim,Res,EVS.rm,Missing,Cores.Lim,Return,Output,Merge.Output,Verbose) { # Logicals: HLA=TRUE, All.Pairwise=FALSE, EVS.rm=FALSE, Trim=FALSE, Return=FALSE, Merge.FALSE, Verbose=TRUE, TRUE, # Numerics: Res=2, Missing=2, Cores.Lim=1L # Untested: Data, Results.Dir, Run.Tests, Loci.Set if( is.na(as.logical(HLA)) ) { Err.Log(FALSE,"P.Error","HLA") ; stop("Analysis Stopped.",call.=FALSE) } if( !missing(Loci.Set) && !is.list(Loci.Set) ) { Err.Log(FALSE,"P.Error","Loci.Set") ; stop("Analysis Stopped.",call.=FALSE) } if( !missing(Exon) && !is.numeric(Exon) ) { Err.Log(FALSE,"P.Error","Exon") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(All.Pairwise) ) { Err.Log(FALSE,"P.Error","All.Pairwise") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(EVS.rm) ) { Err.Log(FALSE,"P.Error","EVS.rm") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(Trim) ) { Err.Log(FALSE,"P.Error","Trim") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(Return) ) { Err.Log(FALSE,"P.Error","Return") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(Merge.Output) ) { Err.Log(FALSE,"P.Error","Merge.Output") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(Verbose) ) { Err.Log(FALSE,"P.Error","Verbose") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(Output) ) { Err.Log(FALSE,"P.Error","Output") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.numeric(Res) ) { Err.Log(FALSE,"P.Error","Res") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.numeric(Cores.Lim) && !is.integer(Cores.Lim) ) { Err.Log(FALSE,"P.Error","Cores.Lim") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.numeric(Missing) ) { if(Missing!="ignore") { Err.Log(FALSE,"P.Error","Missing") ; stop("Analysis Stopped.",call.=FALSE) } } } #' Check Input Parameters for GLS conversion #' #' Check input parameters for invalid entries. #' @param Convert String Direction for conversion. #' @param File.Output String Type of output. #' @param System String Genetic system (HLA or KIR) of the data being converted #' @param HZY.Red Logical Reduction of homozygote genotypes to single allele. #' @param DRB345.Check Logical Check DR haplotypes for consistency and flag unusual haplotypes. 
#' @param Cores.Lim Integer How many cores can be used. #' @note This function is for internal use only. Check.Params.GLS <- function (Convert,File.Output,System,HZY.Red,DRB345.Check,Cores.Lim) { if( is.na(match(Convert,c("GL2Tab","Tab2GL"))) ) { Err.Log(FALSE,"P.Error","Convert") ; stop("Analysis Stopped.",call.=FALSE) } if( is.na(match(File.Output,c("R","txt","csv","pypop"))) ) { Err.Log(FALSE,"P.Error","File.Output") ; stop("Analysis Stopped.",call.=FALSE) } if( is.na(match(System,c("HLA","KIR"))) ) { Err.Log(FALSE,"P.Error","System") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(HZY.Red) ) { Err.Log(FALSE,"P.Error","HZY.Red") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.logical(DRB345.Check) ) { Err.Log(FALSE,"P.Error","DRB345.Check") ; stop("Analysis Stopped.",call.=FALSE) } if( !is.numeric(Cores.Lim) || !is.integer(Cores.Lim) ) { Err.Log(FALSE,"P.Error","Cores.Lim") ; stop("Analysis Stopped.",call.=FALSE) } } #' Check Cores Parameters #' #' Check cores limitation for OS compatibility #' @param Cores.Lim Integer How many cores can be used. #' @param Output Logical Should analysis results be written to output directory. Check.Cores <- function(Cores.Lim,Output) { if ( Cores.Lim!=1L ) { Cores.Max <- as.integer( floor( parallel::detectCores() * 0.9) ) if(Sys.info()['sysname']=="Windows" && as.numeric(Cores.Lim)>1) { Err.Log(Output,"Windows.Cores") ; stop("Analysis Stopped.",call. = F) } else if( Cores.Lim > Cores.Max ) { Cores <- Cores.Max } else { Cores <- Cores.Lim } } else { Cores <- Cores.Lim } return(Cores) } #' HLA Formatting Check for Amino Acid Analysis #' #' Checks data to see if HLA data is properly formatted . #' @param x All columns of HLA genotyping data. #' @note This function is for internal BIGDAWG use only. CheckHLA <- function(x) { #Return TRUE if properly formatted HLA # temporary reassignment for test x[is.na(x)] <- "00:00" # NA cells x[x=="^"] <- "00:00" # absent cells x[x==""] <- "00:00" # empty cells # test for colon delimiters test <- apply(x,MARGIN=c(1,2),FUN=function(z) length(unlist(strsplit(as.character(z),split=":")))) test <- apply(test,MARGIN=2,FUN=min) Flag <- as.logical(min(test)==2) return(Flag) } #' HLA Loci Legitimacy Check for Amino Acid Analysis #' #' Checks available loci against data to ensure complete overlap. #' @param x Loci available in exon protein list alignment object. #' @param y Unique column names #' @note This function is for internal BIGDAWG use only. CheckLoci <- function(x,y) { #Returns TRUE if absent locus(loci) encountered #x=Loci available in ExonPtnList #y=Loci.Set from data Output <- list() y <- unique(unlist(y)) y <- gsub("HLA-","",y) Flag <- ( !sum(y %in% x) == length(y) ) Output[['Flag']] <- Flag if(Flag) { Output[['Loci']] <- paste(y[!y %in% x],collapse=",") } else { Output[['Loci']] <- NA } return(Output) } #' HLA Allele Legitimacy Check for Amino Acid Analysis #' #' Checks available alleles against data to ensure complete overlap. #' @param x Exon protein list alignment object. #' @param y Genotypes from data file #' @note This function is for internal BIGDAWG use only. 
CheckAlleles <- function(x,y) { # Returns TRUE if unknown allele(s) encountered # Checks at 2 levels of resolution: Full, 3-Field, 2-Field, 1-Field # Define Loci Loci <- unique( gsub("\\.1|\\.2|\\_1|\\_2","",colnames(y)) ) Output <- list() for(i in Loci ) { # Database Alleles x.locus <- x[[i]][,'Allele'] x.locus[] <- sapply(x.locus, FUN = gsub, pattern="[[:alpha:]]", replacement="") # Current Data Alleles y.locus <- y[,grep(i,colnames(y))] y.locus <- unique(c(y.locus[,1],y.locus[,2])) y.locus[] <- sapply(y.locus, FUN = gsub, pattern="[[:alpha:]]", replacement="") y.locus <- na.omit(y.locus) y.locus <- y.locus[y.locus!="^"] y.locus <- y.locus[y.locus!=""] # Check Each Allele against Database at defined resolution Resolution <- c("Full",3,2,1) ; r = 1 repeat{ Res <- Resolution[r] #cat(r,":",Res,"\n") if( Res=="Full" ) { y.locus.sub <- y.locus ; x.locus.sub <- x.locus } else { y.locus.sub <- sapply(y.locus,GetField,Res=Res) x.locus.sub <- sapply(x.locus,GetField,Res=Res) } A.check <- y.locus.sub %in% x.locus.sub if( sum(A.check)==length(y.locus.sub) ) { A.Flag <- FALSE ; break } else { r <- r + 1 ; A.Flag <- TRUE } if( r > length(Resolution) ) { break } } if(A.Flag) { Alleles <- y.locus[!A.check] ; Alleles <- paste(Alleles,collapse=",") } Output[[i]] <- list( Flag = ifelse(A.Flag,TRUE,FALSE), Alleles = ifelse(A.Flag,Alleles,"") ) } Flags <- unlist(lapply(Output,"[","Flag")) ; Alleles <-lapply(Output,"[","Alleles") if( sum(Flags)>0 ) { getFlags <- which(Flags==TRUE) Alleles.Flagged <- sapply(getFlags,FUN= function(z) paste(Loci[z], unlist(Alleles[[z]]) , sep="*" ) ) Out <- list( Flag = TRUE, Alleles = Alleles.Flagged ) } else { Out <- list( Flag=FALSE , Alleles="" ) } return(Out) } #' Data Summary Function #' #' Summary function for sample population within data file. #' @param Tab Loci available in exon protein list alignment object. #' @param All.ColNames Column names from genotype data. #' @param rescall HLA resolution set for analysis. #' @param HLA HLA BIGDAWG argument passed to function #' @param Verbose Summary display carryover from BIGDAWG function. #' @param Output Data output carryover form BIGDAWG function #' @note This function is for internal BIGDAWG use only. PreCheck <- function(Tab,All.ColNames,rescall,HLA,Verbose,Output) { Grp0 <- which(Tab[,2]==0) Grp1 <- which(Tab[,2]==1) nGrp0 <- length(Tab[Grp0,2]) nGrp1 <- length(Tab[Grp1,2]) if(min(nGrp0,nGrp1)==0) { Err.Log(Output,"Case.Con") stop("Analysis Stopped.",call. 
= F) } Loci <- as.list(unique(All.ColNames[3:length(All.ColNames)])) nLoci <- length(Loci) GTYPE <- Tab[,3:ncol(Tab)] colnames(GTYPE) <- All.ColNames[3:length(All.ColNames)] nGTYPE <- unlist(lapply(Loci,function(x) length(unique(unlist(GTYPE[,which(colnames(GTYPE)==x)]))))) Grp0un <- unlist(lapply(Loci,function(x) length(unique(unlist(GTYPE[Grp0,which(colnames(GTYPE)==x)]))))) Grp1un <- unlist(lapply(Loci,function(x) length(unique(unlist(GTYPE[Grp1,which(colnames(GTYPE)==x)]))))) nMissing <- unlist(lapply(Loci,function(x) sum(is.na(GTYPE[,which(colnames(GTYPE)==x)])))) Grp0miss <- unlist(lapply(Loci,function(x) sum(is.na(GTYPE[Grp0,which(colnames(GTYPE)==x)])))) Grp1miss <- unlist(lapply(Loci,function(x) sum(is.na(GTYPE[Grp1,which(colnames(GTYPE)==x)])))) if(Verbose) { cat(" Sample Summary\n") cat(" Sample Size (n):",nrow(Tab),"\n") cat(" ...Number of Controls/Cases:",paste(paste(nGrp0,nGrp1,sep="/"),collapse=", "),"\n") cat(" Allele Count (2n):",nrow(Tab)*2,"\n") cat(" Total loci in file:",nLoci,"\n") cat(" Unique loci:",paste(Loci,collapse=", "),"\n") cat(" Unique alleles per locus:",paste(nGTYPE,collapse=", "),"\n") cat(" ...Unique in Controls/Cases:",paste(paste(Grp0un,Grp1un,sep="/"),collapse=", "),"\n") cat(" Missing alleles per locus:",paste(nMissing,collapse=", "),"\n") cat(" ...Missing in Controls/Cases:",paste(paste(Grp0miss,Grp1miss,sep="/"),collapse=", "),"\n") cat("\n") } if(HLA) { Grp0res <- max(unlist(lapply(Loci,function(x) max(unlist(lapply(strsplit(unlist(GTYPE[Grp0,which(colnames(GTYPE)==x)]),split=":"),length)))))) Grp1res <- max(unlist(lapply(Loci,function(x) max(unlist(lapply(strsplit(unlist(GTYPE[Grp1,which(colnames(GTYPE)==x)]),split=":"),length)))))) if(max(Grp0res,Grp1res)>4) { Err.Log(Output,"High.Res") stop("Analysis Stopped.",call. = F) } if(Verbose){ cat(" Observed Allele Resolution\n") cat(" Max Resolution Controls:",paste(Grp0res,"-Field",sep=""),"\n") cat(" Max Resolution Cases:",paste(Grp1res,"-Field",sep=""),"\n") cat(" Defined Resolution:",rescall,"\n") if(Grp0res!=Grp1res){ cat(" ***** Warning. \n") } if(Grp0res!=Grp1res){ cat(" ***** There may exist a Case-Control field resolution imbalance.\n") } if(Grp0res!=Grp1res){ cat(" ***** Considering trimming to",paste(min(Grp0res,Grp1res),"-Field resolution.",sep=""),"\n") } cat("\n") } } if(HLA) { Out <- list(Sample.Size=nrow(Tab), No.Controls=nGrp0, No.Cases=nGrp1, Allele.Count=nrow(Tab)*2, Total.Loci=nLoci, Loci=paste(Loci,collapse=", "), AllelePerLocus=paste(nGTYPE,collapse=", "), MissingPerLocus=paste(nMissing,collapse=", "), MaxResGrp0=paste(Grp0res,"-Field",sep=""), MaxResGrp1=paste(Grp1res,"-Field",sep=""), Suggested.Res=paste(min(Grp0res,Grp1res),"-Field",sep=""), SetRes=rescall) } else { Out <- list(Sample.Size=nrow(Tab), No.Controls=nGrp0, No.Cases=nGrp1, Allele.Count=nrow(Tab)*2, Total.Loci=nLoci, Loci=paste(Loci,collapse=", "), AllelePerLocus=paste(nGTYPE,collapse=", "), MissingPerLocus=paste(nMissing,collapse=", "), SetRes=rescall) } return(do.call(rbind,Out)) } #' Check Data Structure #' #' Check data structure for successful conversion. #' @param Data String Type of output. #' @param System Character Genetic system HLA or KIR #' @param Convert String Direction for conversion. #' @note This function is for internal use only. 
Check.Data <- function (Data,System,Convert) { if(Convert=="Tab2GL") { # Check for column formatting consistency if( ncol(Data) < 3 ) { Err.Log(FALSE,"Table.Col") ; stop("Analysis Stopped.",call.=F) } # Check for GL string field delimiters Presence if ( sum(grepl("\\+",Data[,ncol(Data)])) > 0 || sum(grepl("\\^",Data[,ncol(Data)])) > 0 || sum(grepl("\\|",Data[,ncol(Data)])) > 0 ) { Err.Log(FALSE,"Tab.Format") ; stop("Analysis Stopped.",call.=F) } # Check for repeating column names colnames(Data) <- sapply(colnames(Data),FUN=gsub,pattern="\\.1|\\.2|\\_1|\\_2",replacement="") DataCol <- which(table(colnames(Data))==2) if( length(DataCol)==0 ) { Err.Log(FALSE,"Table.Pairs") ; stop("Analysis Stopped.",call.=F) } } if(Convert=="GL2Tab") { LastCol <- ncol(Data) #Check for System Name in GL String test <- na.omit(Data[,LastCol]) test <- test[-which(sapply(test,nchar)==0)] if(length(grep(System,test))!=length(test)) { Err.Log(FALSE,"GL.Format") ; stop("Analysis Stopped.",call.=F) } # Check for GL string field delimiters Absence if ( sum(grepl("\\+",Data[,LastCol])) == 0 && sum(grepl("\\^",Data[,LastCol])) == 0 && sum(grepl("\\|",Data[,LastCol])) == 0 ) { Err.Log(FALSE,"GL.Format") ; stop("Analysis Stopped.",call.=F) } # Check for ambiguous data at genotype "|" if( sum(grepl("\\|",Data[,LastCol]))>0 ) { Check.Rows <- paste(grep("\\|",Data[,LastCol]),collapse=",") Err.Log(FALSE,"GTYPE.Amb",Check.Rows) ; stop("Analysis Stopped.",call.=F) } } } #' GL String Locus Check #' #' Check GL string for loci appearing in multiple gene fields. #' @param x GL String to check against #' @param Loci Loci to check #' @note This function is for internal use only. CheckString.Locus <- function(x,Loci) { Calls <- sapply(x,FUN=function(x) strsplit(x,"\\+")) Calls.loci <- lapply(Calls,FUN=function(x) unlist(lapply(strsplit(x,"\\*"),"[",1))) Calls.loci.1 <- unlist(lapply(Calls.loci,"[",1)) Calls.loci.1 <- colSums(sapply(Loci, FUN = function(z) Calls.loci.1 %in% z)) Calls.loci.2 <- as.character(unlist(lapply(Calls.loci,"[",2))) Calls.loci.2 <- colSums(sapply(Loci, FUN = function(z) Calls.loci.2 %in% z)) test.CS <- colSums(rbind(Calls.loci.1,Calls.loci.2)) if( max(test.CS)>2 ) { Loci.Err <- paste(Loci[which(test.CS>2)],collapse=",") GLS <- paste(x,collapse="^") Err.Log(FALSE,"Locus.MultiField",GLS,Loci.Err) stop("Analysis Stopped.",call.=FALSE) } return("ok") } #' GL String Allele Check #' #' GL String check for allele ambiguity formatting #' @param x GL String to check against #' @note This function is for internal use only. CheckString.Allele <- function(x) { x <- as.character(x) if(grepl("/",x)) { tmp <- strsplit(unlist(strsplit(x,"/")),"\\*") tmp.len <- length(unique(lapply(tmp,length))) if( tmp.len > 1 ) { Err.Log(FALSE,"Allele.Amb.Format",x) stop("Analysis Stopped.",call.=FALSE) } } return("ok") } #' Function to Check Release Versions #' #' This updates the protein aligment used in checking HLA loci and alleles as well as in the amino acid analysis. #' @param Package Logical to check for BIGDAWG package versions #' @param Alignment Logical to check the IMGT/HLA database version for the alignment bundled with BIGDAWG. #' @param Output Should any error be written to a file #' @note Requires active internet connection. 
CheckRelease <- function(Package=T,Alignment=T,Output=F) { if( !inherits(try(XML::readHTMLTable("http://cran.r-project.org/web/packages/BIGDAWG/index.html",header=F),silent=T),"try-error") ) { if(Package) { CranR <- as.character(XML::readHTMLTable("http://cran.r-project.org/web/packages/BIGDAWG/index.html",header=F)[[1]][1,2]) GitHubR <- read.table("https://raw.githubusercontent.com/IgDAWG/BIGDAWG/master/DESCRIPTION",sep="\t",stringsAsFactors=F,nrows=4) GitHubR <- unlist(strsplit(GitHubR[4,],split=" "))[2] CurrR <- as.character(packageVersion('BIGDAWG') ) } if(Alignment) { # Get IMGT Release Version # release_version not updated consistently #download.file("ftp://ftp.ebi.ac.uk/pub/databases/ipd/imgt/hla/release_version.txt",destfile="release_version.txt",method="libcurl") #Release <- read.table("release_version.txt",comment.char="",sep="\t") #Release <- apply(Release,MARGIN=1,FUN= function(x) gsub(": ",":",x)) #RV.current <- unlist(strsplit(Release[3],split=":"))[2] URL=file("ftp://ftp.ebi.ac.uk/pub/databases/ipd/imgt/hla/Allele_status.txt",method=getOption("url.method", "libcurl")) df <- read.table(URL,sep="\t",nrows=3,comment.char="") df.v <- unlist(strsplit(df[3,],split=" ")) RV.current <- paste(df.v[3:4],collapse=" ") # Get BIGDAWG UPL <- paste(path.package('BIGDAWG'),"/data/UpdatePtnAlign.RData",sep="") UpdatePtnList <- NULL ; rm(UpdatePtnList) if( file.exists(UPL) ) { load(UPL) EPL <- UpdatePtnList rm(UpdatePtnList,UPL) UPL.flag=T } else { EPL <- ExonPtnList UPL.flag=F } RV.BIGDAWG <- EPL$Release.Version } cat("\n") if(Package) { cat("BIGDAWG Package Versions:\n","Installed Version: ",CurrR,"\n CRAN Release Version: ",CranR,"\n Developmental version: ",GitHubR,"\n") } if(Package & Alignment) { cat("\n") } if(Alignment) { if(UPL.flag) { cat("IMGT/HLA Versions:\n","IMGT/HLA Version: ",RV.current,"\n BIGDAWG version (from update): ",RV.BIGDAWG,"\n") } else { cat("IMGT/HLA Versions:\n","IMGT/HLA Version: ",RV.current,"\n BIGDAWG version: ",RV.BIGDAWG,"\n") } } cat("\n") } else { Err.Log(Output,"No.Internet") stop("Analysis stopped.",call.=F) } }
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/check_functions.R
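A small sketch of CheckLoci from above (locus names are illustrative): it flags any locus in the data that the bundled alignment object does not cover.

# Illustrative only: "DPX" is a made-up locus absent from the alignment list.
CheckLoci(x = c("A", "B", "C", "DRB1"), y = list(c("HLA-A", "HLA-B", "HLA-DPX")))
# $Flag = TRUE, $Loci = "DPX"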
#' Example HLA Dataset
#'
#' A synthetic dataset of HLA genotypes for use with BIGDAWG.
#'
#' @format A data frame with 2000 rows and 14 variables.
"HLA_data"

#' Exon protein alignments
#'
#' Alignment object for use in the amino acid analysis.
#'
#' @format A list where each element is an alignment data frame for a single locus.
"ExonPtnList"
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/data.R
#' Prepare imported data #' #' Prepare imported data for processing, checks, and analysis. #' @param Tab Genotypes dataframe. #' @note This function is for internal BIGDAWG use only. prepData <- function(Tab) { Tab[] <- lapply(Tab, as.character) colnames(Tab) <- gsub( "HLA-","",colnames(Tab) ) colnames(Tab) <- gsub( "\\.1|\\.2|\\_1|\\_2","",colnames(Tab) ) colnames(Tab) <- toupper(colnames(Tab)) colnames(Tab) <- gsub( "DRB3.4.5|DRB3/4/5","DRB345",colnames(Tab) ) rownames(Tab) <- NULL Tab <- rmABstrings(Tab) return(Tab) } #' Replace absent allele strings #' #' Replaces allowable absent allele strings with ^ symbol. #' @param df Genotypes dataframe. #' @note This function is for internal BIGDAWG use only. rmABstrings <- function(df) { df[] <- apply(df, MARGIN=c(1,2), FUN=function(x) gsub("ABSENT|Absent|absent|Abs|ABS|ab|Ab|AB","^",x) ) df[df=="00"] <- "^" df[df=="00:00"] <- "^" df[df=="00:00:00"] <- "^" df[df=="00:00:00:00"] <- "^" return(df) } #' Replace or Fill 00:00 allele strings #' #' Replaces or Fills absent allele strings. #' @param x Genotype #' @param Locus Locus column to adjust. #' @param Type String specifying whether to pad ('Fill') or leave blank ('Remove') absent calls #' @note This function is for internal use only. Filler <- function(x,Locus=NULL,Type) { if (Type=="Fill") { which(x=="") Locus <- gsub("_1|_2","",Locus) x[which(x=="")] <- paste(Locus,"*00:00",sep="") } if (Type=="Remove") { x[] <- sapply(x, FUN=function(x) gsub("ABSENT|Absent|absent|Abs|ABS|ab|Ab|AB","",x) ) x[grep("\\*00",x)] <- "" } if (Type=="Sub") { x[] <- sapply(x, FUN=function(x) gsub("ABSENT|Absent|absent|Abs|ABS|ab|Ab|AB","^",x) ) x[grep("\\*00",x)] <- "^" } return(x) } #' Removes System and Locus from Alleles #' #' Removes the System and Locus designations for alleles calls in GL2Tab #' @param x Allele #' @note This function is for internal use only. Stripper <- function(x) { if( grepl("\\*",x) ) { if( is.na(x) ) { Fix <- x } else if ( x!="" ) { Fix <- unlist(strsplit(x,"\\*"))[2] } else { Fix <- x } } else { return (x) } return(Fix) } #' Expression Variant Suffix Removal #' #' Removes expression variant suffixes from HLA alleles in the exon protein alignment object. #' @param Locus Locus to be filtered against. #' @param EPList Exon Protein Alignment Object #' @note This function is for internal BIGDAWG use only. EVSremoval <- function(Locus,EPList) { if(Locus=='Release') { tmp <- EPList[[Locus]] return(tmp) } else if(Locus=='RefExons') { tmp <- EPList[[Locus]] return(tmp) } else { tmp <- EPList[[Locus]] tmp[,'Trimmed'] <- sapply(tmp[,'Trimmed'],gsub,pattern="[[:alpha:]]",replacement="") return(tmp) } } #' HLA trimming function #' #' Trim a properly formatted HLA allele to desired number of fields. #' @param x HLA allele. #' @param Res Resolution desired. #' @note This function is for internal BIGDAWG use only. GetField <- function(x,Res) { Tmp <- unlist(strsplit(as.character(x),":")) if (length(Tmp)<2) { return(x) } else if (Res==1) { return(Tmp[1]) } else if (Res > 1) { Out <- paste(Tmp[1:Res],collapse=":") return(Out) } } #' Haplotype missing Allele summary function #' #' Summary function for identifying missing alleles in a matrix of genotypes. #' @param geno Matrix of genotypes. #' @param miss.val Vector of codes for allele missing values. #' @note This function is for internal BIGDAWG use only and is ported from haplo.stats. 
summaryGeno.2 <- function (geno, miss.val = 0) { # Ported from R package haplo.stats v 1.7.7 # Authors: Sinnwell JP, Schaid DJ # URL: https://cran.r-project.org/web/packages/haplo.stats/index.html n.loci <- ncol(geno)/2 nr <- nrow(geno) geno <- haplo.stats::setupGeno(geno, miss.val) loc0 <- numeric(nr) loc1 <- numeric(nr) loc2 <- numeric(nr) for (i in 1:nr) { first.indx <- seq(1, (2 * n.loci - 1), by = 2) miss.one <- is.na(geno[i, first.indx]) | is.na(geno[i, first.indx + 1]) miss.two <- is.na(geno[i, first.indx]) & is.na(geno[i, first.indx + 1]) loc2[i] <- sum(miss.two) loc1[i] <- sum(miss.one - miss.two) loc0[i] <- sum(!miss.one) } tbl <- data.frame(missing0 = loc0, missing1 = loc1, missing2 = loc2) return(tbl) } #' Data Object Merge and Output #' #' Whole data set table construction of per haplotype for odds ratio, confidence intervals, and pvalues #' @param BD.out Output of analysis as list. #' @param Run Tests that are to be run as defined by Run.Tests. #' @param OutDir Output directory defined by Results.Dir or default. #' @note This function is for internal BIGDAWG use only. MergeData_Output <- function(BD.out,Run,OutDir) { FM.out <- data.frame(Analysis=character(), Locus=character(), Allele=character(), Group.0=numeric(), Group.1=numeric()) CN.out <- data.frame(Analysis=character(), Locus=character(), Allele=character(), Group.0=numeric(), Group.1=numeric()) OR.out <- data.frame(Analysis=character(), Locus=character(), Allele=character(), OR=numeric(), CI.Lower=numeric(), CI.Upper=numeric(), p.value=numeric(), sig=character()) CS.out <- data.frame(Analysis=character(), Locus=character(), x.square=numeric(), df=numeric(), p.value=numeric(), sig=character()) for(i in Run) { switch(i, H= { TestName <- "Haplotype" }, L= { TestName <- "Locus" }, A= { TestName <- "AminoAcid" } ) Test <- BD.out[[i]] for(k in 1:length(Test)) { Test.sub <- Test[[k]] #Frequencies tmp <- Test.sub$freq if(i=="A") { Allele <- paste(tmp[,'Position'],tmp[,'Residue'],sep="::") } switch(i, H = { tmp <- cbind(rep(TestName,nrow(tmp)),rep(colnames(tmp)[1],nrow(tmp)),tmp) }, L = { tmp <- cbind(rep(TestName,nrow(tmp)),tmp) }, A = { tmp <- cbind(rep(TestName,nrow(tmp)),tmp[,'Locus'],Allele,tmp[,c('Group.0','Group.1')]) }) colnames(tmp) <- c("Analysis","Locus","Allele","Group.0","Group.1") FM.out <- rbind(tmp,FM.out) ; rm(tmp) #Counts tmp <- Test.sub$table if(i=="A") { Allele <- paste(tmp[,'Position'],tmp[,'Residue'],sep="::") } switch(i, H = { tmp <- cbind(rep(TestName,nrow(tmp)),rep(colnames(tmp)[1],nrow(tmp)),tmp) }, L = { tmp <- cbind(rep(TestName,nrow(tmp)),tmp) }, A = { tmp <- cbind(rep(TestName,nrow(tmp)),tmp[,'Locus'],Allele,tmp[,c('Group.0','Group.1')]) }) colnames(tmp) <- c("Analysis","Locus","Allele","Group.0","Group.1") CN.out <- rbind(tmp,CN.out) ; rm(tmp) #Odds Ratios tmp <- Test.sub$OR if(i=="A") { Allele <- paste(tmp[,'Position'],tmp[,'Residue'],sep="::") } switch(i, H = { tmp <- cbind(rep(TestName,nrow(tmp)),rep(colnames(tmp)[1],nrow(tmp)),tmp) }, L = { tmp <- cbind(rep(TestName,nrow(tmp)),tmp) }, A = { tmp <- cbind(rep(TestName,nrow(tmp)),tmp[,'Locus'],Allele,tmp[,c("OR","CI.lower","CI.upper","p.value","sig")]) }) colnames(tmp) <- c("Analysis","Locus","Allele","OR","CI.Lower","CI.Upper","p.value","sig") OR.out <- rbind(tmp,OR.out) ; rm(tmp) #ChiSq tmp <- Test.sub$chisq if(i=="A") { Locus <- paste(tmp[,'Locus'],tmp[,'Position'],sep="::") } switch(i, H = { tmp <- cbind(rep(TestName,nrow(tmp)), rep(names(Test)[k],nrow(tmp)), tmp) }, L = { tmp <- cbind(rep(TestName,nrow(tmp)), tmp) }, A = { tmp <- 
cbind(rep(TestName,nrow(tmp)), Locus, tmp[,c('X.square','df','p.value','sig')] ) } ) colnames(tmp)[1:2] <- c("Analysis","Locus") rownames(tmp) <- NULL CS.out <- rbind(tmp,CS.out); rm(tmp) }; rm(k) }; rm(i) setwd(OutDir) # Remove redundant entries # Especially relevant to multi-set runs FM.out <- unique(FM.out) CN.out <- unique(CN.out) CS.out <- unique(CS.out) OR.out <- apply(OR.out,MARGIN=c(1,2),as.character) OR.out <- unique(OR.out) write.table(FM.out,file="Merged_Frequencies.txt",sep="\t",col.names=T,row.names=F,quote=F) write.table(CN.out,file="Merged_Counts.txt",sep="\t",col.names=T,row.names=F,quote=F) write.table(CS.out,file="Merged_ChiSq.txt",sep="\t",col.names=T,row.names=F,quote=F) write.table(OR.out,file="Merged_OddsRatio.txt",sep="\t",col.names=T,row.names=F,quote=F) } #' File Name Extraction #' #' Function to extract file path. #' @param x File name. #' @note This function is for internal use only. getFileName <- function(x) { tmpDir <- dirname(x) tmpName <- basename(x) if(basename(x)==x) { outName <- paste("Converted_",x,sep="") } else { outName <- paste(tmpDir,"/Converted_",tmpName,sep="") } outName <- gsub(".txt","",outName) return(outName) } #' Build Output Matrix for GL2Tab Conversion #' #' Initializes output matrix format for GL2Tab conversion #' @param System Character Genetic system HLA- or KIR #' @param Loci The loci for header names #' @note This function is for internal use only. Build.Matrix <- function(System,Loci) { Loci.Grp <- rep(Loci,each=2) if(System=="HLA-") { Out <- mat.or.vec(nr=1,nc=length(Loci.Grp)+1) ; colnames(Out) <- c(Loci.Grp,"DR.HapFlag") } else { Out <- mat.or.vec(nr=1,nc=length(Loci.Grp)) ; colnames(Out) <- Loci.Grp } colnames(Out)[seq(1,length(Loci.Grp),by=2)] <- paste(Loci,"_1",sep="") colnames(Out)[seq(2,length(Loci.Grp),by=2)] <- paste(Loci,"_2",sep="") return(Out) } #' Tabular Data Locus Format Tool #' #' Correctly orders the expanded GL string #' @param x Single row of converted GL string #' @param Order Single row data frame for mapping converted GL strings #' @note This function is for internal use only. Format.Tab <- function(x,Order) { Order[,match(colnames(x),colnames(Order))] <- x return(Order) } #' Ambiguous Alleles Locus Name Formatting #' #' Remove or Append Locus name from/to allele in an ambiguous allele string #' @param x Allele String #' @param Type String specifying whether to strip ('off') or append ('on') locus prefix #' @note This function is for internal use only. Format.Allele <- function(x,Type) { if(Type=="off") { if(grepl("/",x)) { tmp <- strsplit(unlist(strsplit(x,"/")),"\\*") Fix <- paste(unlist(lapply(tmp,"[",1)[1]), paste(unlist(lapply(tmp,"[",2)),collapse="/"), sep="*") } else { Fix <- x } } if(Type=="on"){ if(grepl("/",x)) { Locus <- unlist(strsplit(x,"\\*"))[1] Fix <- paste( paste( Locus,unlist(strsplit(unlist(strsplit(x,"\\*"))[2],"/")) ,sep="*") ,collapse="/") } else { Fix <- x } } return(Fix) } #' Append Genetic System Locus Designation to Allele String #' #' Adds genetic system (HLA/KIR) to each allele name #' @param x Vector Column genotypes to append #' @param df.name String SystemLocus name for each allele. #' @note This function is for internal use only. Append.System <- function(x,df.name) { getAllele <- which(x!="") x[getAllele] <- paste(df.name,x[getAllele],sep="*") return(x) }
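## Illustrative usage sketch for the formatting helpers above (not part of the package source).
## Assumes the functions have been loaded, e.g. via devtools::load_all(); allele strings are toy values.
# GetField("01:01:01:01", Res=2)            # "01:01"  -- trim an allele to 2-field resolution
# Stripper("A*01:01:01:01")                 # "01:01:01:01"  -- drop the locus designation
# Format.Allele("A*01:01/A*01:02", "off")   # "A*01:01/01:02"  -- single locus prefix for an ambiguous string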
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/general_functions.R
#' Case-Control Odds ratio calculation and graphing #' #' cci function port epicalc version 2.15.1.0 (Virasakdi Chongsuvivatwong, 2012) #' @param caseexp Number of cases exposed #' @param controlex Number of controls exposed #' @param casenonex Number of cases not exosed #' @param controlnonex Number of controls not exposed #' @param cctable A 2-by-2 table. If specified, will supercede the outcome and exposure variables #' @param graph If TRUE (default), produces an odds ratio plot #' @param design Specification for graph; can be "case control","case-control", "cohort" or "prospective" #' @param main main title of the graph #' @param xlab label on X axis #' @param ylab label on Y axis #' @param xaxis two categories of exposure in graph #' @param yaxis two categories of outcome in graph #' @param alpha level of significance #' @param fisher.or whether odds ratio should be computed by the exact method #' @param exact.ci.or whether confidence limite of the odds ratio should be computed by the exact method #' @param decimal number of decimal places displayed #' @note This function is for internal BIGDAWG use only. cci <- function (caseexp, controlex, casenonex, controlnonex, cctable = NULL, graph = TRUE, design = "cohort", main, xlab, ylab, xaxis, yaxis, alpha = 0.05, fisher.or = FALSE, exact.ci.or = TRUE, decimal = 2) { if (is.null(cctable)) { frame <- cbind(Outcome <- c(1, 0, 1, 0), Exposure <- c(1, 1, 0, 0), Freq <- c(caseexp, controlex, casenonex, controlnonex)) Exposure <- factor(Exposure) expgrouplab <- c("Non-exposed", "Exposed") levels(Exposure) <- expgrouplab Outcome <- factor(Outcome) outcomelab <- c("Negative", "Positive") levels(Outcome) <- outcomelab table1 <- xtabs(Freq ~ Outcome + Exposure, data = frame) } else { table1 <- as.table(get("cctable")) } fisher <- fisher.test(table1) caseexp <- table1[2, 2] controlex <- table1[1, 2] casenonex <- table1[2, 1] controlnonex <- table1[1, 1] se.ln.or <- sqrt(1/caseexp + 1/controlex + 1/casenonex + 1/controlnonex) if (!fisher.or) { or <- caseexp/controlex/casenonex * controlnonex p.value <- chisq.test(table1, correct = FALSE)$p.value } else { or <- fisher$estimate p.value <- fisher$p.value } if (exact.ci.or) { ci.or <- as.numeric(fisher$conf.int) } else { ci.or <- or * exp(c(-1, 1) * qnorm(1 - alpha/2) * se.ln.or) } if (graph == TRUE) { caseexp <- table1[2, 2] controlex <- table1[1, 2] casenonex <- table1[2, 1] controlnonex <- table1[1, 1] if (!any(c(caseexp, controlex, casenonex, controlnonex) < 5)) { if (design == "prospective" || design == "cohort" || design == "cross-sectional") { if (missing(main)) main <- "Odds ratio from prospective/X-sectional study" if (missing(xlab)) xlab <- "" if (missing(ylab)) ylab <- paste("Odds of being", ifelse(missing(yaxis), "a case", yaxis[2])) if (missing(xaxis)) xaxis <- c("non-exposed", "exposed") axis(1, at = c(0, 1), labels = xaxis) } else { if (missing(main)) main <- "Odds ratio from case control study" if (missing(ylab)) ylab <- "Outcome category" if (missing(xlab)) xlab <- "" if (missing(yaxis)) yaxis <- c("Control", "Case") axis(2, at = c(0, 1), labels = yaxis, las = 2) mtext(paste("Odds of ", ifelse(xlab == "", "being exposed", paste("exposure being", xaxis[2]))), side = 1, line = ifelse(xlab == "", 2.5, 1.8)) } title(main = main, xlab = xlab, ylab = ylab) } } if (!fisher.or) { results <- list(or.method = "Asymptotic", or = or, se.ln.or = se.ln.or, alpha = alpha, exact.ci.or = exact.ci.or, ci.or = ci.or, table = table1, decimal = decimal) } else { results <- list(or.method = "Fisher's", or = 
or, alpha = alpha, exact.ci.or = exact.ci.or, ci.or = ci.or, table = table1, decimal = decimal) } class(results) <- c("cci", "cc") return(results) } #' Creation of a 2x2 table using the indicated orientation. #' #' make2x2 function port epicalc version 2.15.1.0 (Virasakdi Chongsuvivatwong, 2012) #' @param caseexp Number of cases exposed #' @param controlex Number of controls exposed #' @param casenonex Number of cases not exosed #' @param controlnonex Number of controls not exposed #' @note This function is for internal BIGDAWG use only. make2x2 <- function (caseexp, controlex, casenonex, controlnonex) { table1 <- c(controlnonex, casenonex, controlex, caseexp) dim(table1) <- c(2, 2) rownames(table1) <- c("Non-diseased", "Diseased") colnames(table1) <- c("Non-exposed", "Exposed") attr(attr(table1, "dimnames"), "names") <- c("Outcome", "Exposure") table1 } #' Table Maker #' #' Table construction of per haplotype for odds ratio, confidence intervals, and pvalues #' @param x Contingency table with binned rare cells. #' @note This function is for internal BIGDAWG use only. TableMaker <- function(x) { grp1_sum <- sum(x[,'Group.1']) grp0_sum <- sum(x[,'Group.0']) grp1_exp <- x[,'Group.1'] grp0_exp <- x[,'Group.0'] grp1_nexp <- grp1_sum - grp1_exp grp0_nexp <- grp0_sum - grp0_exp cclist <- cbind(grp1_exp, grp0_exp, grp1_nexp, grp0_nexp) tmp <- as.data.frame(t(cclist)) names(tmp) <- row.names(x) return(tmp) } #' Case Control Odds Ratio Calculation from Epicalc #' #' Calculates odds ratio and pvalues from 2x2 table #' @param x List of 2x2 matrices for calculation, output of TableMaker. #' @note This function is for internal BIGDAWG use only. cci.pval <- function(x) { tmp <- list() caseEx <- x[1] controlEx <- x[2] caseNonEx <- x[3] controlNonEx <- x[4] table1 <- make2x2(caseEx, controlEx, caseNonEx, controlNonEx) tmp1 <- cci(cctable=table1, design = "case-control", graph = FALSE) tmp[['OR']] <- round(tmp1$or,digits=2) tmp[['CI.L']] <- round(tmp1$ci.or[1],digits=2) tmp[['CI.U']] <- round(tmp1$ci.or[2],digits=2) tmp[['p.value']] <- format.pval(chisq.test(table1, correct=F)$p.value) tmp[['sig']] <- ifelse(chisq.test(table1, correct=F)$p.value <= 0.05,"*","NS") return(tmp) } #' Case Control Odds Ratio Calculation from Epicalc list variation #' #' Variation of the cci.pvalue function #' @param x List of 2x2 matrices to apply the cci.pvalue function. List output of TableMaker. #' @note This function is for internal BIGDAWG use only. cci.pval.list <- function(x) { tmp <- lapply(x, cci.pval) tmp <- do.call(rbind,tmp) colnames(tmp) <- c("OR","CI.lower","CI.upper","p.value","sig") return(tmp) } #' Strict Chi-squared Contingency Table Test #' #' Calculates chi-squared contingency table tests and bins all rare cells. #' @param x Contingency table. #' @note This function is for internal BIGDAWG use only. RunChiSq <- function(x) { ### get expected values for cells ExpCnts <- chisq.test(as.matrix(x))$expected ## pull out cells that don't need binning, bin remaining #unbinned OK.rows <- as.numeric(which(apply(ExpCnts,min,MARGIN=1)>=5)) if(length(OK.rows)==0) { # All rows have cells with expected less than 5. 
tmp.chisq <- data.frame(rbind(rep("NCalc",4))) colnames(tmp.chisq) <- c("X.square", "df", "p.value", "sig") chisq.out <- list(Matrix = NA, Binned = NA, Test = tmp.chisq, Flag = FALSE) } else { if(length(OK.rows)>=2) { unbinned <- x[OK.rows,] } else { unbinned <- do.call(cbind,as.list(x[OK.rows,])) rownames(unbinned) <- rownames(x)[OK.rows] } #binned Rare.rows <- as.numeric(which(apply(ExpCnts,min,MARGIN=1)<5)) if(length(Rare.rows)>=2) { binned <- x[Rare.rows,] New.df <- rbind(unbinned,colSums(x[Rare.rows,])) rownames(New.df)[nrow(New.df)] <- "binned" } else { binned <- cbind(NA,NA) colnames(binned) <- c("Group.0","Group.1") New.df <- x } if(nrow(New.df)>1) { # flag if final matrix fails Cochran's rule of thumb (more than 20% of exp cells are less than 5) # True = OK ; False = Not good for Chi Square ExpCnts <- chisq.test(New.df)$expected if(sum(ExpCnts<5)==0){ # all expected are greater than 5 flag <- TRUE } else if( sum(ExpCnts<5)/sum(ExpCnts>=0)<=0.2 && sum(ExpCnts>=1)==length(ExpCnts) ){ # expected counts < 5 are greater than or equal to 20% of cells # all individual counts are >= 1 flag <- TRUE } else { # else flag contingency table # invalid flag <- FALSE } ## chi square test on binned data df.chisq <- chisq.test(New.df) Sig <- if(df.chisq$p.value > 0.05) { "NS" } else { "*" } ## show results of overall chi-square analysis tmp.chisq <- data.frame(cbind(round(df.chisq$statistic,digits=4), df.chisq$parameter, format.pval(df.chisq$p.value), Sig)) colnames(tmp.chisq) <- c("X.square", "df", "p.value", "sig") chisq.out <- list(Matrix = New.df, Binned = binned, Test = tmp.chisq, Flag = flag) } else { tmp.chisq <- data.frame(rbind(rep("NCalc",4))) colnames(tmp.chisq) <- c("X.square", "df", "p.value", "sig") chisq.out <- list(Matrix = New.df, Binned = binned, Test = tmp.chisq, Flag = FALSE) } } return(chisq.out) } #' Contextual Binning Chi-squared Contingency Table Test #' #' Calculates chi-squared contingency table tests and bins rare cells at 20% capture rate. #' @param x Contingency table. #' @note This function is for internal BIGDAWG use only. RunChiSq_c <- function(x) { ### get expected values for cells ExpCnts <- chisq.test(as.matrix(x))$expected # Order Counts getOrder <- order(ExpCnts[,1],ExpCnts[,2],decreasing=T) ExpCnts <- ExpCnts[getOrder,] x.sub <- x[getOrder,] # Define Rows Safe.cells.rows <- as.numeric(which(apply(ExpCnts,min,MARGIN=1)>=5)) Rare.cells.rows <- as.numeric(which(apply(ExpCnts,min,MARGIN=1)<5)) # Define Flags Check.Rebinned <- FALSE No.Bin <- FALSE if(length(Safe.cells.rows)==0) { # All rows have cells with expected less than 5. 
tmp.chisq <- data.frame(rbind(rep("NCalc",4))) colnames(tmp.chisq) <- c("X.square", "df", "p.value", "sig") chisq.out <- list(Matrix = NA, Binned = NA, Test = tmp.chisq, Flag = FALSE) } else { ### pull out cells that don't need binning, bin remaining #unbinned if(length(Safe.cells.rows)>=2) { unbinned <- x.sub[Safe.cells.rows,] } else { unbinned <- do.call(cbind,as.list(x.sub[Safe.cells.rows,])) rownames(unbinned) <- rownames(x.sub)[Safe.cells.rows] } unbinned.tmp <- unbinned # Iterate through rows -- adding back rows until threshold exceeds 0.2 (20%) if( length(Rare.cells.rows)>=3 ) { threshold=0 ; i=1 repeat { # Process through adding back rows until threshold exceeds 0.2 (20%) get.putRow <- Rare.cells.rows[seq(1,i)] get.binRow <- Rare.cells.rows[seq(i+1,length(Rare.cells.rows))] if ( length(get.binRow)==1 ) { Stop = i - 1 ; break } unbinned.test <- rbind(unbinned.tmp, x.sub[get.putRow,], rbind(colSums(x.sub[get.binRow,])) ) unbinned.test.cs <- chisq.test(unbinned.test)$expected threshold <- sum(unbinned.test.cs<5)/ length(unbinned.test.cs) if( threshold<=0.2 ) { i = i + 1 } else { Stop = i - 1 ; break } }# End repeat if( Stop>0 ) { # Set up which rows to rescue and bin getRescued <- Rare.cells.rows[1:Stop] putBinned <- Rare.cells.rows[seq(i,length(Rare.cells.rows))] # binning must be more than 1 row if( length(putBinned)==1 ) { putBinned <- c(getRescued[length(getRescued)],putBinned) getRescued <- getRescued[-length(getRescued)] } # Reclaim any rescued rows to unbinned matrix if( length(getRescued) > 1 ) { unbinned <- rbind(unbinned, x.sub[getRescued,]) rownames(unbinned)[getRescued] <- rownames(x.sub)[getRescued] } else { rebin.tmp <- do.call(cbind,as.list(x.sub[getRescued,])) rownames(rebin.tmp) <- rownames(x.sub)[getRescued] unbinned <- rbind(unbinned, rebin.tmp) } # Bin remaining rows binned <- x.sub[putBinned,] rownames(binned) <- rownames(x.sub)[putBinned] Check.Rebinned <- TRUE } else { # Stop == 0 # No rows identified to rescue, bin all rare cell containing rows binned <- x.sub[Rare.cells.rows,] rownames(binned) <- rownames(x.sub)[Rare.cells.rows] } } else if ( length(Rare.cells.rows)==2 ) { # For Rare cells in only 2 rows threshold <- sum(ExpCnts<5) / length(x.sub) if( threshold > 0.2 ) { # must bin both binned <- x.sub[Rare.cells.rows,] rownames(binned) <- rownames(x.sub)[Rare.cells.rows] } else { # no binning required No.Bin <- TRUE } } else { # Rare.cells.rows == 1 # No binning possible No.Bin <- TRUE } # Playing no favorites # Check if rescued cell expected counts overlap binned expected counts if(Check.Rebinned) { # If check.rebinned = T # getRescued = rescued rows ... can be 1 row # putBinned = binned rows ...
must be greater than 1 row # Rescued rows expected counts if(length(getRescued)>1) { rescue.expcnts <- apply(ExpCnts[getRescued,],MARGIN=1,paste,collapse=":") } else { rescue.expcnts <- paste(ExpCnts[getRescued,],collapse=":") } # Binned rows expected counts bin.expcnts <- apply(ExpCnts[putBinned,],MARGIN=1,paste,collapse=":") bin.expcnts.rev <- apply(ExpCnts[putBinned,c(2,1)],MARGIN=1,paste,collapse=":") rebin.hits <- unique(c(which((rescue.expcnts %in% bin.expcnts)==T), which((rescue.expcnts %in% bin.expcnts.rev)==T))) if ( length(rebin.hits)>0 ) { rebin.names <- names(rescue.expcnts[rebin.hits]) rebin.rows <- which((row.names(unbinned) %in% rebin.names)==T) binned <- rbind(binned,unbinned[rebin.rows,,drop=F]) unbinned <- unbinned[-rebin.rows,] } } # Create final matrix New.df if (No.Bin) { binned <- cbind(NA,NA) colnames(binned) <- c("Group.0","Group.1") New.df <- x.sub } else { # merge unbinned and column sums of binned New.df <- rbind(unbinned,colSums(binned)) rownames(New.df)[nrow(New.df)] <- "binned" # Reorder binned by row names binned <- binned[order(rownames(binned)),] } # Reorder New.df by row names putOrder <- order(row.names(New.df)) New.df <- New.df[putOrder,] if(nrow(New.df)>1) { ExpCnts <- chisq.test(New.df)$expected # flag if final matrix fails Cochran's rule of thumb (more than 20% of exp cells are less than 5) # True = OK ; False = Not good for Chi Square if(sum(ExpCnts<5)==0){ # all expected are greater than 5 flag <- TRUE } else if( sum(ExpCnts<5)/sum(ExpCnts>=0)<=0.2 && sum(ExpCnts>=1)==length(ExpCnts) ){ # expected counts < 5 are greater than or equal to 20% # all individual counts are >= 1 flag <- TRUE } else { # else flag contingency table flag <- FALSE } ## chi square test on binned data df.chisq <- chisq.test(New.df) Sig <- if(df.chisq$p.value > 0.05) { "NS" } else { "*" } ## show results of overall chi-square analysis tmp.chisq <- data.frame(cbind(round(df.chisq$statistic,digits=4), df.chisq$parameter, format.pval(df.chisq$p.value), Sig)) colnames(tmp.chisq) <- c("X.square", "df", "p.value", "sig") chisq.out <- list(Matrix = New.df, Binned = binned, Test = tmp.chisq, Flag = flag) } else { flag <- FALSE tmp.chisq <- data.frame(rbind(rep("NCalc",4))) colnames(tmp.chisq) <- c("X.square", "df", "p.value", "sig") chisq.out <- list(Matrix = New.df, Binned = binned, Test = tmp.chisq, Flag = FALSE) } } return(chisq.out) }
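## Illustrative usage sketch for RunChiSq() (toy counts; not part of the package source).
## The input is a contingency table with allele/haplotype rownames and 'Group.0'/'Group.1' count columns.
# counts <- matrix(c(40, 30, 12, 4, 4,
#                    35, 40, 10, 3, 2),
#                  ncol=2,
#                  dimnames=list(c("01:01","02:01","03:01","24:02","26:02"), c("Group.0","Group.1")))
# res <- RunChiSq(counts)
# res$Matrix   # table after rows with rare cells (expected count < 5) are binned
# res$Test     # X.square, df, p.value, sig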
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/stat_functions.R
#' Update function for protein aligment upon new IMGT HLA data release #' #' This updates the protein aligment used in checking HLA loci and alleles as well as in the amino acid analysis. #' @param Restore Logical specifying if the original alignment file be restored. #' @param Force Logical specifiying if update should be forced. #' @param Output Logical indicating if error reporting should be written to file. UpdateRelease <- function(Force=F,Restore=F,Output=F) { if( !inherits(try(XML::readHTMLTable("http://cran.r-project.org/web/packages/BIGDAWG/index.html",header=F),silent=T),"try-error") ) { MainDir <- getwd() on.exit(setwd(MainDir), add = TRUE) getDir <- path.package('BIGDAWG') putDir <- paste(getDir,"/data",sep="") if(!dir.exists(putDir)) { dir.create(putDir) } if(!Restore) { #Check current version against BIGDAWG version if(!Force) { setwd(putDir) # Get IMGT Release Version invisible(download.file("ftp://ftp.ebi.ac.uk/pub/databases/ipd/imgt/hla/release_version.txt",destfile="release_version.txt",method="libcurl")) Release <- read.table("release_version.txt",comment.char="",sep="\t") Release <- apply(Release,MARGIN=1,FUN=function(x) gsub(": ",":",x)) RV.current <- unlist(strsplit(Release[3],split=":"))[2] file.remove("release_version.txt") # Get BIGDAWG UPL <- paste(path.package('BIGDAWG'),"/data/UpdatePtnAlign.RData",sep="") UpdatePtnList <- NULL ; rm(UpdatePtnList) if( file.exists(UPL) ) { load(UPL) EPL <- UpdatePtnList rm(UpdatePtnList,UPL) UPL.flag=T } else { EPL <- ExonPtnList UPL.flag=F } RV.BIGDAWG <- EPL$Release.Version cat("Versions:\n","IMGT/HLA current: ",RV.current,"\n BIGDAWG version: ",RV.BIGDAWG,"\n") if(grepl(RV.current,RV.BIGDAWG)) { Flag <- T } else { Flag <- F } } else { Flag <- F }# End if() for setting Flag #Run Update if Flag = T if(Flag) { cat("\nYour database seems up to date. Use Force = T to force the update.") } else { # For creating UpdatePtnAlign.RData object # Define download directory setwd(putDir) Safe <- dir() Safe <- c(Safe[!grepl(".txt",Safe)],"UpdatePtnAlign.RData") #STEP 1: Define Loci and Read in Reference Exon Map Files Loci <- c("A","B","C","DPA1","DPB1","DQA1","DQB1","DRB1","DRB3","DRB4","DRB5") #Currently DRB1, DRB3, DRB4, DRB5 aligments in single file #Remove if split into individual files Loci.get <- c("A","B","C","DPA1","DPB1","DQA1","DQB1","DRB") #Exon Info RefTab <- BIGDAWG::ExonPtnList$RefExons #STEP 2: Download protein alignments and other ancillary files cat("Updating reference object for the amino acid analysis.\n") cat("Downloading alignment files from the IMGT/HLA.\n") GetFiles(Loci.get) Release <- read.table('Release.txt',sep="\t") # created during GetFiles download #STEP 3: Format alignments for exons of interest cat("Formatting alignment files.\n") for(i in 1:length(Loci)) { Locus <- Loci[i] ; ExonPtnAlign.Create(Locus,RefTab) } #STEP 4: Create ExonPtnAlign list object for BIGDAWG package AlignObj.Update(Loci,Release,RefTab) #STEP 5: Clean up cat("Cleaning up.\n") invisible(file.remove(dir()[which(dir() %in% Safe!=T)])) cat("Updated.\n") } } else if (Restore) { setwd(putDir) if(!file.exists('UpdatePtnAlign.RData')) { stop("No prior update to restore.", call.= F) } cat("Restoring original alignment reference object for amino acid analysis.\n") invisible(file.remove('UpdatePtnAlign.RData')) cat("Restored.\n") } } else { Err.Log(Output,"No.Internet") stop("Update stopped.",call.=F) } }
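## Illustrative usage (mirrors the package vignette; requires an active internet connection).
# UpdateRelease()               # update the bundled alignments to the current IMGT/HLA release
# UpdateRelease(Force=T)        # force the update even if the versions already match
# UpdateRelease(Restore=T)      # remove a prior update and restore the bundled alignment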
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/R/update_wrapper.R
--- title: "BIGDAWG" author: "Derek Pappas, Ph.D. ([email protected])" date: "2021-10-23" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BIGDAWG} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Overview 'Bridging ImmunoGenomic Data-Analysis Workflow Gaps' ('BIGDAWG') is an integrated analysis system that automates the manual data-manipulation and trafficking steps (the gaps in an analysis workflow) normally required for analyses of highly polymorphic genetic systems (e.g., the immunological human leukocyte antigen (HLA) and killer-cell Immunoglobulin-like receptor (KIR) genes) and their respective genomic data (immunogenomic) (Pappas DJ, Marin W, Hollenbach JA, Mack SJ. 2016. 'Bridging ImmunoGenomic Data Analysis Workflow Gaps (BIGDAWG): An integrated case-control analysis pipeline.' [Human Immunology. 77:283-287](https://pubmed.ncbi.nlm.nih.gov/26708359/)). Starting with unambiguous genotype data for case-control groups, 'BIGDAWG' performs tests of Hardy-Weinberg equilibrium, and carries out case-control association analyses for haplotypes, individual loci, specific HLA exons, and HLA amino acid positions. ## Input Data Data for BIGDAWG should be in a tab delimited text format (UTF-8 encoding). The first row must be a header line and must include column names for genotype data. The first two columns must contain subject IDs and phenotypes (0 = control, 1 = case), respectively. A phenotype is not limited to disease status and may include other phenotypes such as onset, severity, ancestry, etc. However, phenotype designatons in the dataset are restricted to the use of 0s and 1s. Genotype pairs must be located in adjacent columns. Column names for a given locus may use '_1', '.1','_2','.2' to distinguish each locus pair. Genotype calls may include any text (numeric or character) except the numbers 1 and 0. Data may also be passed to BIGDAWG as an R object (dataframe) following the same formatting as above for text files. You may also choose to run a synthetic HLA data set (see below) to observe a typical BIGDAWG analysis and experiment with parameter settings. For HLA alleles, you may choose to format your genotype calls with our without the locus prefix. For example, for HLA-A, a given genotype call maybe 01:01:01:01 or A\*01:01:01:01 or HLA-A\*01:01:01:01. Allele names can include any level of resolution, from a single field up to the full length name. For HLA-DRB3,-DRB4,-DRB5 genotype calls, you may choose to represent these as a single pair of columns or as separate pairs of columns for each locus. However, when submitted as a single pair of columns, all genotypes must be formatted as Locus*Allele (including non-DRB loci). The single pair column names may be DRB345, DRB3.4.5 or DRB3/4/5. Homozygous or hemizygous status for DRB3, DRB4 and DRB5 genotypes is based on the DRB1 haplotype as defined by Andersson, 1998 (Andersson G. 1998. Evolution of the HLA-DR region. [Front Biosci. 3:d739-45](https://pubmed.ncbi.nlm.nih.gov/9675159/)). If you wish to define your own zygosity, it is suggested you split them into separate pairs of columns for each locus manually. **Missing Information** When there is missing information, either for lack of genotyping information or absence of genotyped loci, BIGDAWG allows for conventions to differentiate the type of data that is missing. Data missing due to lack of a molecular genotyping result is considered not available (NA). Acceptable NA strings include: NA, ****, -, na and Na. Empty data cells will be considered NA. 
If your data is formatted as Locus*Allele, please include this formatting for all absent alleles as well (e.g., DRB1*NA).

Data missing due to genomic structural variation (i.e., no locus present) is considered absence. Acceptable absence strings include: Absent, absent, Abs, ABS, ab, Ab, AB, @. The last symbol is the unicode at sign. BIGDAWG allows for a special allele name that indicates absence of an HLA locus: 00, 00:00, 00:00:00 and 00:00:00:00 are all acceptable indicators of HLA locus absence. When choosing to use 00's (zeros) to populate allele name fields, use similar or higher levels of resolution [http://hla.alleles.org](http://hla.alleles.org/nomenclature/naming.html) and follow the same naming convention as with other genotype calls (either with or without locus prefix). If using a single column pair for DRB3/4/5 and the "00" absence indicator, then do **NOT** affix a locus prefix for the absent calls. In this case, only include the locus prefix for known DRB345 genotypes (i.e., DRB3/4/5*00:00 is **NOT** an acceptable name). For HLA data, the 00:00 naming convention is preferred and absent designations will be converted to allow the amino acid analysis to test phenotypic associations with locus absence (see below).

Finally, when missing alleles (due to lack of a genotype call) for a locus are included in the haplotype analysis, the haplotype estimation method may impute the identity of the missing alleles for that subject. If such imputation is not desired, the "Missing" parameter should be set to 0.

**Genotype List Strings**

For HLA alleles, you may submit your data formatted as genotype list strings (GL strings) and BIGDAWG can automatically convert the data to a tabular format. The data should be 3 columns with the GL string in the third column (see table below). Data is restricted to **00:00** for absent designations when using GL strings; you should NOT use any other indicator of absence. Utilizing NA is not compatible with GL2Tab conversion. BIGDAWG also has a built-in function for converting between GL strings and tabular formatting using the GLSconvert() function. Please see the GLSconvert vignette for more detail.

**Novel Alleles**

BIGDAWG will accept novel allele names. However, it is suggested you follow the same naming convention for novel alleles as with other genotype calls in your data, either with or without the locus prefix. For example, novel alleles could be submitted as follows: Novel, 01:Novel, or A*01:Novel. Unfortunately, the BIGDAWG amino acid analysis cannot accept novel allele designations and will display an error. If you would like to run the amino acid analysis, you should replace the novel allele with NA or omit the subject entirely.
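As a minimal sketch of passing data as an R object rather than a file name (the file name 'data.txt' is a placeholder for a tab-delimited file formatted as described above):

```
# Read a tab-delimited genotype file into a data frame, then pass the data frame to BIGDAWG
dat <- read.table("data.txt", header=TRUE, sep="\t", stringsAsFactors=FALSE, check.names=FALSE)
BIGDAWG(Data=dat, Run.Tests=c("HWE","L"))
```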
**Example of data architecture and acceptable values:**

*Tabular*

|SubjectID |Disease | A | A | B | B | DRB1 | DRB1 | DRB3 | DRB3 |
|----------|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|
|subject1 |0 |01:01 |02:01 |08:01 |44:02 |01:01 |08:01 |Abs |Abs |
|subject2 |1 |02:01 |24:02 |51:01 |51:01 |11:01 |14:01 |02:02 |02:11 |
|subject3 |0 |03:01 |26:02 |NA |NA |10:01 |08:01 |00:00 |00:00 |

*Genotype List String*

|SubjectID |Disease |GLString |
|----------|:------:|:-------------------------------------------------------------:|
|Subject1 |0 |HLA-A\*01:01+HLA-A\*02:01\^HLA-B\*08:01+HLA-B\*44:02\^HLA-DRB1\*01:01+HLA-DRB1\*03:01^HLA-DRB3\*00:00+HLA-DRB3\*00:00 |
|subject2 |1 |HLA-A\*02:01+HLA-A\*24:02\^HLA-B\*51:01+HLA-B\*51:01\^HLA-DRB1\*11:01+HLA-DRB1\*14:01^HLA-DRB3\*02:02+HLA-DRB3\*02:11 |
|subject3 |0 |HLA-A\*01:01+HLA-A\*02:01\^HLA-DRB1\*01:01+HLA-DRB1\*03:01^HLA-DRB3\*00:00+HLA-DRB3\*00:00 |

## Data Output

After the package is run, BIGDAWG will create a new folder entitled 'output hhmmss ddmmyy' in the working directory (unless otherwise specified by the Results.Dir parameter, see below). Within the output folder will be a precheck file ('Data_Summary.txt') detailing the summary statistics of the dataset and the results of the Hardy-Weinberg equilibrium test ('HWE.txt'). If any errors are present, a log file ('Error_Log.txt') will be written.

If no locus subsets are specified (see parameters section), a single subfolder entitled 'Set1' will contain the outputs of each association analysis run. If multiple locus subsets are defined, subfolders for each locus set will be created containing the respective analytic results for that subset. Within each set subfolder, a parameter file will detail the parameters that are relevant to that subset. When all pairwise combinations are run in the haplotype analysis, each pairwise set will be written to a single file. A separate file called "haplotype_PairwiseSets.txt" will be written to the corresponding set's directory and will break down the locus makeup for each PairwiseSet.

Data output to both the console and text files can be suppressed with Verbose=F and Output=F respectively. This is particularly useful when the user prefers to send the results to an R object (Return=T, see below) rather than to text files for further analysis. Finally, when multiple analyses are run (i.e., HWE, H, L, A) the data for each analysis (Chi Squares, Odds Ratios, Allele Frequencies, Allele Counts, etc.) can be written as a set of files labeled 'Merge' for convenient access (Merge.Output=T).

*Output As List Object*

With the parameter "Return=T", the result is returned as a list with indices for each analysis result (HWE, H, L, A). Defined loci sets will exist as list sub-elements under each respective analysis. If no Loci.Sets were specified, only one list element will exist for Set1. For example, if BIGDAWG outputs to an object called 'BD' in R (see examples below):

BD (e.g., 3 loci sets defined)

..\$HWE - Results of the Hardy-Weinberg equilibrium test
..\$H\$Set1 - Results of the 'Haplotype' analysis for Set1
..\$H\$Set2 - Results of the 'Haplotype' analysis for Set2
..\$H\$Set3 - Results of the 'Haplotype' analysis for Set3
..\$L\... - Results of the 'Locus' analysis
..\$A\... - Results of the 'Amino Acid' analysis
or when "All.Pairwise=T" (e.g., 3 loci, 3 pairwise comparisons possible, single locus set)

..\$HWE - Results of the Hardy-Weinberg equilibrium test
..\$H\$Set1.PairwiseSet1 - Results of the 'Haplotype' analysis for Set1 Pairwise Set 1
..\$H\$Set1.PairwiseSet2 - Results of the 'Haplotype' analysis for Set1 Pairwise Set 2
..\$H\$Set1.PairwiseSet3 - Results of the 'Haplotype' analysis for Set1 Pairwise Set 3
..\$L\$Set1 - Results of the 'Locus' analysis for Set1
..\$A\$Set1 - Results of the 'Amino Acid' analysis for Set1

names(BD) - Display complete list of available sets to index. For a complete list of available pairwise results, use names(BD\$H). This results list is written to the results directory as an R object called 'Analysis.RData' for later use.

## Error Messages and Codes

BIGDAWG has a few built-in checks to ensure data format consistency and compatibility, especially for HLA data. BIGDAWG also does a parameter review before performing chi-squared tests and returns 'NCalc' (not calculated) when all genotypes have expected counts < 5 or the degrees of freedom do not allow for a test (e.g., dof < 1).

## Known Issues

BIGDAWG's output includes locus frequencies to provide convenient access for future reference. However, the values have been rounded to 5 digits (arbitrarily chosen) to make the output more concise. This rounding may introduce errors in the frequencies wherein they do not sum to 1. If downstream use of the allele frequency is required, we suggest the user calculate frequencies directly from the counts tables for their own application. Please refer to the ?round help documentation and the section Warning for more detail on rounding considerations.

## Parameters

`BIGDAWG(Data, HLA=T, Run.Tests, Loci.Set, Exon, All.Pairwise=F, Trim=F, Res=2, Missing=0, Strict.Bin=F, Cores.Lim=1L, Results.Dir, Return=F, Output=T, Verbose=T)`

**Data**

Class: String. Required. No Default.

e.g., `Data=HLA_data` -or- `Data="HLA_data"` -or- `Data="foo.txt"` -or- `Data=foo.txt`

Specifies genotype data file name. May use file name within working directory or full file name path to specify file location. See Data Input section for details about file formatting. Use 'Data=HLA_data' to analyze the bundled synthetic dataset.

**HLA**

Class: logical. Optional. Default = T.

Indicates whether or not your data is specific for HLA loci. If your data is not HLA, is a mix of HLA and data for other loci, or includes non-standard HLA allele names, you should set `HLA = F`. This will override the Trim and EVS.rm arguments, and will skip various tests and checks. Set `HLA = T` if and only if the dataset's HLA allele names are consistent with the most recent [IPD-IMGT/HLA](https://www.ebi.ac.uk/ipd/imgt/hla/) database release.

**Run.Tests**

Class: String or Character vector. Optional. Default = Run all tests.

e.g., `Run.Tests = c("L","A")` -or- `Run.Tests = "HWE"`

Specifies which tests to run in analysis. "HWE" will run the Hardy Weinberg Equilibrium test, "H" will run the haplotype association test, "L" will run the locus association test, and "A" will run the amino acid association test. Combinations of the tests are permitted as indicated in the example. The amino acid test generally requires the most processing time. Taking advantage of multi-core machines can minimize this time (see below). Moreover, avoid defining multiple Loci.Sets (see below) with overlapping loci as processing time will increase redundantly.
Currently, the amino acid analysis is limited to the HLA-A, -B, -C, -DRB1, -DRB3, -DRB4, -DRB5, -DQA1, -DQB1, -DPA1 and -DPB1 loci.

**Loci.Set**

Class: List. Optional. Default = Use all loci.

e.g., `Loci.Set=list(c("DRB1","DQB1"),c("A","DRB1","DPB1"), c("DRB1","DRB3"))` -or- `Loci.Set=list("A")`

Input list defining which loci to use for analyses. Combinations are permitted. If you included HLA-DRB3,-DRB4,-DRB5 as a collapsed column pair ('DRB3/4/5'), you must specify the single locus in the Loci.Set if you wish them to be included in an analysis set (i.e., 'DRB3' **NOT** 'DRB3/4/5'). The pair of alleles for a locus must be in adjacent columns in the analyzed data set. Running multiple sets is generally only relevant for the haplotype analysis without all pairwise combinations. For all other analyses, loci are treated independently. Consider running the haplotype analysis independently when optioning multi-locus sets that include overlapping loci, to avoid redundancies. Each locus set's output will be contained within a separate set folder numbered numerically corresponding to its order in the Loci.Set parameter (see Data Output section).

**Exon**

Class: Numeric. Optional.

e.g., `Exon=3` -or- `Exon=c(3,5,6)` -or- `Exon=c(2:3)` -or- `Exon=1:4`

A single numeric or numeric vector that defines exons to target in the amino acid analysis. The amino acid alignment is parsed according to the overlap of the defined exons. When amino acid codons overlap exon boundaries, the exon with the majority overlap (2 out of 3 nucleotides) is assigned that residue. This argument is only relevant to the amino acid analysis. The defined exons are not required to be continuous. Multiple sets are not analyzed separately. The defined exons are applied to all loci in the analysis. If an exon does not exist for a given locus, BIGDAWG will register an error and the analysis will stop. In such instances, it is recommended you analyze those loci separately.

**All.Pairwise**

Class: Logical. Optional. Default = F.

Should pairwise combinations of loci be run in the haplotype analysis? Only relevant to haplotype analysis. When optioned, only pairwise combinations of loci will be run and not all the loci in a given data set.

**EVS.rm**

Class: Logical. Optional. Default = F. (`HLA=T` specific).

Flags whether or not to strip expression variant suffixes from HLA alleles. Example: A\*01:11N will convert to A\*01:11. Should not be optioned for data that does not conform to HLA naming conventions.

**Trim**

Class: Logical. Optional. Default = F. (`HLA=T` specific).

Flags whether or not to trim HLA alleles to a specified resolution. Should not be optioned for data that does not conform to HLA naming conventions.

**Res**

Class: Numeric. Optional. Default = 2. (`HLA=T` specific).

Sets the desired resolution when trimming HLA alleles. Used only when `Trim = T`. Fields for HLA formatting must follow current colon-delimited nomenclature conventions. Currently, the amino acid analysis will automatically truncate to 2-field resolution. Trimming is automatic and need not be optioned for the amino acid analysis to run. This test will not run for data that does not conform to HLA naming conventions.

**Missing**

Class: String/Numeric. Optional. Default = 0.

Sets the allowable per subject threshold for missing alleles. Relevant to running the haplotype analysis. Effects on processing time and memory allocation can be severe for large values (>2) of Missing.
Missing may be set as a number or as "ignore" to skip removal and retain all subjects. If you find BIGDAWG spending a lot of time on the "Estimating Haplotypes..." step, reduce your Missing threshold to a value less than or equal to 2.

**Strict.Bin**

Class: Logical. Optional. Default = FALSE.

Sets whether strict binning should be used during Chi Square testing. Strict binning (Strict.Bin = T) will bin all rare cells (expected count < 5). Otherwise, BIGDAWG will allow for up to 20% of the cells to have expected counts less than 5. Currently limited to the H, L, and A tests. This may rescue haplotypes/alleles/amino acids from binning and help identify significant loci/alleles.

**Cores.Lim**

Class: Integer. Optional. Default = 1 Core.

Specifies the number of cores accessible by BIGDAWG in the amino acid analysis. Not relevant to Windows operating systems, which will use only a single core. More than 1 core is best when optioned in command line R and is not recommended when used in combination with a GUI, e.g. RStudio.

**Results.Dir**

Class: String. Optional. Default = see Data Output section.

String name of a folder for BIGDAWG output. A subfolder for each locus set will be generated within any output folder specified.

**Return**

Class: Logical. Optional. Default = F.

Specifies if BIGDAWG should output analysis results to a specified object.

**Output**

Class: Logical. Optional. Default = T.

Turns on or off the writing of results to files. The default will write all results to text files in the output directory.

**Merge.Output**

Class: Logical. Optional. Default = F.

Turns on or off the merging of all analysis results into single files labeled 'Merged_xxxx.txt'. This process is rapid for smaller sets of loci (less than 50). However, it can become increasingly CPU-time intensive when there are many loci analyzed in conjunction with all pairwise combinations in the haplotype analysis. This parameter is subordinate to Output=T.

**Verbose**

Class: Logical. Optional. Default = T.

Sets the level of detail that should be displayed on the console. The default will display summaries of the analysis from each specified test. When turned off, only the completion status of each test is displayed.

## Examples

These are examples only and need not be run as defined below.

```
# Install the BIGDAWG package
install.packages("BIGDAWG")

# Run the full analysis using the example set bundled with BIGDAWG
BIGDAWG(Data="HLA_data")

# Run the haplotype analysis with all pairwise combinations on a file called 'data.txt'
BIGDAWG(Data="data.txt", Run.Tests="H", All.Pairwise=T)

# Run the Hardy-Weinberg and Locus analysis with non-HLA data while ignoring any missing data on a file called 'data.txt'
BIGDAWG(Data="data.txt", HLA=F, Run.Tests=c("HWE","L"), Missing="ignore")

# Run the amino acid analysis on exons 2 and 3, trimming data to 2-Field resolution on a file called 'data.txt'
BIGDAWG(Data="data.txt", Run.Tests="A", Exon=c(2,3), Trim=T, Res=2)

# Run the haplotype analysis with subsets of loci on a file called 'data.txt'
BIGDAWG(Data="data.txt", Run.Tests="H", Loci.Set=list(c("DRB1","DQB1","DPB1"),c("DRB1","DQB1")))

# Run the full analysis, minimize console output, disable write to file, output to object 'BD'
BD <- BIGDAWG(Data="data.txt", Output=F, Return=T, Verbose=F)
```

## Updating the bundled IMGT/HLA protein alignment

For the amino acid analysis, BIGDAWG is bundled with HLA protein alignment data using the above indicated database release.
These bundled alignments can be updated to the most recent [IPD-IMGT/HLA](https://www.ebi.ac.uk/ipd/imgt/hla/) release. Future database updates do not guarantee compatibility with the updating tool.

```
# Identify the installed and current release of the bundled IMGT/HLA database release
# Requires active internet connection.
CheckRelease()
CheckRelease(Package=F) # restricts to IMGT/HLA database versions only

# Update to the most recent IMGT/HLA database release
UpdateRelease()

# Force update
UpdateRelease(Force=T)

# Restore to original bundled update.
UpdateRelease(Restore=T)
```

## Updating BIGDAWG to latest developmental versions

Developmental versions of BIGDAWG can be downloaded through [GitHub](https://github.com/IgDAWG/BIGDAWG/) or using the following code (requires R package 'devtools'). These versions will include the most up-to-date bug fixes as well as access to new features that are still under development. GitHub versions are constantly under development; if you prefer a more stable fixed release, install BIGDAWG from the CRAN repository. You may check BIGDAWG versions using CheckRelease(). Local building of vignettes requires [Pandoc](https://pandoc.org/installing.html) and pandoc-citeproc if you do not use RStudio. Before installation from GitHub, it is recommended that all other R packages be up to date.

```
# Identify the installed (local), release (CRAN), and developmental (GitHub) versions of BIGDAWG.
# Requires active internet connection.
CheckRelease()
CheckRelease(Alignment=F) # restricts to BIGDAWG versions only

# The 'devtools' package is required for installation via GitHub.
install.packages("devtools")

# Load latest BIGDAWG version from GitHub
# May require closing and reopening of R Studio after install
library("devtools")
install_github("IgDAWG/BIGDAWG", build_vignettes = TRUE) # Requires Pandoc or RStudio
install_github("IgDAWG/BIGDAWG") # No Pandoc or RStudio

# For a temporary install of the developmental version
library("devtools")
dev_mode(on=T)
install_github("IgDAWG/BIGDAWG")
# .... run BIGDAWG analysis
dev_mode(on=F)
```

*End of vignette.*
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/inst/doc/BIGDAWG.Rmd
--- title: "GLSconvert" author: "Derek Pappas, Ph.D. ([email protected])" date: "2020-02-10" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{GLSconvert} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- ## Overview GLSconvert represents a suite of tools for cross converting HLA or KIR genotyping data from gene list text strings to multi-column tabular format as desribed in Milius RP, Mack SJ, Hollenbach JA, et al. 2013. Genotype List String: a grammar for describing HLA and KIR genotyping results in a text string. [Tissue Antigens. 82:106-112](https://pubmed.ncbi.nlm.nih.gov/23849068/). ###Anatomy of a GL String The figure below depicts a genotype List (GL) String representation of a multilocus unphased genotype. A GL String representing HLA-A genotype (A*02:69 and A*23:30, or A*02:302 and, either A*23:26 or A*23:39) and HLA-B genotype (B*44:02:13 and B*49:08) for a single individual is shown. GL String delimiters are parsed hierarchically starting from the locus delimiter (^), proceeding to the genotype delimiter (|), then the chromosome delimiter (+), and ending with the allele delimiter (/). A GL String should include the genetic system name (HLA or KIR) as part of the locus name. ![](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3715123/bin/tan0082-0106-f1.jpg) ### Input Data Data may be passed to GLSconvert() as a tab delimited text file name (full path recommended) or as a R object (data frame). Whether a text file or an R object, the first row must be a header line and include column names for the GL string/locus genotypes. Any column preceeding the genotyping information is considered to be identifying/miscellanous information. This would generally include at least the sample id. While there is no limit to the number of columns, the direction of the conversion may dictate a specific column order. Empty rows will be excluded from final output. _**GL String to Table**_ Formatting for GL string conversion requires that the last column of the data table must contain the GL string. |SubjectID |Exp ID |GLString | |----------|:------:|:---------------------------------------------:| |Subject1 |Center1 |HLA-A\*01:01+HLA-A\*02:01\^HLA-B\*08:01+HLA-B\*44:02\^HLA-DRB1\*01:01+HLA-DRB1\*03:01 | _**Table to GL String**_ Formatting for table conversion requires at least three columns. One (or more) column(s) of identifying information followed by column pairs for each locus. Genotype locus pairs must be located in adjacent columns. Column names for a given locus may use (not required) '_1', '.1','_2','.2' to distinguish each locus pair. Only columns defining genotypes names for each locus may repeat, all other column names must be unique. You may format your alleles as Locus*Allele or Allele following defined HLA and KIR naming conventions. |SubjectID |Exp ID | A | A | B | B | DRB1 | DRB1 | DRB3 | DRB3 | |----------|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:| |Subject1 |Center1 |01:01 |02:01 |08:01 |44:02 |01:01 |04:01 | | | **Ambiguity** Only ambiguity at the allele level is compatible with the GL conversion tool (separated by "/"). Ambiguity at the genotype (separated by "|") cannot be used with the GL conversion tool and will terminate the script. This includes tables submitted for conversion to GL strings containing rows of identical non-genotype identifying information (sample ID, experiment ID, etc.), these will be considered to be ambiguous genotypes in the table and the conversion tool will stop. 
**Homozygosity**

Homozygous allele calls can be represented as single alleles in the GL string. For example, HLA-A\*01:01:01:01 + HLA-A\*01:01:01:01 can be written as HLA-A*01:01:01:01. This only applies to Tab2GL conversions. When a locus is represented by a single allele in a GL string, that allele will be reported in both fields for that locus in the converted table.

**HLA-DRB3, HLA-DRB4, and HLA-DRB5**

HLA-DRB3, HLA-DRB4, and HLA-DRB5 are parsed for homozygous or hemizygous status based on the DRB1 haplotype as defined by Andersson, 1998 (Andersson G. 1998. Evolution of the HLA-DR region. [Front Biosci. 3:d739-45](https://pubmed.ncbi.nlm.nih.gov/9675159/)) and can be flagged for inconsistency. Inconsistent haplotypes will be indicated in a separate column called "DR.HapFlag" with the locus or loci that are inconsistent with the respective DRB1 status. You may choose not to have haplotypes flagged using the 'DRB345.Check' parameter (see below).

**Missing Information**

For HLA-DRB3, HLA-DRB4, and HLA-DRB5 (HLA-DRBx), when there is missing information, either for lack of genotyping calls or absence of genotyped loci, GLSconvert allows for a convention to differentiate data missing due to genomic structural variation (i.e., locus absence). The acceptable indicator of locus absence is the 2-Field designation HLA-DRBx*00:00 (x = 3,4,5). For example, HLA-DRB5\*00:00+HLA-DRB5\*00:00 would indicate absence of an HLA-DRB5 locus and not a failed or missing genotype call. You may choose to have GLSconvert fill in absent calls for these loci. For Tab2GL conversion, an NA can be used to indicate data missing due to lack of a genotyping call; however, NA is not compatible with GL2Tab conversion and should be avoided.

## Data Output

Data can be output to either a text file (tab or comma delimited) or an R object (returned as a data frame). See the File.Output parameter below. When running the GL2Tab conversion, all adjacent pairs of loci will include '_1' and '_2' to distinguish each chromosome. Please note, subsequent programs used to analyze the data table such as BIGDAWG or Pypop may not accept files with ambiguous genotyping data.

## Parameters

`GLSconvert(Data,Convert,File.Output="txt",System="HLA",HZY.Red=FALSE,DRB345.Check=FALSE,Strip.Prefix=TRUE,Cores.Lim=1L)`

**Data**

Class: String/Object. (No Default).

e.g., Data="/your/path/to/file/foo.txt" -or- Data="foo.txt" -or- Data=foo

Specifies the data file name or data object. The file name is either a full file name path to specify file location (recommended) or the name of a file within a set working directory. See Data Input section for details about file formatting. **This parameter is required for the conversion utility.**

**Convert**

Class: String. Options: "GL2Tab" -or- "Tab2GL" (No Default).

Specifies the direction of the conversion: "GL2Tab" converts GL strings to tabular format, and "Tab2GL" converts tabular genotype data to GL strings. **This parameter is required for the conversion utility.**

**File.Output**

Class: String. Options: "R" -or- "txt" -or- "csv" -or- "pypop" (Default = "txt").

Specifies the type of output for the converted genotypes. For file writing, if you specified the full path for a file name then the resultant file will be written to the same directory. Otherwise the file will be written to whichever working directory was defined at initiation of conversion. The converted file name will be of the form "Converted_foo.txt" depending on output setting.
If the data was passed as an R object, the file name will be "Converted.txt" if output to file is desired. Outputting as an R object requires assignment to an object (see examples below).

**System**

Class: String. Options: "HLA" or "KIR" (Default="HLA").

Defines the genetic system of the data being converted. This parameter is required for Tab2GL conversion and is ignored for GL2Tab. The default system is HLA.

**HZY.Red**

Class: Logical (Default=FALSE).

Homozygous reduction: should non-DRBx homozygotes be represented by a single allele name in the GL string? For example: HLA-A*01:01:01:01+HLA-A*01:01:01:01 as HLA-A*01:01:01:01. The default behavior is to keep both allele names in the GL string. This parameter is only used when Convert = Tab2GL, and only applies to non-DRBx genotype data.

**DRB345.Check**

Class: Logical (Default=FALSE).

Indicates whether DR haplotypes should be parsed for correct zygosity and unusual DR haplotypes flagged. Inconsistent loci will appear flagged in a separate column labeled 'DR.HapFlag' that follows the genotype columns. The default behavior will flag unusual haplotypes. HLA-DRBx alleles without a respective HLA-DRB1 will remain unchanged and the flag will say 'ND' for not determined.

**Strip.Prefix**

Class: Logical (Default=TRUE).

Applies only to Convert="GL2Tab" conversions. Indicates whether alleles should be stripped of System/Locus prefixes in the final data when converting from GL strings to tabular format. For example, should HLA-A*01:01:01:01 be recorded as 01:01:01:01 in the final table. Required when outputting to a PyPop compatible file. The default will strip prefixes.

**Abs.Fill**

Class: Logical (Default=FALSE).

Relevant only to data containing one or more of the loci: HLA-DRB3, HLA-DRB4, or HLA-DRB5. Directs GLSconvert to fill in missing information with the 2-Field designation HLA-DRBx\*00:00. For example, when data contain HLA-DRB5 typing, those subjects with no HLA-DRB5 will be given the designation HLA-DRB5\*00:00 or HLA-DRB5\*00:00+HLA-DRB5\*00:00 depending on the situation. If you have absent locus designations already present in your data, then GLSconvert will remove them in the final output.

## Examples

These are examples only and need not be run as defined below.

```
# Run the GL2Tab conversion on a data file with default output to text file and no prefix stripping
GLSconvert(Data="/your/path/to/file/foo.txt", Convert="GL2Tab", Strip.Prefix=FALSE)

# Run the Tab2GL conversion on an R object, outputting to an R object with DRB345 flagging
foo.tab <- GLSconvert(Data=foo, Convert="Tab2GL", File.Output="R", DRB345.Check=TRUE)

# Run the Tab2GL conversion on a text file, outputting to a csv file with homozygous allele reduction of non-DRB345 alleles
GLSconvert(Data=foo, Convert="Tab2GL", File.Output="csv", HZY.Red=TRUE)

# Run the GL2Tab conversion on a data file without the full path name
setwd("/your/path/to/file")
GLSconvert(Data="foo.txt", Convert="GL2Tab")
```

*End of vignette.*
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/inst/doc/GLSconvert.Rmd
--- title: "BIGDAWG" author: "Derek Pappas, Ph.D. ([email protected])" date: "2021-10-23" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{BIGDAWG} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ## Overview 'Bridging ImmunoGenomic Data-Analysis Workflow Gaps' ('BIGDAWG') is an integrated analysis system that automates the manual data-manipulation and trafficking steps (the gaps in an analysis workflow) normally required for analyses of highly polymorphic genetic systems (e.g., the immunological human leukocyte antigen (HLA) and killer-cell Immunoglobulin-like receptor (KIR) genes) and their respective genomic data (immunogenomic) (Pappas DJ, Marin W, Hollenbach JA, Mack SJ. 2016. 'Bridging ImmunoGenomic Data Analysis Workflow Gaps (BIGDAWG): An integrated case-control analysis pipeline.' [Human Immunology. 77:283-287](https://pubmed.ncbi.nlm.nih.gov/26708359/)). Starting with unambiguous genotype data for case-control groups, 'BIGDAWG' performs tests of Hardy-Weinberg equilibrium, and carries out case-control association analyses for haplotypes, individual loci, specific HLA exons, and HLA amino acid positions. ## Input Data Data for BIGDAWG should be in a tab delimited text format (UTF-8 encoding). The first row must be a header line and must include column names for genotype data. The first two columns must contain subject IDs and phenotypes (0 = control, 1 = case), respectively. A phenotype is not limited to disease status and may include other phenotypes such as onset, severity, ancestry, etc. However, phenotype designatons in the dataset are restricted to the use of 0s and 1s. Genotype pairs must be located in adjacent columns. Column names for a given locus may use '_1', '.1','_2','.2' to distinguish each locus pair. Genotype calls may include any text (numeric or character) except the numbers 1 and 0. Data may also be passed to BIGDAWG as an R object (dataframe) following the same formatting as above for text files. You may also choose to run a synthetic HLA data set (see below) to observe a typical BIGDAWG analysis and experiment with parameter settings. For HLA alleles, you may choose to format your genotype calls with our without the locus prefix. For example, for HLA-A, a given genotype call maybe 01:01:01:01 or A\*01:01:01:01 or HLA-A\*01:01:01:01. Allele names can include any level of resolution, from a single field up to the full length name. For HLA-DRB3,-DRB4,-DRB5 genotype calls, you may choose to represent these as a single pair of columns or as separate pairs of columns for each locus. However, when submitted as a single pair of columns, all genotypes must be formatted as Locus*Allele (including non-DRB loci). The single pair column names may be DRB345, DRB3.4.5 or DRB3/4/5. Homozygous or hemizygous status for DRB3, DRB4 and DRB5 genotypes is based on the DRB1 haplotype as defined by Andersson, 1998 (Andersson G. 1998. Evolution of the HLA-DR region. [Front Biosci. 3:d739-45](https://pubmed.ncbi.nlm.nih.gov/9675159/)). If you wish to define your own zygosity, it is suggested you split them into separate pairs of columns for each locus manually. **Missing Information** When there is missing information, either for lack of genotyping information or absence of genotyped loci, BIGDAWG allows for conventions to differentiate the type of data that is missing. Data missing due to lack of a molecular genotyping result is considered not available (NA). Acceptable NA strings include: NA, ****, -, na and Na. Empty data cells will be considered NA. 
If your data is formatted as Locus*Allele, please include this formatting for all absent alleles as well (e.g., DRB1*NA). Data missing due to genomic structural variation (i.e., no locus present) is considered absence. Acceptable absence strings include: Absent, absent, Abs, ABS, ab, Ab, AB, @. The last symbol is the at sign (@). BIGDAWG allows for a special allele name that indicates absence of an HLA locus: 00, 00:00, 00:00:00 and 00:00:00:00 are all acceptable indicators of HLA locus absence. When choosing to use 00's (zeros) to populate allele name fields, use similar or higher levels of resolution [http://hla.alleles.org](http://hla.alleles.org/nomenclature/naming.html) and follow the same naming convention as with other genotype calls (either with or without locus prefix). If using a single column pair for DRB3/4/5 and the "00" absence indicator, then do **NOT** affix a locus prefix for the absent calls. In this case, only include the locus prefix for known DRB345 genotypes (i.e., DRB3/4/5*00:00 is **NOT** an acceptable name). For HLA data, the 00:00 naming convention is preferred and absent designations will be converted to allow the amino acid analysis to test phenotypic associations with locus absence (see below). Finally, when missing alleles (due to lack of a genotype call) for a locus are included in the haplotype analysis, the haplotype estimation method may impute the identity of the missing alleles for that subject. If such imputation is not desired, the "Missing" parameter should be set to 0.

**Genotype List Strings**

For HLA alleles, you may submit your data formatted as genotype list strings (GL strings) and BIGDAWG can automatically convert the data to a tabular format. The data should be three columns with the GL string in the third column (see table below). Data is restricted to **00:00** for absent designations when using GL strings; you should NOT use any other indicator of absence. Utilizing NA is not compatible with GL2Tab conversion. BIGDAWG also has a built-in function for converting between GL strings and tabular formatting using the GLSconvert() function. Please see the GLSconvert vignette for more detail.

**Novel Alleles**

BIGDAWG will accept novel allele names. However, it is suggested you follow the same naming convention for novel alleles as with other genotype calls in your data, either with or without the locus prefix. For example, novel alleles could be submitted as follows: Novel, 01:Novel, or A*01:Novel. Unfortunately, the BIGDAWG amino acid analysis cannot accept novel allele designations and will display an error. If you would like to run the amino acid analysis, you should replace the novel allele with NA or omit the subject entirely.
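As an illustration of the tabular layout described above, the short sketch below (hypothetical subject IDs and allele calls, mirroring the example table that follows) assembles such a dataset as an R data frame and passes it to BIGDAWG directly instead of reading it from a tab delimited file; the analysis settings shown are arbitrary.

```
# Minimal sketch of a BIGDAWG-style input data frame (hypothetical values):
# column 1 = subject ID, column 2 = phenotype (0/1), then adjacent locus pairs
my.data <- data.frame(SubjectID = c("subject1", "subject2", "subject3"),
                      Disease = c(0, 1, 0),
                      A_1 = c("01:01", "02:01", "03:01"),
                      A_2 = c("02:01", "24:02", "26:02"),
                      DRB1_1 = c("01:01", "11:01", "10:01"),
                      DRB1_2 = c("08:01", "14:01", "08:01"),
                      stringsAsFactors = FALSE)

# Pass the R object instead of a file name
BIGDAWG(Data = my.data, Run.Tests = c("HWE", "L"))
```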
**Example of data architecture and acceptable values:**

*Tabular*

|SubjectID |Disease | A | A | B | B | DRB1 | DRB1 | DRB3 | DRB3 |
|----------|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|
|subject1 |0 |01:01 |02:01 |08:01 |44:02 |01:01 |08:01 |Abs |Abs |
|subject2 |1 |02:01 |24:02 |51:01 |51:01 |11:01 |14:01 |02:02 |02:11 |
|subject3 |0 |03:01 |26:02 |NA |NA |10:01 |08:01 |00:00 |00:00 |

*Genotype List String*

|SubjectID |Disease |GLString |
|----------|:------:|:-------------------------------------------------------------:|
|Subject1 |0 |HLA-A\*01:01+HLA-A\*02:01\^HLA-B\*08:01+HLA-B\*44:02\^HLA-DRB1\*01:01+HLA-DRB1\*03:01^HLA-DRB3\*00:00+HLA-DRB3\*00:00 |
|subject2 |1 |HLA-A\*02:01+HLA-A\*24:02\^HLA-B\*51:01+HLA-B\*51:01\^HLA-DRB1\*11:01+HLA-DRB1\*14:01^HLA-DRB3\*02:02+HLA-DRB3\*02:11 |
|subject3 |0 |HLA-A\*01:01+HLA-A\*02:01\^HLA-DRB1\*01:01+HLA-DRB1\*03:01^HLA-DRB3\*00:00+HLA-DRB3\*00:00 |

## Data Output

After the package is run, BIGDAWG will create a new folder entitled 'output hhmmss ddmmyy' in the working directory (unless otherwise specified by the Results.Dir parameter, see below). Within the output folder will be a precheck file ('Data_Summary.txt') detailing the summary statistics of the dataset and the results of the Hardy-Weinberg equilibrium test ('HWE.txt'). If any errors are present, a log file ('Error_Log.txt') will be written. If no locus subsets are specified (see parameters section), a single subfolder entitled ‘Set1’ will contain the outputs of each association analysis run. If multiple locus subsets are defined, subfolders for each locus set will be created containing the respective analytic results for that subset. Within each set subfolder, a parameter file will detail the parameters that are relevant to that subset. When all pairwise combinations are run in the haplotype analysis, each pairwise set will be written to a single file. A separate file called "haplotype_PairwiseSets.txt" will be written to the corresponding set's directory and will break down the locus makeup for each PairwiseSet.

Data output to both the console and text files can be suppressed with Verbose=F and Output=F respectively. This is particularly useful when the user prefers to send the results to an R object (Return=T, see below) rather than to text files for further analysis. Finally, when multiple analyses are run (i.e., HWE, H, L, A) the data for each analysis (Chi Squares, Odds Ratios, Allele Frequencies, Allele Counts, etc.) can be written as a set of files labeled 'Merge' for convenient access (Merge.Output=T).

*Output As List Object*

With the parameter "Return=T", the result is returned as a list with indices for each analysis result (HWE, H, L, A). Defined loci sets will exist as list sub-elements under each respective analysis. If no Loci.Sets were specified, only one list element will exist for Set1. For example, if BIGDAWG outputs to an object called 'BD' in R (see examples below):

BD (e.g., 3 loci sets defined)

..\$HWE - Results of the Hardy-Weinberg equilibrium test
..\$H\$Set1 - Results of the 'Haplotype' analysis for Set1
..\$H\$Set2 - Results of the 'Haplotype' analysis for Set2
..\$H\$Set3 - Results of the 'Haplotype' analysis for Set3
..\$L\... - Results of the 'Locus' analysis
..\$A\... - Results of the 'Amino Acid' analysis
or when "All.Pairwise=T" (e.g., 3 loci, 3 pairwise comparisons possible, single locus set)

..\$HWE - Results of the Hardy-Weinberg equilibrium test
..\$H\$Set1.PairwiseSet1 - Results of the 'Haplotype' analysis for Set1 Pairwise Set 1
..\$H\$Set1.PairwiseSet2 - Results of the 'Haplotype' analysis for Set1 Pairwise Set 2
..\$H\$Set1.PairwiseSet3 - Results of the 'Haplotype' analysis for Set1 Pairwise Set 3
..\$L\$Set1 - Results of the 'Locus' analysis for Set1
..\$A\$Set1 - Results of the 'Amino Acid' analysis for Set1

names(BD) - Display complete list of available sets to index. For a complete list of available pairwise results, use names(BD\$H).

This results list is written to the results directory as an R object called 'Analysis.RData' for later use.

## Error Messages and Codes

BIGDAWG has a few built-in checks to ensure data format consistency and compatibility, especially for HLA data. BIGDAWG also does a parameter review before performing chi-squared tests and returns ‘NCalc’ (not calculated) when all genotypes have expected counts < 5 or the degrees of freedom do not allow for a test (e.g., dof < 1).

## Known Issues

BIGDAWG's output includes locus frequencies to provide convenient access for future reference. However, the values have been rounded to 5 digits (arbitrarily chosen) to make the output more concise. This rounding may introduce errors in the frequencies wherein they do not sum to 1. If downstream use of the allele frequencies is required, we suggest the user calculate frequencies directly from the counts tables for their own application. Please refer to the ?round help documentation and its Warning section for more detail on rounding considerations.

## Parameters

`BIGDAWG(Data, HLA=T, Run.Tests, Loci.Set, Exon, All.Pairwise=F, Trim=F, Res=2, Missing=0, Strict.Bin=F, Cores.Lim=1L, Results.Dir, Return=F, Output=T, Verbose=T)`

**Data**

Class: String. Required. No Default.

e.g., `Data=HLA_data` -or- `Data="HLA_data"` -or- `Data="foo.txt"` -or- `Data=foo.txt`

Specifies genotype data file name. May use file name within working directory or full file name path to specify file location. See Data Input section for details about file formatting. Use 'Data=HLA_data' to analyze the bundled synthetic dataset.

**HLA**

Class: logical. Optional. Default = T.

Indicates whether or not your data is specific for HLA loci. If your data is not HLA, is a mix of HLA and data for other loci, or includes non-standard HLA allele names, you should set `HLA = F`. This will override the Trim and EVS.rm arguments, and will skip various tests and checks. Set `HLA = T` if and only if the dataset's HLA allele names are consistent with the most recent [IPD-IMGT/HLA](https://www.ebi.ac.uk/ipd/imgt/hla/) database release.

**Run.Tests**

Class: String or Character vector. Optional. Default = Run all tests.

e.g., `Run.Tests = c("L","A")` -or- `Run.Tests = "HWE"`

Specifies which tests to run in the analysis. "HWE" will run the Hardy Weinberg Equilibrium test, "H" will run the haplotype association test, "L" will run the locus association test, and "A" will run the amino acid association test. Combinations of the tests are permitted as indicated in the example. The amino acid test generally requires the most processing time. Taking advantage of multi-core machines can minimize this time (see below). Moreover, avoid defining multiple Loci.Sets (see below) with overlapping loci as processing time will increase redundantly.
Currently, the amino acid analysis is limited to the HLA-A, -B, -C, -DRB1, -DRB3, -DRB4, -DRB5, -DQA1, -DQB1, -DPA1 and -DPB1 loci.

**Loci.Set**

Class: List. Optional. Default = Use all loci.

e.g., `Loci.Set=list(c("DRB1","DQB1"),c("A","DRB1","DPB1"), c("DRB1","DRB3"))` -or- `Loci.Set=list("A")`

Input list defining which loci to use for analyses. Combinations are permitted. If you included HLA-DRB3,-DRB4,-DRB5 as a collapsed column pair ('DRB3/4/5'), you must specify the single locus in the Loci.Set if you wish them to be included in an analysis set (i.e., 'DRB3' **NOT** 'DRB3/4/5'). The pair of alleles for a locus must be in adjacent columns in the analyzed data set. Running multiple sets is generally only relevant for the haplotype analysis without all pairwise combinations. For all other analyses, loci are treated independently. Consider running the haplotype analysis independently when optioning multi-locus sets that include overlapping loci to avoid redundancies. Each locus set's output will be contained within a separate set folder numbered numerically corresponding to its order in the Loci.Set parameter (see Data Output section).

**Exon**

Class: Numeric. Optional.

e.g., `Exon=3` -or- `Exon=c(3,5,6)` -or- `Exon=c(2:3)` -or- `Exon=1:4`

A single numeric or numeric vector that defines exons to target in the amino acid analysis. The amino acid alignment is parsed according to the overlap of the defined exons. When amino acid codons overlap exon boundaries, the exon with the majority overlap (2 out of 3 nucleotides) is assigned that residue. This argument is only relevant to the amino acid analysis. The defined exons are not required to be continuous. Multiple sets are not analyzed separately. The defined exons are applied to all loci in the analysis. If an exon does not exist for a given locus, BIGDAWG will register an error and the analysis will stop. In such instances, it is recommended you analyze those loci separately.

**All.Pairwise**

Class: Logical. Optional. Default = F.

Should pairwise combinations of loci be run in the haplotype analysis? Only relevant to the haplotype analysis. When optioned, only pairwise combinations of loci will be run and not all the loci in a given data set.

**EVS.rm**

Class: Logical. Optional. Default = F. (`HLA=T` specific).

Flags whether or not to strip expression variant suffixes from HLA alleles. Example: A\*01:11N will convert to A\*01:11. Should not be optioned for data that does not conform to HLA naming conventions.

**Trim**

Class: Logical. Optional. Default = F. (`HLA=T` specific).

Flags whether or not to trim HLA alleles to a specified resolution. Should not be optioned for data that does not conform to HLA naming conventions.

**Res**

Class: Numeric. Optional. Default = 2. (`HLA=T` specific).

Sets the desired resolution when trimming HLA alleles. Used only when `Trim = T`. Fields for HLA formatting must follow current colon-delimited nomenclature conventions. Currently, the amino acid analysis will automatically truncate to 2-field resolution. Trimming is automatic and need not be optioned for the amino acid analysis to run. This test will not run for data that does not conform to HLA naming conventions.

**Missing**

Class: String/Numeric. Optional. Default = 0.

Sets the allowable per subject threshold for missing alleles. Relevant to running the haplotype analysis. Effects on processing time and memory allocation can be severe for large values (>2) of Missing.
Missing may be set as a number or as "ignore" to skip removal and retain all subjects. If you find BIGDAWG spending a lot of time on the "Estimating Haplotypes..." step, reduce your Missing to a value less than or equal to 2.

**Strict.Bin**

Class: Logical. Optional. Default = FALSE.

Sets whether strict binning should be used during Chi Square testing. Strict binning (Strict.Bin = T) will bin all rare cells (expected count < 5). Otherwise, BIGDAWG will allow for up to 20% of the cells to have expected counts less than 5. Currently limited to the H, L, and A tests. This may rescue haplotypes/alleles/amino acids from binning and help identify significant loci/alleles.

**Cores.Lim**

Class: Integer. Optional. Default = 1 Core.

Specifies the number of cores accessible by BIGDAWG in the amino acid analysis. Not relevant to Windows operating systems, which will use only a single core. More than 1 core is best when optioned in command line R and is not recommended when used in combination with a GUI, e.g. RStudio.

**Results.Dir**

Class: String. Optional. Default = see Data Output section.

String name of a folder for BIGDAWG output. Subfolders for each locus set will be generated within any output folder specified.

**Return**

Class: Logical. Optional. Default = F.

Specifies if BIGDAWG should output analysis results to a specified object.

**Output**

Class: Logical. Optional. Default = T.

Turns on or off the writing of results to files. The default will write all results to text files in the output directory.

**Merge.Output**

Class: Logical. Optional. Default = F.

Turns on or off the merging of all analysis results into single files labeled 'Merged_xxxx.txt'. This process is rapid for smaller sets of loci (less than 50). However, it can become increasingly CPU-time intensive when there are many loci analyzed in conjunction with all pairwise combinations in the haplotype analysis. This parameter is subordinate to Output=T.

**Verbose**

Class: Logical. Optional. Default = T.

Sets the level of detail that should be displayed on the console. The default will display summaries of the analysis from each specified test. When turned off, only the completion status of each test is displayed.

## Examples

These are examples only and need not be run as defined below.

```
# Install the BIGDAWG package
install.packages("BIGDAWG")

# Run the full analysis using the example set bundled with BIGDAWG
BIGDAWG(Data="HLA_data")

# Run the haplotype analysis with all pairwise combinations on a file called 'data.txt'
BIGDAWG(Data="data.txt", Run.Tests="H", All.Pairwise=T)

# Run the Hardy-Weinberg and Locus analysis with non-HLA data while ignoring any missing data on a file called 'data.txt'
BIGDAWG(Data="data.txt", HLA=F, Run.Tests=c("HWE","L"), Missing="ignore")

# Run the amino acid analysis on exons 2 and 3, trimming data to 2-Field resolution on a file called 'data.txt'
BIGDAWG(Data="data.txt", Run.Tests="A", Exon=c(2,3), Trim=T, Res=2)

# Run the haplotype analysis with subsets of loci on a file called 'data.txt'
BIGDAWG(Data="data.txt", Run.Tests="H", Loci.Set=list(c("DRB1","DQB1","DPB1"),c("DRB1","DQB1")))

# Run the full analysis, minimize console output, disable write to file, output to object 'BD'
BD <- BIGDAWG(Data="data.txt", Output=F, Return=T, Verbose=F)
```

## Updating the bundled IMGT/HLA protein alignment

For the amino acid analysis, BIGDAWG is bundled with HLA protein alignment data using the above indicated database release.
These bundled alignments can be updated to the most recent release [IPD-IMGT/HLA](https://www.ebi.ac.uk/ipd/imgt/hla/). Future database updates do not guarantee compatibility with the updating tool.

```
# Identify the installed and current release of the bundled IMGT/HLA database release
# Requires active internet connection.
CheckRelease()
CheckRelease(Package=F) # restricts to IMGT/HLA database versions only

# Update to the most recent IMGT/HLA database release
UpdateRelease()

# Force update
UpdateRelease(Force=T)

# Restore to the original bundled version.
UpdateRelease(Restore=T)
```

## Updating BIGDAWG to latest developmental versions

Developmental versions of BIGDAWG can be downloaded through [GitHub](https://github.com/IgDAWG/BIGDAWG/) or using the following code (requires R package 'devtools'). These versions will include the most up-to-date bug fixes as well as access to new features that are still under development. GitHub versions are constantly under development, and if you prefer a more stable fixed release, install BIGDAWG from the CRAN repository. You may check BIGDAWG versions using CheckRelease(). Local building of vignettes requires pandoc and pandoc-citeproc if you do not use RStudio [Pandoc](https://pandoc.org/installing.html). Before installation from GitHub, it is recommended that all other R packages be up to date.

```
# Identify the installed (local), release (CRAN), and developmental (GitHub) versions of BIGDAWG.
# Requires active internet connection.
CheckRelease()
CheckRelease(Alignment=F) # restricts to BIGDAWG versions only

# Install the 'devtools' package if it is required for installation via GitHub.
install.packages("devtools")

# Load latest BIGDAWG version from GitHub
# May require closing and reopening of R Studio after install
library("devtools")
install_github("IgDAWG/BIGDAWG", build_vignettes = TRUE) # Requires Pandoc or RStudio
install_github("IgDAWG/BIGDAWG") # No Pandoc or RStudio

# For a temporary install of the developmental version
library("devtools")
dev_mode(on=T)
install_github("IgDAWG/BIGDAWG")
# .... run BIGDAWG analysis
dev_mode(on=F)
```

*End of vignette.*
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/vignettes/BIGDAWG.Rmd
---
title: "GLSconvert"
author: "Derek Pappas, Ph.D. ([email protected])"
date: "2020-02-10"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{GLSconvert}
  %\VignetteEngine{knitr::rmarkdown}
  \usepackage[utf8]{inputenc}
---

## Overview

GLSconvert represents a suite of tools for cross converting HLA or KIR genotyping data from gene list text strings to multi-column tabular format as described in Milius RP, Mack SJ, Hollenbach JA, et al. 2013. Genotype List String: a grammar for describing HLA and KIR genotyping results in a text string. [Tissue Antigens. 82:106-112](https://pubmed.ncbi.nlm.nih.gov/23849068/).

### Anatomy of a GL String

The figure below depicts a Genotype List (GL) String representation of a multilocus unphased genotype. A GL String representing HLA-A genotype (A*02:69 and A*23:30, or A*02:302 and, either A*23:26 or A*23:39) and HLA-B genotype (B*44:02:13 and B*49:08) for a single individual is shown. GL String delimiters are parsed hierarchically starting from the locus delimiter (^), proceeding to the genotype delimiter (|), then the chromosome delimiter (+), and ending with the allele delimiter (/). A GL String should include the genetic system name (HLA or KIR) as part of the locus name.

![](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3715123/bin/tan0082-0106-f1.jpg)

### Input Data

Data may be passed to GLSconvert() as a tab delimited text file name (full path recommended) or as an R object (data frame). Whether a text file or an R object, the first row must be a header line and include column names for the GL string/locus genotypes. Any column preceding the genotyping information is considered to be identifying/miscellaneous information. This would generally include at least the sample id. While there is no limit to the number of columns, the direction of the conversion may dictate a specific column order. Empty rows will be excluded from final output.

_**GL String to Table**_

Formatting for GL string conversion requires that the last column of the data table contain the GL string.

|SubjectID |Exp ID |GLString |
|----------|:------:|:---------------------------------------------:|
|Subject1 |Center1 |HLA-A\*01:01+HLA-A\*02:01\^HLA-B\*08:01+HLA-B\*44:02\^HLA-DRB1\*01:01+HLA-DRB1\*03:01 |

_**Table to GL String**_

Formatting for table conversion requires at least three columns. One (or more) column(s) of identifying information followed by column pairs for each locus. Genotype locus pairs must be located in adjacent columns. Column names for a given locus may (but are not required to) use '_1', '.1', '_2', '.2' to distinguish each locus pair. Only columns defining genotype names for each locus may repeat; all other column names must be unique. You may format your alleles as Locus*Allele or Allele following defined HLA and KIR naming conventions.

|SubjectID |Exp ID | A | A | B | B | DRB1 | DRB1 | DRB3 | DRB3 |
|----------|:------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|
|Subject1 |Center1 |01:01 |02:01 |08:01 |44:02 |01:01 |04:01 | | |

**Ambiguity**

Only ambiguity at the allele level is compatible with the GL conversion tool (separated by "/"). Ambiguity at the genotype level (separated by "|") cannot be used with the GL conversion tool and will terminate the script. This includes tables submitted for conversion to GL strings containing rows of identical non-genotype identifying information (sample ID, experiment ID, etc.); these will be considered to be ambiguous genotypes in the table and the conversion tool will stop.
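To make the delimiter hierarchy described under 'Anatomy of a GL String' concrete, the short base R sketch below (illustrative only, and not part of the GLSconvert interface) parses a single unambiguous GL string by splitting first on the locus delimiter, then on the chromosome delimiter, and finally on the allele delimiter.

```
# Hypothetical GL string following the example table above
gl <- "HLA-A*01:01+HLA-A*02:01^HLA-B*08:01+HLA-B*44:02^HLA-DRB1*01:01+HLA-DRB1*03:01"

# 1. Split into loci on '^'
loci <- strsplit(gl, "^", fixed = TRUE)[[1]]

# 2. Split each locus into its two chromosomes on '+'
chromosomes <- lapply(loci, function(x) strsplit(x, "+", fixed = TRUE)[[1]])

# 3. Split any remaining allele-level ambiguity on '/'
alleles <- lapply(chromosomes, function(x) strsplit(x, "/", fixed = TRUE))
```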
**Homozygosity**

Homozygous allele calls can be represented as single alleles in the GL string. For example, HLA-A\*01:01:01:01 + HLA-A\*01:01:01:01 can be written as HLA-A*01:01:01:01. This only applies to Tab2GL conversions. When a locus is represented by a single allele in a GL string, that allele will be reported in both fields for that locus in the converted table.

**HLA-DRB3, HLA-DRB4, and HLA-DRB5**

HLA-DRB3, HLA-DRB4, and HLA-DRB5 are parsed for homozygous or hemizygous status based on the DRB1 haplotype as defined by Andersson, 1998 (Andersson G. 1998. Evolution of the HLA-DR region. [Front Biosci. 3:d739-45](https://pubmed.ncbi.nlm.nih.gov/9675159/)) and can be flagged for inconsistency. Inconsistent haplotypes will be indicated in a separate column called "DR.HapFlag" with the locus or loci that are inconsistent with the respective DRB1 status. You may choose not to have haplotypes flagged using the 'DRB345.Check' parameter (see below).

**Missing Information**

For HLA-DRB3, HLA-DRB4, and HLA-DRB5 (HLA-DRBx): when there is missing information, either for lack of genotyping calls or absence of genotyped loci, GLSconvert allows for a convention to differentiate data missing due to genomic structural variation (i.e., locus absence). The acceptable indicator of locus absence is the 2-Field designation HLA-DRBx*00:00 (x = 3,4,5). For example, HLA-DRB5\*00:00+HLA-DRB5\*00:00 would indicate absence of a HLA-DRB5 locus and not a failed or missing genotype call. You may choose to have GLSconvert fill in absent calls for these loci. For Tab2GL conversion, an NA can be used to indicate data missing due to lack of a genotyping call; however, an NA is not compatible with GL2Tab conversion and should be avoided.

## Data Output

Data can be output to either a text file (tab or comma delimited) or an R object (sent to a data frame). See Output parameters below. When running the GL2Tab conversion, all adjacent pairs of loci will include '_1' and '_2' to distinguish each chromosome. Please note that subsequent programs used to analyze the data table, such as BIGDAWG or Pypop, may not accept files with ambiguous genotyping data.

## Parameters

`GLSconvert(Data,Convert,File.Output="txt",System="HLA",HZY.Red=FALSE,DRB345.Check=FALSE,Strip.Prefix=TRUE,Cores.Lim=1L)`

**Data**

Class: String/Object. (No Default).

e.g., Data="/your/path/to/file/foo.txt" -or- Data="foo.txt" -or- Data=foo

Specifies data file name or data object. File name is either a full file name path to specify file location (recommended) or the name of a file within a set working directory. See Data Input section for details about file formatting. **This parameter is required for the conversion utility.**

**Convert**

Class: String. Options: "GL2Tab" -or- "Tab2GL" (No Default).

Specifies the direction of the conversion, either from GL strings to tabular format ("GL2Tab") or from tabular format to GL strings ("Tab2GL"). **This parameter is required for the conversion utility.**

**File.Output**

Class: String. Options: "R" -or- "txt" -or- "csv" -or- "pypop" (Default = "txt").

Specifies the type of output for the converted genotypes. For file writing, if you specified the full path for a file name then the resultant file will be written to the same directory. Otherwise the file will be written to whichever working directory was defined at initiation of conversion. The converted file name will be of the form "Converted_foo.txt" depending on the output setting.
If the data was passed as an R object, the file name will be "Converted.txt" if output to file is desired. Output as an R object requires assignment to an object (see examples below).

**System**

Class: String. Options: "HLA" or "KIR" (Default="HLA").

Defines the genetic system of the data being converted. This parameter is required for Tab2GL conversion and is ignored for GL2Tab. The default system is HLA.

**HZY.Red**

Class: Logical (Default=FALSE).

Homozygous reduction: should non-DRBx homozygotes be represented by a single allele name in the GL string? For example: HLA-A*01:01:01:01+HLA-A*01:01:01:01 as HLA-A*01:01:01:01. The default behavior is to keep both allele names in the GL string. This parameter is only used when Convert = Tab2GL, and only applies to non-DRBx genotype data.

**DRB345.Check**

Class: Logical (Default=FALSE).

Indicates whether DR haplotypes should be parsed for correct zygosity and unusual DR haplotypes flagged. Inconsistent loci will appear flagged in a separate column labeled 'DR.HapFlag' that follows the genotype columns. When enabled, unusual haplotypes will be flagged. HLA-DRBx alleles without a respective HLA-DRB1 will remain unchanged and the flag will say 'ND' for not determined.

**Strip.Prefix**

Class: Logical (Default=TRUE).

Applies only to Convert="GL2Tab" conversions. Indicates whether alleles should be stripped of System/Locus prefixes in the final data when converting from GL strings to tabular format. For example, whether HLA-A*01:01:01:01 should be recorded as 01:01:01:01 in the final table. Required when outputting to a PyPop compatible file. The default will strip prefixes.

**Abs.Fill**

Class: Logical (Default=FALSE).

Relevant only to data containing one or more of the loci: HLA-DRB3, HLA-DRB4, or HLA-DRB5. Directs GLSconvert to fill in missing information with the 2-Field designation HLA-DRBx\*00:00. For example, when data contain HLA-DRB5 typing, those subjects with no HLA-DRB5 will be given the designation HLA-DRB5\*00:00 or HLA-DRB5\*00:00+HLA-DRB5\*00:00 depending on the situation. If you have absent locus designations already present in your data, then GLSconvert will remove them in the final output.

## Examples

These are examples only and need not be run as defined below.

```
# Run the GL2Tab conversion on a data file with default output to text file and no prefix stripping
GLSconvert(Data="/your/path/to/file/foo.txt", Convert="GL2Tab", Strip.Prefix=FALSE)

# Run the Tab2GL conversion on an R object outputting to an R object with DRB345 flagging
foo.tab <- GLSconvert(Data=foo, Convert="Tab2GL", File.Output="R", DRB345.Check=TRUE)

# Run the Tab2GL conversion on an R object outputting to a csv file and with homozygous allele reduction of non-DRB345 alleles
GLSconvert(Data=foo, Convert="Tab2GL", File.Output="csv", HZY.Red=TRUE)

# Run the GL2Tab conversion on a data file without the full path name
setwd("/your/path/to/file")
GLSconvert(Data="foo.txt", Convert="GL2Tab")
```

*End of vignette.*
/scratch/gouwar.j/cran-all/cranData/BIGDAWG/vignettes/GLSconvert.Rmd
#' Bliss Independence Model #' #' This function returns fractional response levels for when these are based on #' Bliss Independence Model. #' #' @inheritParams generalizedLoewe Blissindependence <- function(doseInput, parmInput, ...) { pars <- parmInput increasing <- pars["m1"] >= pars["b"] && pars["m2"] >= pars["b"] decreasing <- pars["m1"] <= pars["b"] && pars["m2"] <= pars["b"] ## If agonist and antagonist, give an error if (!(increasing || decreasing)) { stop("Bliss independence does not work for diverging marginal curves.") } # Calculate prediction mono and rescale to max upper for percentage maxRange <- max(abs(pars["m1"]-pars["b"]), abs(pars["m2"]-pars["b"])) if (maxRange == 0) # special case of 2 flat profiles return(pars["b"] + rep(0, nrow(doseInput))) pred1 <- L4(doseInput[["d1"]], b = pars["h1"], logEC50 = pars["e1"], L = 0, U = 1) * abs(pars["m1"]-pars["b"]) / maxRange pred2 <- L4(doseInput[["d2"]], b = pars["h2"], logEC50 = pars["e2"], L = 0, U = 1) * abs(pars["m2"]-pars["b"]) / maxRange # Bliss independence combination - prediction value in percentage bliss <- pred1 + pred2 - pred1*pred2 # rescale direction <- 1*increasing - 1*decreasing pars["b"] + direction * maxRange * bliss }
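## Illustrative sketch (not run; hypothetical parameter values, not part of the
## package interface): on the rescaled [0, 1] response scale the Bliss prediction
## combines the monotherapy fractions f1 and f2 as f1 + f2 - f1*f2 before being
## mapped back to the observed response scale.
# pars <- c(h1 = 2, h2 = 1, b = 0, m1 = 1, m2 = 1,
#           e1 = log(0.5), e2 = log(0.2))
# Blissindependence(data.frame(d1 = c(0.5, 1), d2 = c(0.2, 0.4)), pars)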
/scratch/gouwar.j/cran-all/cranData/BIGL/R/Blissindependence.R
#' Summary of \code{MarginalFit} object #' #' @param object Output of \code{\link{fitMarginals}} function #' @param ... Further arguments #' @export summary.MarginalFit <- function(object, ...) { ans <- list() ans$coef <- matrix(object$coef[c("h1", "h2", "m1", "m2", "e1", "e2")], ncol = 2, byrow = TRUE) colnames(ans$coef) <- if (!is.null(object$names)) object$names else c("Compound 1", "Compound 2") rownames(ans$coef) <- c("Slope", "Maximal response", "log10(EC50)") # show log10(EC50) in the summary output ans$coef["log10(EC50)", ] <- log10(exp(ans$coef["log10(EC50)", ])) # show slope always positive ans$coef["Slope", ] <- abs(ans$coef["Slope", ]) ans$baseline <- object$coef["b"] ans$vcov <- object$vcov ans$n <- nrow(object$data) ans$formula <- object$model$formula ans$transforms <- object$transforms class(ans) <- "summary.MarginalFit" ans } #' Print method for summary of \code{MarginalFit} object #' #' @param x Summary of \code{MarginalFit} object #' @param ... Further arguments #' @export print.summary.MarginalFit <- function(x, ...) { cat("Formula:", x$formula) cat("\n") cat("Transformations:", ifelse(is.null(x$transforms), "No", "Yes")) cat("\n\n") print(round(x$coef, 3)) cat("\n") cat("Common baseline at:", round(x$baseline, 3)) cat("\n") if (is.character(x$vcov)) warning(x$vcov) else if (any(eigen(x$vcov)$values < 0)) warning("Hessian is not positive definite. Estimates might be unstable.") else if (any(sqrt(na.omit(diag(x$vcov)[c("e1", "e2")])) > 0.3)) warning("Variance of EC50 estimates may be large.") if (x$n < 12) warning("There are less than 12 points in total. Estimates might be imprecise.") } #' Coefficients from marginal model estimation #' #' @inheritParams summary.MarginalFit #' @export coef.MarginalFit <- function(object, ...) { object$coef } #' Predict values on the dose-response curve #' #' @inheritParams summary.MarginalFit #' @param newdata An optional data frame in which to look for \code{d1} and #' \code{d2} variables with which to predict. If omitted, the fitted values #' are used. Doses that are passed to this function must correspond to #' marginal data, i.e. at least one of the doses must be zero. #' @export predict.MarginalFit <- function(object, newdata, ...) { if (missing(newdata)) { fitted(object) } else { dose1 <- newdata[,"d1"] dose2 <- newdata[,"d2"] response <- rep(NA, nrow(newdata)) ## If combination data is passed, throw an error if (any(dose1 > 1e-12 & dose2 > 1e-12)) stop("Predictions are only available for marginal data.") resp <- with(as.list(object$coef), { response[dose2 == 0] <- L4(dose1[dose2 == 0], h1, b, m1, e1) response[dose1 == 0] <- L4(dose2[dose1 == 0], h2, b, m2, e2) response }) if (!is.null(object$transforms$BiolT)) resp <- with(object$transforms, BiolT(resp, compositeArgs)) if (!is.null(object$transforms$PowerT)) resp <- with(object$transforms, PowerT(resp, compositeArgs)) resp } } #' Compute fitted values from monotherapy estimation #' #' @inheritParams summary.MarginalFit #' @export fitted.MarginalFit <- function(object, ...) { predict(object, newdata = object$data) } #' Estimate of coefficient variance-covariance matrix #' #' @inheritParams summary.MarginalFit #' @export vcov.MarginalFit <- function(object, ...) { object$vcov } #' Residuals from marginal model estimation #' #' @inheritParams summary.MarginalFit #' @export residuals.MarginalFit <- function(object, ...) 
{ if (is.null(object$transforms)) PowerT <- function(x) x else PowerT <- function(x) object$transforms$PowerT(x, object$transforms$compositeArgs) PowerT(object$data$effect) - fitted.MarginalFit(object) } #' Residual degrees of freedom in marginal model estimation #' #' @inheritParams summary.MarginalFit #' @export df.residual.MarginalFit <- function(object, ...) { object$df } #' Plot monotherapy curve estimates #' #' @param x Output of \code{\link{fitMarginals}} function or a #' \code{"MarginalFit"} object #' @inheritParams summary.MarginalFit #' @param ncol Number of plots per row #' @param logScale Whether x-axis should be plotted on a logarithmic scale #' @param smooth Whether to draw a smooth fitted curve (deafult), or #' line segments connecting predicted points only #' @param dataScale Whether to draw plot on original data scale in case when #' transformations were used for fitting. Default (FALSE) is to plot on the #' \code{coef(x)} scale #' @return Returns a \code{ggplot} object. It can be consequently modified by #' using standard operations on \code{ggplot} objects (if \code{ggplot2} #' package is loaded). #' @importFrom ggplot2 aes facet_wrap geom_line geom_point ggplot #' scale_x_log10 theme_bw xlab ylab #' @importFrom scales trans_new #' @importFrom stats fitted #' @export plot.MarginalFit <- function(x, ncol = 2, logScale = TRUE, smooth = TRUE, dataScale = FALSE, ...) { data <- as.data.frame(x$data) transformF <- function(z, comp) { eps <- tapply(z, comp, function(x) min(x[x != 0])) z + 0.5 * eps[comp] } labnames <- c("Response", if (!is.null(x$names)) x$names else c("Compound 1", "Compound 2")) if (!is.null(attr(x$data, "orig.colnames"))) { labnames <- unlist(attr(x$data, "orig.colnames")) } ## Reorder the data so that non-zero drug 1 observations are stacked ## above non-zero drug 2 observations dat <- rbind(data[!data$d2, ], data[!data$d1, ]) if (!is.null(x$transforms$InvBiolT) & !dataScale) { dat$effect <- with(x$transforms, InvBiolT(dat$effect, compositeArgs)) } ## Assign the appropriate Compound 1/2 label to the row scale the doses dat$comp <- rep(labnames[2:3], c(sum(!data$d2), sum(!data$d1))) # make sure given order is unchanged dat$comp <- factor(dat$comp, levels = unique(dat$comp)) dat$dose <- with(dat, ifelse(!d2, d1, d2)) if (logScale) dat$dose <- with(dat, transformF(dose, comp)) # predicted smooth curve curveDat <- unique(dat[, c("d1", "d2", "comp", "dose")]) if (smooth) { # make finer grid for smooth prediction lines gridDat <- rbind( expand.grid(d1 = makeGrid(curveDat$d1[curveDat$comp == labnames[2]], log = logScale), d2 = 0, comp = labnames[2]), expand.grid(d1 = 0, d2 = makeGrid(curveDat$d2[curveDat$comp == labnames[3]], log = logScale), comp = labnames[3])) gridDat$dose <- with(gridDat, ifelse(!d2, d1, d2)) if (logScale) gridDat$dose <- with(gridDat, transformF(dose, comp)) curveDat <- gridDat } curveDat$predicted <- predict(x, curveDat) if (!is.null(x$transforms$InvPowerT)) { curveDat$predicted <- with(x$transforms, InvPowerT(curveDat$predicted, compositeArgs)) } if (!is.null(x$transforms$InvBiolT) & !dataScale) { curveDat$predicted <- with(x$transforms, InvBiolT(curveDat$predicted, compositeArgs)) } # draw a dotted line from 0 to the first non-0 dose curveDat$type <- FALSE if (any(curveDat$d1 + curveDat$d2 == 0, na.rm = TRUE)) { minD1 <- min(dat$d1[dat$d1 != 0], na.rm = TRUE) minD2 <- min(dat$d2[dat$d2 != 0], na.rm = TRUE) curveDat$type <- (curveDat$d1<=minD1+.Machine$double.eps & curveDat$d2 == 0) | (curveDat$d2<=minD2+.Machine$double.eps & curveDat$d1 == 
0) # for non-smooth curves, we need to duplicate first non-0 dose to avoid line # breakage, as we are actually drawing 2 different lines if (!smooth) { auxDat <- rbind(curveDat[abs(curveDat$d1-minD1) < .Machine$double.eps, ], curveDat[abs(curveDat$d2-minD2) < .Machine$double.eps, ]) auxDat$type <- !auxDat$type curveDat <- rbind(curveDat, auxDat) } } p <- ggplot() + geom_line(data = curveDat, aes(x = .data$dose, y = .data$predicted, linetype = .data$type)) + geom_point(data = dat, aes(x = .data$dose, y = .data$effect)) + facet_wrap(~ comp, ncol = ncol, scales = "free_x") + scale_linetype_manual(values = c("TRUE" = "dotted", "FALSE" = "solid"), guide = "none") + xlab("Dose") + ylab("Effect") + theme_bw() if (logScale) p <- p + scale_x_log10() p } makeGrid <- function(x, n = 100, log = TRUE) { xRange <- range(x, na.rm = TRUE) minNonZero <- min(x[x != 0], na.rm = TRUE) if (log) { c(if (xRange[1] == 0) 0 else NULL, 10^seq(from = log10(minNonZero), to = log10(xRange[2]), length.out = n)) } else c(seq(from = xRange[1], to = xRange[2], length.out = n)) }
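## Illustrative usage of the S3 methods defined above (a sketch only, not run;
## it assumes the package's bundled 'directAntivirals' data set and a monotherapy
## fit obtained with fitMarginals(), as in the examples elsewhere in this package):
# data <- subset(directAntivirals, experiment == 1)
# fit <- fitMarginals(data)
# summary(fit) # monotherapy coefficient summary
# coef(fit) # estimated coefficient vector
# plot(fit, logScale = TRUE) # monotherapy dose-response curves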
/scratch/gouwar.j/cran-all/cranData/BIGL/R/MarginalFit-methods.R
#' Method for plotting response surface objects #' #' @param x Output of \code{\link{fitSurface}} #' @param color Character indicating on what values surface coloring will be #' based. #' #' If \code{color = "z-score"}, surface coloring will be based on median of #' standardized off-axis Z-scores. Median function can be replaced by other #' function using an optional \code{colorfun} argument which will be passed to #' \code{plotResponseSurface}. Color breaks are determined here by standard #' deviation of off-axis Z-scores. For \code{color = "maxR"}, coloring will be #' based on values of maxR statistic and the quantile of its distribution #' (bootstrapped or not). If \code{color = "occupancy"}, coloring will be #' based on calculated occupancy rate for the respective dose combination. #' If \code{color = "effect-size"}, coloring will be #' based on effect size for the respective dose combination. #' #' @param greyScale If \code{greyScale = TRUE}, then plot is in grey scale, #' otherwise in colour. #' @param ... Further parameters passed to \code{\link{plotResponseSurface}}. #' \code{colorBy} argument in this method is computed automatically and thus #' cannot be passed to \code{\link{plotResponseSurface}}. #' @export plot.ResponseSurface <- function(x, color = c("z-score", "maxR", "occupancy", "effect-size"), greyScale = FALSE, ...) { color <- match.arg(color) inputs <- as.list(substitute(list(...)))[-1L] reverse <- FALSE # Blue is synergy, red is antagonism if(!exists("colorPalette", inputs)) { if(greyScale){ # inputs$colorPoints = c("black", "#AAAAAA", "#AAAAAA", "white") if(color == "effect-size"){ inputs$colorPalette <- c("#636363", "grey70", "#FEFCFF") } else { inputs$colorPalette <- c("#636363", rep("grey70", 2), "#FEFCFF") } } else { if(color == "effect-size"){ inputs$colorPalette <- c("red", "grey70", "blue") } else { inputs$colorPalette <- c("red", rep("grey70", 2), "blue") } } if (x$fitResult$coef["b"] >= x$fitResult$coef["m1"] && x$fitResult$coef["b"] >= x$fitResult$coef["m2"]) { # b >= m1, m2 decreasing curves so labels are changed; if b < m1, m2 colours preserved reverse <- TRUE inputs$colorPalette <- rev(inputs$colorPalette) } # TODO: what to do in the 'undefined' case - agonist+antagonist or both flat? 
} #TODO include the name of the `color` to be used in the legend of `plotResponseSurface()` if (color == "z-score") { boundary <- sd(x$offAxisTable[["z.score"]]) inputs$colorBy <- x$offAxisTable[, c("d1", "d2", "z.score")] if (!exists("breaks", inputs)) inputs$breaks <- c(-Inf, -boundary, 0, boundary, Inf) if (!exists("main", inputs)) inputs$main <- "Z-scores" } else if (color == "maxR") { inputs$colorBy <- x$maxR$Ymean[, c("d1", "d2", "R")] q <- attr(x$maxR$Ymean, "q") #TODO add `q` to the inputs so we can use it in plotResponseSurface if (!exists("breaks", inputs)) inputs$breaks <- c(-Inf, -q, 0, q, Inf) if (!exists("main", inputs)) inputs$main <- "maxR" } else if (color == "occupancy") { inputs$colorBy <- x$occupancy inputs$colorPalette <- c("#EFF3FF", "#BDD7E7", "#6BAED6", "#2171B5") if (!exists("breaks", inputs)) inputs$breaks <- c(0, 0.25, 0.5, 0.75, 1) if (!exists("main", inputs)) inputs$main <- "Occupancy rate" } else if (color == "effect-size") { if(is.null(x$confInt)) stop("No confidence intervals were calculated") if (!exists("main", inputs)) inputs$main <- "Effect size" # synOut <- x$maxR$Ymean # names(synOut)[names(synOut) == "call"] <- "synCall" effectOut <- x$confInt$offAxis names(effectOut)[names(effectOut) == "call"] <- "effectCall" effectOut$d1 <- as.numeric(gsub("(.+)_.+", "\\1", rownames(effectOut))) effectOut$d2 <- as.numeric(gsub(".+_(.+)", "\\1", rownames(effectOut))) x_new <- effectOut # x_new <- merge(synOut, effectOut, by = c("d1","d2")) inputs$colorBy <- x_new[, c("d1", "d2", "effectCall")] if (!exists("breaks", inputs)) inputs$breaks <- seq_len(4) } inputs$data <- x$data inputs$fitResult <- x$fitResult inputs$transforms <- x$transforms inputs$null_model <- x$null_model inputs$reverse <- reverse do.call(plotResponseSurface, inputs) } #' Method for plotting of contours based on maxR statistics #' #' @param x Output of \code{\link{fitSurface}} #' @param colorBy String indicating the characteristic to use for coloring ("maxR" or "effect-size"). By default, "maxR". #' @param reverse.x Reverse x axis? #' @param reverse.y Reverse y axis? #' @param swapAxes Swap x and y axes? #' @param greyScale If \code{greyScale = TRUE}, then plot is in grey scale, #' otherwise in colour. #' @param ... Further parameters passed to \code{\link{plot.maxR}} or \code{\link{plot.effect-size}} #' @export contour.ResponseSurface <- function(x, colorBy = "maxR", reverse.x = FALSE, reverse.y = FALSE, swapAxes = FALSE, greyScale = FALSE, ...) { if (!exists("maxR", x)) stop("maxR statistics were not found.") cpdNames <- if (!is.null(x$names)) x$names else c("Compound 1", "Compound 2") args <- list(...) if (!exists("xlab", args)) args$xlab <- paste0("Dose (", cpdNames[[1]], ")") if (!exists("ylab", args)) args$ylab <- paste0("Dose (", cpdNames[[2]], ")") ## Blue is synergy, red is antagonism if (!exists("colorPalette", args)) { if(greyScale){ args$colorPalette <- c( "#636363", "grey70", "white") #args$colorPalette <- c("#636363", "white", "#BDBDBD") } else { args$colorPalette <- c("red", "white", "blue") } names(args$colorPalette) <- c("Ant", "None", "Syn") if (x$fitResult$coef["b"] >= x$fitResult$coef["m1"] && x$fitResult$coef["b"] >= x$fitResult$coef["m2"]) { args$colorPalette <- rev(args$colorPalette) } # TODO: what to do in the 'undefined' case - agonist+antagonist or both flat? 
} if (colorBy == "maxR") { args$x <- x$maxR } else if (colorBy == "effect-size") { args$x <- x } args$reverse.x <- reverse.x args$reverse.y <- reverse.y args$swapAxes <- swapAxes class(args$x) <- c(colorBy, setdiff(class(args$x), c("maxR", "effect-size"))) do.call(plot, args) } #' Summary of \code{ResponseSurface} object #' #' @param object Output of \code{\link{fitSurface}} #' @param ... Further parameters #' @export summary.ResponseSurface <- function(object, ...) { ans <- list() ans$marginalFit <- summary(object$fitResult) ans$null_model <- object$null_model ans$shared_asymptote <- object$fitResult$shared_asymptote if (!is.null(object$meanR)) ans$meanR <- summary(object$meanR) if (!is.null(object$maxR)) ans$maxR <- summary(object$maxR) ans$occup <- if (!is.null(object$occupancy)) mean(object$occupancy$occupancy) else NULL ans$method <- object$method object$confInt$cutoff = object$cutoff ans$CI = summary(object$confInt) class(ans) <- "summary.ResponseSurface" ans } #' Print method for the summary function of \code{ResponseSurface} object #' #' @param x Summary of \code{ResponseSurface} object #' @param ... Further parameters #' @export print.summary.ResponseSurface <- function(x, ...) { cat("Null model: ") if (x$null_model == "loewe" & x$shared_asymptote == TRUE) cat("Standard Loewe Additivity") else if (x$null_model == "loewe" & x$shared_asymptote == FALSE) cat("Generalized Loewe Additivity") else if (x$null_model == "hsa" & x$shared_asymptote == TRUE) cat("Highest Single Agent with shared maximal response") else if (x$null_model == "hsa" & x$shared_asymptote == FALSE) cat("Highest Single Agent with differing maximal response") else if (x$null_model == "bliss" & x$shared_asymptote == TRUE) cat("Bliss independence with shared maximal response") else if (x$null_model == "bliss" & x$shared_asymptote == FALSE) cat("Bliss independence with differing maximal response") else if (x$null_model == "loewe2" & x$shared_asymptote == TRUE) cat("Standard Loewe Additivity") # FIXME: check else if (x$null_model == "loewe2" & x$shared_asymptote == FALSE) cat("Alternative generalization of Loewe Additivity") else cat(x$null_model) cat("\n") cat("Variance assumption used:", dQuote(x$method)) if (!is.null(x$occup)) { cat("\n") cat("Mean occupancy rate:", x$occup) } cat("\n\n") print(x$marginalFit) cat("\n") if (!is.null(x$meanR)) print(x$meanR) if (!is.null(x$maxR)) print(x$maxR) if (is.null(x$meanR) & is.null(x$maxR)) { cat("\n\n") cat("No test statistics were computed.") } if(!is.null(x$CI)) { cat("\nCONFIDENCE INTERVALS\n") print(x$CI) } cat("\n") } #' Predicted values of the response surface according to the given null model #' #' @param object Output of \code{\link{fitSurface}} #' @param ... Further parameters #' @export fitted.ResponseSurface <- function(object, ...) { doseInput <- object$data[, c("d1", "d2")] parmInput <- coef(object$fitResult) switch(object$null_model, "loewe" = generalizedLoewe(doseInput, parmInput)$response, "hsa" = hsa(doseInput, parmInput), "bliss" = Blissindependence(doseInput, parmInput), "loewe2" = harbronLoewe(doseInput, parmInput)) }
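## Illustrative usage of the methods above (a sketch only, not run; it assumes
## fitSurface() is called with its 'statistic' argument so that maxR results are
## available for plotting and contouring):
# data <- subset(directAntivirals, experiment == 1)
# fit <- fitMarginals(data)
# rs <- fitSurface(data, fit, statistic = "maxR")
# summary(rs) # null model, marginal fit and maxR summaries
# plot(rs, color = "maxR") # response surface coloured by maxR calls
# contour(rs) # contour plot of maxR calls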
/scratch/gouwar.j/cran-all/cranData/BIGL/R/ResponseSurface-methods.R
#' Obtain confidence intervals for the raw effect sizes on every off-axis point and overall #' #' @param Total data frame with all effects and mean effects #' @inheritParams fitSurface #' @inheritParams meanR #' @inheritParams generateData #' @param posEffect a boolean, are effects restricted to be positive #' @param respS the observed response surface #' @return A list with components #' \item{offAxis}{The off-axis bootstrapped confidence intervals} #' \item{single}{A mean effect and percentile and studentized boostrap intervals} bootConfInt = function(Total, idUnique, bootStraps, transforms, respS, B.B, method, CP, reps, n1, cutoff, R, fitResult, bootRS, data_off, posEffect = all(Total$effect >= 0), transFun, invTransFun, model, rescaleResids, wild_bootstrap, wild_bootType, control, digits, ...) { Total <- Total[Total$d1 & Total$d2, ] sampling_errors <- Total$effect - Total$meaneffect # sampling errors for off Axis points A <- getA(data_off, fitResult, method, CP, reps, n1, transFun = transFun, invTransFun = invTransFun) bootEffectSizesList <- lapply(bootStraps, function(bb) { #Do use bootstrapped response surface for complete mimicry of variability dat_off_resam <- within(Total, { effect <- wildbootAddResids(Total$meaneffect, sampling_errors, method, rescaleResids, model, invTransFun, wild_bootstrap, wild_bootType) #Sample with replacement if (posEffect) { effect <- abs(effect) } }) #Transforms have occurred in Total already bootR <- getR(data = dat_off_resam, idUnique = dat_off_resam$d1d2, transforms = NULL, respS = if(bootRS) bb$respS else respS) bootA <- getA(dat_off_resam, bb$simFit, method, CP, reps, n1, transFun = transFun, invTransFun = invTransFun) list("R" = bootR, "A" = bootA) }) bootEffectSizes <- vapply(bootEffectSizesList, FUN.VALUE = c(R), function(x) x$R) bootAs <- vapply(bootEffectSizesList, FUN.VALUE = diag(A), function(x) sqrt(diag(x$A))) # specify here two ways of constructing confidence intervals # 1) simultaneous CI; i.e. controls FWER # 2) False coverage rate CI, i.e. 
controls false coverage proportion #Off axis confidence interval if (control == "FCR") { # Control False coverage rate tgrid <- seq(0, 10, by = 0.01) bootEffectSizesStand <- abs(bootEffectSizes-c(R))/bootAs # Tb - standardized statistic tcount <- c() for (i in 1:ncol(bootEffectSizesStand)){ count <- c() for (j in 1:length(tgrid)){ tt <- sum(bootEffectSizesStand[,i]>tgrid[j]) count <- c(count,tt) } tcount <- rbind(tcount,count) } prob <- apply(tcount,2,sum)/(length(R)*ncol(bootEffectSizesStand)) id <- min(which(prob <= 1-cutoff)) effectSizeQuant <- tgrid[id] # t-alpha we need for FCR confInt <- c(R) + outer(effectSizeQuant*sqrt(diag(A)), c("lower" = -1, "upper" = 1)) R <- round(R, digits = digits) # round the result t two decimal places confInt <- round(confInt, digits = digits) rownames(confInt) <- rownames(bootEffectSizesStand) } else if (control == "dFCR"){ # controlling directional false coverage rate tgrid <- seq(0, 5, by = 0.01) bootEffectSizesStand <- abs(bootEffectSizes-c(R))/bootAs # Tb - standardized statistic EE.med<-median(abs(R)) # estimated effect median value zz <- (abs(R)<EE.med) EE <- R EE[zz] <- 0 # Estimated effect size set to zero if smaller than median effect size Nb <- matrix(nrow = ncol(bootEffectSizes),ncol= length(tgrid)) for (j in 1:ncol(bootEffectSizes)){ thresh <- lapply(tgrid, function(i){ low <- bootEffectSizes[,j]-i*bootAs[,j] upp <- bootEffectSizes[,j]+ i*bootAs[,j] res <- ((low>0)&(upp>0)&(EE<=0))|((low<0)&(upp<0)&(EE>=0))|((low<0)&(upp>0)&((EE<low)|(EE>upp))) # directional false coverage list("res" = res) }) threshtot <- sapply(thresh, function(y) y[["res"]]) Nb[j,]<- apply(threshtot, 2, mean) } E.Nb <- apply(Nb,2,mean) t.alpha <- tgrid[which.min(abs(E.Nb-(1-cutoff)))] confInt <- c(R) + outer(t.alpha*sqrt(diag(A)), c("lower" = -1, "upper" = 1)) R <- round(R, digits = digits) # round the result to two decimal places confInt <- round(confInt, digits = digits) rownames(confInt) <- rownames(bootEffectSizesStand) } else { # default controls FWER # Off axis confidence interval, control FWER bootEffectSizesStand <- abs(bootEffectSizes-c(R))/bootAs maxEffectSizes <- apply(bootEffectSizesStand, 2, max) effectSizeQuant <- quantile(maxEffectSizes, cutoff, na.rm = TRUE) confInt <- c(R) + outer(effectSizeQuant*sqrt(diag(A)), c("lower" = -1, "upper" = 1)) R <- round(R, digits = digits) # round the result t two decimal places confInt <- round(confInt, digits = digits) rownames(confInt) <- rownames(bootEffectSizesStand) } coefFit <- fitResult$coef eq <- coefFit["m1"] == coefFit["b"] && coefFit["m2"] == coefFit["b"] inc <- coefFit["m1"] >= coefFit["b"] && coefFit["m2"] >= coefFit["b"] dec <- coefFit["m1"] <= coefFit["b"] && coefFit["m2"] <= coefFit["b"] call <- rep("None", length(R)) call[confInt[, "lower"] > 0] <- if (eq) { "Undefined" } else if (inc) { "Syn" } else if (dec) { "Ant" } else "Undefined" call[confInt[, "upper"] < 0] <- if (eq) { "Undefined" } else if (inc) { "Ant" } else if (dec) { "Syn" } else "Undefined" confInt <- data.frame("estimate" = R, confInt, "call" = call) #Single measure of effect size singleMeasure <- mean(R) bootR <- colMeans(bootEffectSizes) bootRstand <- (bootR-singleMeasure)/vapply(bootEffectSizesList, FUN.VALUE = double(1), function(x) mean(x$A)) sdA <- mean(A) studentizedCI <- singleMeasure + sdA*quantile(bootRstand, c((1-cutoff)/2, (1+cutoff)/2), na.rm = TRUE) names(studentizedCI) <- c("lower", "upper") overallCall <- if (eq || any(is.na(studentizedCI))) { "Undefined" } else { if (studentizedCI["lower"] > 0) { if (inc) { "Syn" } else if 
(dec) { "Ant" } else "Undefined" } else if (studentizedCI["upper"] < 0) { if (inc) { "Ant" } else if (dec) { "Syn" } else "Undefined" } else { "None" } } ans <- list("offAxis" = confInt, "single" = list("meanEffect" = singleMeasure, "confIntMeanEffect" = studentizedCI, "Call" = overallCall), "cutoff" = cutoff) class(ans) <- append("BIGLconfInt", class(ans)) return(ans) }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/bootConfInt.R
#' Generate data from parameters of marginal monotherapy model #' #' This function is used to generate data for bootstrapping of the null #' distribution for various estimates. Optional arguments such as specific #' choice of sampling vector or corrections for heteroskedasticity can be #' specified in the function arguments. #' #' @param pars Coefficients of the marginal model along with their appropriate #' naming scheme. These will typically be estimated using #' \code{\link{fitMarginals}}. Futhermore, \code{pars} can simply be a #' \code{MarginalFit} object and \code{transforms} object will be #' automatically extracted. #' @param sigma Standard deviation to use for randomly generated error terms. This #' argument is unused if \code{error = 4} so that sampling error vector is #' provided. #' @param data Data frame with dose columns \code{("d1", "d2")} to generate the #' effect for. Only \code{"d1"} and \code{"d2"} columns of the dose-response #' dataframe should be passed to this argument. \code{"effect"} column should #' not be passed and if it is, the column will be replaced by simulated data. #' @param null_model Specified null model for the expected response surface. #' Currently, allowed options are \code{"loewe"} for generalized Loewe model, #' \code{"hsa"} for Highest Single Agent model, \code{"bliss"} for Bliss additivity, #' and \code{"loewe2"} for the alternative Loewe generalization. #' @param error Type of error for resampling. \code{error = 1} (Default) adds #' normal errors to the simulated effects, \code{error = 2} adds errors sampled #' from a mixture of two normal distributions, \code{error = 3} generates errors #' from a rescaled chi-square distribution. \code{error = 4} will use bootstrap. #' Choosing this option, the error terms will be resampled from the vector #' specified in \code{sampling_errors}. #' @param sampling_errors Sampling vector to resample errors from. Used only if #' \code{error = 4}. #' @param wild_bootstrap Whether special bootstrap to correct for #' heteroskedasticity should be used. If \code{wild_bootstrap = TRUE}, errors #' are generated from \code{sampling_errors} multiplied by a random variable #' following Rademacher distribution. Argument is used only if \code{error = 4}. #' @param model The mean-variance model #' @param means The vector of mean values of the response surface, for variance modelling #' @param invTransFun the inverse transformation function, back to the variance domain #' @param ... Further arguments #' @inheritParams fitSurface #' @inheritParams predictOffAxis #' @importFrom stats lm.fit rnorm rchisq rbinom #' @return Dose-response dataframe with generated data including \code{"effect"} #' as well as \code{"d1"} and \code{"d2"} columns. #' @export #' @examples #' coefs <- c("h1" = 1, "h2" = 1.5, "b" = 0, #' "m1" = 1, "m2" = 2, "e1" = 0.5, "e2" = 0.1) #' #' ## Dose levels are set to be integers from 0 to 10 #' generateData(coefs, sigma = 1) #' #' ## Dose levels are taken from existing dataset with d1 and d2 columns #' data <- subset(directAntivirals, experiment == 1) #' generateData(data = data[, c("d1", "d2")], pars = coefs, sigma = 1) generateData <- function(pars, sigma, data = NULL, transforms = NULL, null_model = c("loewe", "hsa", "bliss", "loewe2"), error = 1, sampling_errors = NULL, means = NULL, model = NULL, method = "equal", wild_bootstrap = FALSE, wild_bootType = "normal", rescaleResids, invTransFun, newtonRaphson = FALSE, bootmethod = method, ...) 
{ if(bootmethod == "model") bootmethod <- "unequal" if(wild_bootstrap){ wild_bootstrap <- FALSE # we don't need wild bootstrap here; for on-axis observations wild_bootType <- NULL } ## Argument matching null_model <- match.arg(null_model) if (inherits(pars, "MarginalFit")) { transforms <- pars$transforms pars <- pars$coef } if (is.null(data)) data <- expand.grid("d1" = rep(0:10, each = 2), "d2" = rep(0:10, each = 2)) if ("effect" %in% colnames(data)) { warning("effect column is unneeded for generateData() function and will be dropped.") data <- data[, c("d1", "d2")] } ## Use identity transformation if no transform functions are supplied if (is.null(transforms)) { idF <- function(z, ...) z transforms <- list("PowerT" = idF, "InvPowerT" = idF, "BiolT" = idF, "compositeArgs" = NULL) } ySim <- switch(null_model, "loewe" = generalizedLoewe(data, pars, asymptotes = 2, newtonRaphson = newtonRaphson)$response, "hsa" = hsa(data[, c("d1", "d2")], pars), "bliss" = Blissindependence(data[, c("d1", "d2")], pars), "loewe2" = harbronLoewe(data[, c("d1", "d2")], pars, newtonRaphson = newtonRaphson)) ySim <- with(transforms, PowerT(BiolT(ySim, compositeArgs), compositeArgs)) charEr = as.character(error) if(charEr %in% c("1", "2", "3")){ errors = switch(charEr, ## Normal "1" = {rnorm(length(ySim), 0, sigma)}, ## Two normals "2" = {ru <- sample(seq_len(2), replace = TRUE, size = length(ySim)) mus <- c(-sigma, sigma) sigmas <- c(sigma/2, sigma/3) rnorm(length(ySim), mus[ru], sigmas[ru])}, ## Distribution with right-tail outliers "3" = {sigma*(rchisq(length(ySim), df = 4)-4)/8 }) } else if(charEr == "4"){ if (wild_bootstrap) { ## Use Rademacher distribution to account for heteroskedasticity errors = sampling_errors*(2*rbinom(length(ySim), size = 1, prob = 0.5)-1) } else { if(bootmethod == "equal"){ errors = sampleResids(means = ySim, sampling_errors = sampling_errors, method = "equal", rescaleResids = FALSE) } else { idd1d2 = with(data, d1&d2) errors = integer(length(ySim)) #On-axis points errors[!idd1d2] = sampleResids(means = ySim[!idd1d2], sampling_errors = sampling_errors[!idd1d2], method = "equal", rescaleResids = FALSE) #Off-axis points errors[idd1d2] = sampleResids(means = ySim[idd1d2], sampling_errors = sampling_errors[idd1d2], method = bootmethod, rescaleResids = rescaleResids, model = model, invTransFun = invTransFun) } } } else { stop("Unavailable error type.") } ySim <- with(transforms, InvPowerT(ySim + errors, compositeArgs)) if(all(data$effect>0)){ ySim = abs(ySim) } return(data.frame("effect" = ySim, data)) } #' Estimate CP matrix from bootstraps #' #' This function is generally called from within \code{\link{fitSurface}}. #' #' @param bootStraps the bootstraps carried out already #' @param sigma0 standard deviation of the null model on the real data #' @param doseGrid a grid of dose combinations #' @inheritParams fitSurface #' @inheritParams generateData #' @importFrom stats lm.fit var #' @return Estimated CP matrix getCP = function(bootStraps, null_model, transforms, sigma0, doseGrid){ pred <- vapply(bootStraps, FUN.VALUE = bootStraps[[1]]$respS, function(b) {b$respS/sigma0}) var(t(pred)) } #' Simulate data from a given null model and monotherapy coefficients #' #' @param ... 
Further parameters that will be passed to #' \code{\link{generateData}} #' @param doseGrid A grid of dose combinations #' @param startvalues Starting values for the non-linear equation, #' from the observed data #' @inheritParams fitSurface #' @inheritParams generateData #' @return List with \code{data} element containing simulated data and #' \code{fitResult} element containing marginal fit on the simulated data. #' @export #' @examples #' data <- subset(directAntivirals, experiment == 1) #' ## Data must contain d1, d2 and effect columns #' fitResult <- fitMarginals(data) #' simDat <- simulateNull(data, fitResult, expand.grid(d1 = data$d1, d2 = data$d2), #' null_model = "hsa") simulateNull <- function(data, fitResult, doseGrid, transforms = fitResult$transforms, startvalues, null_model = c("loewe", "hsa", "bliss", "loewe2"), ...) { ## Argument matching null_model <- match.arg(null_model) method <- fitResult$method coefFit0 <- fitResult$coef sigma0 <- fitResult$sigma model <- fitResult$model control <- { if (method %in% c("nls", "nlslm")) list("maxiter" = 200) } ## Parameter estimates may at times return an error due to non-convergence. If ## necessary, repeat the step until it functions properly and 1000 times at ## most. counter <- 0 initPars <- coefFit0 repeat { simData <- generateData(pars = coefFit0, sigma = sigma0, data = data[, c("d1", "d2")], transforms = transforms, null_model = null_model, ...) ## In cases where added errors put the response into negative domain, revert ## it back to the positive one. Usually, values of such observations tend to ## be quite small. simData$effect <- abs(simData$effect) simData$d1d2 = data$d1d2 ## construct a list of arguments, including ... passed to original ## `fitMarginals` call (saved as `extraArgs`) paramsMarginal <- list("data" = simData, "method" = method, "start" = initPars, "model" = model, "transforms" = transforms, "control" = control) if (!is.null(fitResult$extraArgs) && is.list(fitResult$extraArgs)) # use `modifyList` here, since `control` could be user-defined paramsMarginal <- modifyList(paramsMarginal, fitResult$extraArgs) simFit <- try({ do.call(fitMarginals, paramsMarginal) }, silent = TRUE) counter <- counter + 1 initPars <- NULL if (counter > 1000) stop(paste("Data simulation process failed. ", "Check that transformation functions correspond ", "to the marginal model.")) if (!inherits(simFit, "try-error")) break } #Also precalculate response surface, quite computation intensive respS <- predictOffAxis(fitResult = simFit, transforms = transforms, startvalues = startvalues, doseGrid = doseGrid, null_model = null_model, ...) return(list("data" = simData, "simFit" = simFit, "respS" = respS)) } bootFun = function(i, args) { if(args$progressBar) args$pb$tick() do.call(simulateNull, args) }#Wrapper with index
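## Illustrative sketch (not part of the package code): the four error
## mechanisms documented for generateData() above, reproduced with base R so
## they can be inspected in isolation. The sample size `n` and `sigma` are
## arbitrary assumed values.
sketchGenerateErrors <- function(n = 20, sigma = 1) {
  list(
    ## error = 1: plain normal errors
    normal = rnorm(n, 0, sigma),
    ## error = 2: mixture of two normals centred at -sigma and +sigma
    mixture = {
      ru <- sample(seq_len(2), size = n, replace = TRUE)
      rnorm(n, c(-sigma, sigma)[ru], c(sigma / 2, sigma / 3)[ru])
    },
    ## error = 3: rescaled chi-square, giving right-tail outliers
    chisq = sigma * (rchisq(n, df = 4) - 4) / 8,
    ## error = 4 with wild_bootstrap = TRUE: Rademacher multipliers (+1 or -1)
    rademacher = 2 * rbinom(n, size = 1, prob = 0.5) - 1
  )
}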
## File: BIGL/R/bootstrap.R
globalVariables("rowname") #' Summary of confidence intervals object #' #' @param object Output from \code{\link{bootConfInt}} #' @param ... Further arguments #' @export summary.BIGLconfInt <- function(object, ...) { ans <- list() ans$estimate = object$single$meanEffect ans$sigLevel = paste0(round(object$cutoff*100), "%") ans$singleCI = object$single$confIntMeanEffect ans$call = object$single$Call ans$confInt = object$offAxis[object$offAxis$call %in% c("Syn", "Ant"),] ans$confInt[, c("estimate", "lower", "upper")] = round(ans$confInt[, c("estimate", "lower", "upper")], 4) ans$totals <- data.frame("Syn" = sum(object$offAxis$call == "Syn"), "Ant" = sum(object$offAxis$call == "Ant"), "Total" = nrow(object$offAxis)) rownames(ans$totals) = "" class(ans) <- append("summary.BIGLconfInt", class(ans)) ans } #' Print summary of BIGLconfInt object #' #' @param x Summary of BIGLconfInt object #' @inheritParams summary.BIGLconfInt #' @export print.summary.BIGLconfInt <- function(x, ...) { #Overall cat("Overall effect\n") cat(sep = "", "Estimated mean departure from null response surface with ", x$sigLevel, " confidence interval:\n", round(x$estimate, 4), " [", round(x$singleCI[1], 4), ", ", round(x$singleCI[2], 4), "]\n") cat("Evidence for effects in data:", x$call, "\n\n") #Pointwise cat("Significant pointwise effects\n") print(x$confInt) cat("\nPointwise", x$sigLevel, "confidence intervals summary:\n") print(x$totals) cat("\n") } #' Plot confidence intervals in a contour plot #' #' @param x off axis confidence intervals, a data frame #' @param color analysis with which to colour cells, either \code{effect-size} or \code{maxR} #' @param showAll show all intervals in the plot or only significant ones, logical defaulting to \code{TRUE} #' @param digits Numeric value indicating the number of digits used for numeric values #' @param xlab String for the x axis label #' @param ylab String for the y axis label #' @param greyScale If \code{greyScale = TRUE}, then plot is in grey scale, #' otherwise in colour. #' @param ... additional arguments, currently ignored #' @importFrom stats setNames #' @export #' @note written after the contour() function in the \code{drugCombo} package plot.BIGLconfInt <- function(x, color = "effect-size", showAll = TRUE, digits = 3, xlab, ylab, greyScale = FALSE, ...) 
{ if (missing(xlab)) xlab <- sprintf("Dose (%s)", x$names[1]) if (missing(ylab)) ylab <- sprintf("Dose (%s)", x$names[2]) if ("maxR" %in% names(x)) { synOut <- x$maxR$Ymean names(synOut)[names(synOut) == "call"] <- "synCall" effectOut <- x$confInt$offAxis names(effectOut)[names(effectOut) == "call"] <- "effectCall" effectOut$d1 <- as.numeric(gsub("(.+)_.+", "\\1", rownames(effectOut))) effectOut$d2 <- as.numeric(gsub(".+_(.+)", "\\1", rownames(effectOut))) x <- merge(synOut, effectOut, by = c("d1","d2")) } else { x <- x$confInt$offAxis names(x)[names(x) == "call"] <- "effectCall" #show doses on equidistant grid d1d2 <- rownames(x) d1d2split <- sapply(d1d2, function(y) strsplit(y, split = "_")[[1]]) x$d1 <- as.numeric(d1d2split[1,]) x$d2 <- as.numeric(d1d2split[2,]) } # prepare fill legend synCalls <- c("None", "Ant", "Syn") if (color == "effect-size") { x$synLabel <- factor(x$effectCall, labels = synCalls, levels = c("None", "Ant", "Syn")) } else { x$synLabel <- factor(x$synCall, labels = synCalls, levels = c("None", "Ant", "Syn")) } if(greyScale){ legendColors <- c("grey70", "#636363", "#FEFCFF") } else { legendColors <- c("white", "pink", "lightblue") } names(legendColors) <- synCalls # subset to only the colors that are present in the data # legendColors <- legendColors[names(legendColors) %in% as.character(unique(x$synLabel))] # text to show fmt <- sprintf("%%.%if\n(%%.%if, %%.%if)", digits, digits, digits) if (isTRUE(showAll)) { x$label <- sprintf(fmt, x$estimate, x$lower, x$upper) } else { x$label <- ifelse(x$synLabel != "None", sprintf(fmt, x$estimate, x$lower, x$upper), "") } x$d1 <- factor(x$d1, levels = sort(unique(x$d1)), labels = sort(unique(x$d1)), ordered = TRUE) x$d2 <- factor(x$d2, levels = sort(unique(x$d2)), labels = sort(unique(x$d2)), ordered = TRUE) p <- ggplot(data = x, aes(x = .data$d1, y = .data$d2)) + geom_tile(aes(fill = .data$synLabel), color = "grey") + geom_text(aes(label = .data$label), show.legend = FALSE, size = 3) + # invisible points, used only for labels geom_point(aes(color = .data$synLabel), alpha = 0) + # round dose labels to digits scale_x_discrete(labels = format(as.numeric(levels(x$d1)), digits = digits)) + scale_y_discrete(labels = format(as.numeric(levels(x$d2)), digits = digits)) + scale_fill_manual(values = legendColors, guide = "none", drop = FALSE) + scale_color_manual( # for a nicer legend values = setNames(1:3, nm = synCalls), limits = force, drop = FALSE, guide = guide_legend(title = "call:", override.aes = list(alpha = 1, shape = 22, size = 8, color = "grey", fill = legendColors)) ) + theme_minimal() + xlab(xlab) + ylab(ylab) + theme( panel.grid.major = element_blank(), legend.position = "bottom", axis.text.x = element_text(angle = 45, hjust = 1) ) p } #' Plot confidence intervals from BIGL object in a contour plot #' #' @param BIGLobj Output from \code{\link{fitSurface}} #' @param ... passed on to \code{\link{plot.BIGLconfInt}} #' @export plotConfInt <- function(BIGLobj, ...) { newBIGLobj <- BIGLobj class(newBIGLobj) <- ("BIGLconfInt") plot(newBIGLobj, ...) }
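## Illustrative sketch (not part of the package code): how plot.BIGLconfInt()
## above recovers the two doses from off-axis rownames of the form "d1_d2".
## The example rownames are made up.
rnSketch <- c("0.5_1", "0.5_2.5", "1_2.5")
data.frame(d1 = as.numeric(gsub("(.+)_.+", "\\1", rnSketch)),
           d2 = as.numeric(gsub(".+_(.+)", "\\1", rnSketch)))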
## File: BIGL/R/confInt-functions.R
#' Partial data with combination experiments of direct-acting antivirals #' #' A dataset containing 11 combination experiments of direct-acting antivirals. #' #' @name directAntivirals #' @docType data #' @format A data frame with 3520 rows and 6 variables: #' \itemize{ #' \item experiment: ID of experiment (1-11) #' \item cpd1: name of the first compound (4 different compounds) #' \item cpd2: name of the second compound (11 different compounds) #' \item effect: observed effect (cell count) #' \item d1: dose of the first compound #' \item d2: dose of the second compound #' } NULL #' Full data with combination experiments of direct-acting antivirals #' #' A dataset containing 11 combination experiments of direct-acting antivirals. #' This dataset is larger than \code{directAntivirals} dataset as it includes #' concentrations at levels of \code{1e6} which can render plots visually #' unappealing. #' #' @name directAntivirals_ALL #' @docType data #' @format A data frame with 4224 rows and 6 variables: #' \itemize{ #' \item experiment: ID of experiment (1-11) #' \item cpd1: name of the first compound (4 different compounds) #' \item cpd2: name of the second compound (11 different compounds) #' \item effect: observed effect (cell count) #' \item d1: dose of the first compound #' \item d2: dose of the second compound #' } NULL
## File: BIGL/R/data.R
#' Plot of effect-size object #' #' @param x Object of class \code{effect-size}. #' @param colorPalette Vector of color values #' @param logScale logScale #' @param zTransform zTransform #' @param digitsFunc Function to be applied to numeric values like doses. This expects a single parameter. #' @param digits Numeric value indicating the number of digits used for numeric values. Whether \code{digitsFunc} is provided, this will be ignored. #' @param ... Further arguments that are passed to \code{\link{format}} function #' for formatting of axis labels #' @inheritParams graphics::title #' @inheritParams contour.ResponseSurface #' @importFrom graphics axis filled.contour points title #' @importFrom grDevices extendrange rgb #' @importFrom ggplot2 ggplot scale_colour_stepsn coord_flip #' @importFrom data.table rbindlist #' @importFrom scales reverse_trans #' @export `plot.effect-size` <- function( x, main = "Contour plot for effect size", xlab = "Dose (Compound 1)", ylab = "Dose (Compound 2)", colorPalette, logScale = TRUE, zTransform = function(z) { z }, digits, digitsFunc, reverse.x = FALSE, reverse.y = FALSE, swapAxes = FALSE, ... ) { labels <- names(colorPalette) if (is.null(labels)) stop("Names for the vector `colorPalette` are mandatory") if (missing(digitsFunc)) { if (!missing(digits)) { digitsFunc <- function(x) round(x, digits = digits) } else { digitsFunc <- function(x) { x } } } if ("maxR" %in% names(x)) { synOut <- x$maxR$Ymean names(synOut)[names(synOut) == "call"] <- "synCall" effectOut <- x$confInt$offAxis names(effectOut)[names(effectOut) == "call"] <- "effectCall" effectOut$d1 <- as.numeric(gsub("(.+)_.+", "\\1", rownames(effectOut))) effectOut$d2 <- as.numeric(gsub(".+_(.+)", "\\1", rownames(effectOut))) x <- merge(synOut, effectOut, by = c("d1","d2")) } else { x <- x$offAxis names(x)[names(x) == "call"] <- "effectCall" #show doses on equidistant grid d1d2 <- rownames(x) d1d2split <- sapply(d1d2, function(y) strsplit(y, split = "_")[[1]]) x$d1 <- as.numeric(d1d2split[1,]) x$d2 <- as.numeric(d1d2split[2,]) } uniqueDoses <- with(x, list("d1" = sort(unique(d1)), "d2" = sort(unique(d2)))) doseGrid <- expand.grid(uniqueDoses) log10T <- function(z) log10(z + 0.5 * min(z[z != 0])) transformF <- if (logScale) log10T else function(z) z breaks <- 1:3 colourVec <- colorPalette #colorRampPalette(colorPalette)(length(breaks) - 1) breaksInfo <- data.frame( breaks = breaks, label = labels, colour = colorPalette, effect = labels ) # This will be used to plot a continuous & invisible variable to get a continuous color bar legend breaksInfo$x <- seq(min(x$d1), max(x$d1), length.out = nrow(breaksInfo)) breaksInfo$y <- seq(min(x$d2), max(x$d2), length.out = nrow(breaksInfo)) if (nrow(x) < length(unique(x$d1))*length(unique(x$d2))) { # Then we need to add the missing combinations in order to allow geom_contour_filled to generate a contour x2 <- expand.grid(d1 = x$d1, d2 = x$d2) x2$effectCallNum <- which(labels == "None") x2$effectCall <- "None" x2$estimate <- 1e-8 x <- rbindlist(list(x, x2), fill = TRUE) x <- x[!duplicated(x[, c("d1", "d2")]), ] } adjFactor <- 10 x$effectCallNum <- as.numeric(factor(x$effectCall, levels = labels)) # Fix glitch of blue line around the plot when all values are "None" if (all(x$effectCallNum == which(labels == "None"))) x$effectCallNum[1] <- x$effectCallNum[1] + 0.0001 x$effectCallNum <- x$effectCallNum/adjFactor - 1/(adjFactor*2) x$d1_t <- transformF(x$d1) x$d2_t <- transformF(x$d2) # When only one point is Ant/Syn, geom_contour_filled won't be able to display any 
coloured polygon, so we are # adding an artificial value next to it x <- rbind(x, x[x$effectCall %in% c("Syn", "Ant"),]) p <- ggplot(data = x, aes(x = .data$d1_t, y = .data$d2_t)) + geom_contour_filled( aes(z = .data$effectCallNum, colour = .data$effectCallNum), breaks = c(0, breaks/adjFactor) ) + geom_point( colour = rgb(0, 0, 0, 0.3), size = abs(x$estimate)/max(abs(x$estimate))*4 ## Size proportional to the effect size (normalized to be from 0 to 1) ) + scale_fill_manual("Call:", values = as.character(colourVec), labels = labels, drop = FALSE) + # values = colorPalette theme( panel.background = element_rect( fill = "white" ), axis.ticks.length = unit(0.1, "cm"), axis.ticks = element_line(colour = "black"), panel.border = element_rect(colour = "black", fill = NA, size = 0.5), legend.position = "bottom" ) + guides(colour = "none") + labs(title = main) + xlab(xlab) + ylab(ylab) if(reverse.x){ p <- p + scale_x_continuous( breaks = unique(x$d1_t), labels = digitsFunc(unique(x$d1)), trans = reverse_trans() ) } else { p <- p + scale_x_continuous( breaks = unique(x$d1_t), labels = digitsFunc(unique(x$d1)) ) } if(reverse.y){ p <- p + scale_y_continuous( breaks = unique(x$d2_t), labels = digitsFunc(unique(x$d2)), trans = reverse_trans() ) } else { p <- p + scale_y_continuous( breaks = unique(x$d2_t), labels = digitsFunc(unique(x$d2)) ) } if(swapAxes){ p <- p + coord_flip() } p }
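## Illustrative sketch (not part of the package code): the log10T() dose
## transformation used above, where zero doses are shifted by half the
## smallest non-zero dose before taking log10, so they stay on the plot.
## The dose values are made up.
dosesSketch <- c(0, 0.1, 0.3, 1, 3)
log10T_sketch <- function(z) log10(z + 0.5 * min(z[z != 0]))
log10T_sketch(dosesSketch)  # the zero dose maps to log10(0.05) instead of -Inf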
## File: BIGL/R/effectSize-functions.R
#' Fit two 4-parameter log-logistic functions for a synergy experiment #' #' This function uses dose-response data for two compounds and estimates #' coefficients for monotherapy models of both of these compounds such that they #' share a common baseline. Currently, these coefficients are estimated by #' default using a non-linear least squares approximation. Although entire #' dose-response data can be provided, estimation will subset the part of data #' where at least one of the compounds is dosed at zero, i.e. on-axis data. #' #' Model formula is specified as \code{effect ~ fn(h1, h2, ...)} where \code{fn} #' is a hard-coded function which fits two 4-parameter log-logistic functions #' simultaneously so that the baseline can be shared. If transformation #' functions are provided, \code{fn} is consequently adjusted to account for #' them. #' #' @param data Dose-response dataframe. Marginal data will be extracted from #' it automatically. #' @param start Starting parameter values. If not specified, they will be #' obtained from \code{\link{initialMarginal}}. #' @param constraints List of constraint matrix and vector which will be passed #' to \code{\link{constructFormula}}. If \code{constraints = NULL}, no #' constraints on parameter estimation will be imposed. #' @param fixed This arguments provides a user-friendly alternative to impose a #' fixed value for marginal parameters. It must be a named vector with names #' contained in \code{c("h1", "h2", "b", "m1", "m2", "e1", "e2")}. For #' example, \code{fixed = c("m1" = 1, "h1" = 1)} will automatically generate #' appropriate constraint matrix and vector to set the maximal response and #' the Hill coefficient of the first compound to 1. If both \code{constraints} #' and \code{fixed} arguments are passed, then only \code{fixed} will be used. #' @param method Which estimation method should be used to obtain the estimates. #' If \code{method = "nls"}, simple non-linear least squares #' \code{\link[stats]{nls}} will be used. If \code{method = "nlslm"} #' Levenberg-Marquardt non-linear least squares #' \code{\link[minpack.lm]{nlsLM}} is used instead (default). If \code{method #' = "optim"}, residual sum of squares will be minimized using general purpose #' optimization based on Nelder-Mean algorithm in \code{\link[stats]{optim}}. #' This method can be noticeably slower than the non-linear least squares #' methods. #' @param names Compound names to be used on the plot labels. #' @param ... Further arguments that are passed to the optimizer function, #' such as \code{lower} or \code{upper} (for the "nlslm" method), or #' \code{control}. #' @inheritParams fitSurface #' @importFrom methods hasArg #' @importFrom minpack.lm nlsLM #' @importFrom stats nls #' @importFrom utils modifyList #' @importFrom lifecycle deprecate_warn #' @return This function returns a \code{MarginalFit} object with monotherapy #' coefficient estimates and diverse information regarding monotherapy #' estimation. \code{MarginalFit} object is essentially a list with #' appropriately named elements. #' #' Among these list elements, \code{"coef"} is a named vector with parameter #' estimates. \code{h1} and \code{h2} are Hill's slope coefficients for each #' of the compounds, \code{m1} and \code{m2} are their maximal response levels #' whereas \code{b} is the shared baseline. Lastly, \code{e1} and \code{e2} #' are log-transformed EC50 values. 
#' #' \code{"sigma"} is standard deviation of residuals for the estimated #' monotherapy model and \code{"df"} is the degrees of freedom for the #' residuals. \code{"vcov"} is the variance-covariance matrix of the estimated #' parameters. #' #' Return object also contains information regarding data, biological and #' power transformations used in this estimation as well as model construct #' and method of estimation. #' @examples #' data <- subset(directAntivirals, experiment == 1) #' ## Data must contain d1, d2 and effect columns #' transforms <- getTransformations(data) #' fitMarginals(data, transforms) #' @export fitMarginals <- function(data, transforms = NULL, start = NULL, constraints = NULL, fixed = NULL, method = c("nlslm", "nls", "optim"), names = NULL, ...) { method <- match.arg(method) if(!is.null(transforms)){ deprecate_warn(when = "newer versions", what = "BIGL::fitMarginals(transformations = 'will be deprecated')", details = "Please transform response before all analysis.") } ## Verify column names of input dataframe if (!all(c("effect", "d1", "d2") %in% colnames(data))) stop("effect, d1 and d2 arguments must be column names of data") ## Keep only marginal data data <- data[(abs(data$d1) < .Machine$double.eps | abs(data$d2) < .Machine$double.eps), ] fitArgs <- list("data" = data, "transforms" = transforms) ## If no starting parameters are provided, make an initial guess if (is.null(start)) start <- initialMarginal(data, transforms) ## If model argument is provided in the ellipsis, re-use it instead of ## recalculating everything based on constraints. if (!hasArg("model") || is.null(list(...)$model)) { if (!is.null(fixed)) { if (!is.null(constraints)) warning("Both `fixed` and `constraints` parameters were specified. Only `fixed` will be used.") fixedInd <- match(names(fixed), c("h1", "h2", "b", "m1", "m2", "e1", "e2")) if (any(is.na(fixedInd))) { stop(paste0("Please use parameter names: h1, h2, b, m1, m2, e1 or m2, ", "or construct the formula using constructFormula().")) } constraints <- list("matrix" = diag(1, 7)[fixedInd,], "vector" = fixed) } ## vars_x <- colnames(data)[!grepl("effect", colnames(data))] fitArgs$model <- { if (is.null(constraints)) ## No constraints provided constructFormula() else ## With constraints constructFormula(constraints$matrix, constraints$vector) } } else { fitArgs$model <- list(...)$model } ## Pass ... along, except for 'model' :-/ ## FIXME by renaming 'model' argument here and in marginalNLS extraArgs <- list(...) extraArgs <- extraArgs[names(extraArgs) != "model"] fitArgs <- modifyList(fitArgs, extraArgs, keep.null = TRUE) ## Subset only free parameters fitArgs$start <- start[fitArgs$model$free] fitResult <- switch(method, "nlslm" = { fitArgs$nlsfn <- nlsLM; do.call(marginalNLS, fitArgs) }, "nls" = { fitArgs$nlsfn <- nls; do.call(marginalNLS, fitArgs) }, "optim" = { do.call(marginalOptim, fitArgs) }) fitResult$method <- method # use default names if unspecified if (is.null(names) || !is.character(names) || length(names) != 2) { names <- c("Compound 1", "Compound 2") } fitResult$names <- names class(fitResult) <- append("MarginalFit", class(fitResult)) fitResult } #' Construct a model formula from parameter constraint matrix #' #' For parameter names defined in \code{naming} vector, formula is constructed #' so that \code{consMatrix \%*\% naming = consVector} is satisfied. Constraint #' coefficients are normalized and convert into fractions. 
#' #' @param consMatrix Constraint matrix #' @param consVector Constraint vector #' @param naming Parameter names #' @param extraVars Non-parameter variables used in the formula and function #' evaluation. These will be appended to the formula. #' @param formulaArgs Character vector of length two. First element indicates #' name for the response variable. Second element indicates name of the #' function. #' @importFrom MASS fractions #' @return This function returns a model construct appropriate for #' \code{\link{fitMarginals}} function. It also separates variables into those #' that are free and those which are constrained. #' @export #' @examples #' constM <- rbind(c(0, 0, 1, 0, 0, 0, 0), #' c(0, 0, 0, -1, 1, 0, 0)) #' constV <- c(0.9, 0) #' constructFormula(constM, constV) constructFormula <- function(consMatrix = NULL, consVector = NULL, naming = c("h1", "h2", "b", "m1", "m2", "e1", "e2"), extraVars = c("d1", "d2"), formulaArgs = c("effect", "fn")) { ## If no constraints are provided, return a default formula if (is.null(consMatrix) | is.null(consVector)) { model <- list("formula" = paste0(formulaArgs[1], " ~ ", formulaArgs[2], "(", paste(c(naming, extraVars), collapse = ", "), ")"), "free" = naming, "vars" = extraVars, "order" = naming) return(model) } if (inherits(consMatrix, "numeric")) consMatrix <- matrix(consMatrix, nrow = 1, ncol = length(consMatrix)) if (ncol(consMatrix) < length(naming)) stop("Constraint matrix does not have enough columns.") if (nrow(consMatrix) != length(consVector)) stop("Number of constraints in the matrix and vector do not match.") consIndex <- apply(consMatrix, 1, function(x) max(which(x != 0))) consFactors <- apply(consMatrix, 1, function(x) 1 / x[max(which(x != 0))]) if (anyDuplicated(consIndex)) stop(naming[consIndex[duplicated(consIndex)]], " cannot be constrained twice.") ## Normalize constraint constants normMatrix <- fractions(diag(consFactors, length(consFactors)) %*% consMatrix) normVector <- fractions(consFactors * consVector) namingOrig <- naming for (i in seq_along(consIndex)) { consVar <- consIndex[i] naming[consIndex[i]] <- paste0(naming[consVar], " = ", normVector[i]) ## Check if value is constrained in terms of other variables otherVar <- setdiff(which(normMatrix[i,] != 0), consVar) if (length(otherVar) > 0) { naming[consVar] <- paste0(naming[consVar], " - ", paste(paste(normMatrix[i, otherVar], naming[otherVar], sep = "*"), collapse = " + ")) } } ## Make formula look nicer by doing some substitution naming <- gsub("- -", "+ ", naming) naming <- gsub("= 0 (\\+|\\-)", "=", naming) naming <- gsub(" \\+ 1[:space:]*\\*", " +", naming) naming <- gsub(" - 1[:space:]*\\*", " -", naming) naming <- gsub("= 1\\*", "= ", naming) list("formula" = paste0(formulaArgs[1], " ~ ", formulaArgs[2], "(", paste(c(naming, extraVars), collapse = ", "), ")"), "free" = namingOrig[-consIndex], "nonfree" = namingOrig[consIndex], "order" = namingOrig, "vars" = extraVars, "constraints" = list("matrix" = normMatrix, "vector" = normVector)) } #' Fit two 4-parameter log-logistic functions with non-linear least squares #' #' This function does not automatically extract marginal data and requires #' model input obtained from \code{\link{constructFormula}}. #' #' @param model List with model parameters. Typically, this is an output from #' \code{\link{constructFormula}}. 
#' @param nlsfn Non-linear least-squares optimizer function #' @importFrom methods formalArgs #' @importFrom stats as.formula coef df.residual vcov #' @inheritParams fitMarginals marginalNLS <- function(data, transforms = NULL, start, model, nlsfn = nls, ...) { dataU <- data if (is.null(transforms)) { PowerT <- function(x) x BiolT <- function(x) x } else { PowerT <- function(x) transforms$PowerT(x, transforms$compositeArgs) BiolT <- function(x) transforms$BiolT(x, transforms$compositeArgs) ## Power-transform response data$effect <- PowerT(data$effect) } fn <- function(h1, h2, b, m1, m2, e1, e2, d1, d2) { PowerT(BiolT(ifelse(d2 == 0, L4(d1, h1, b, m1, e1), L4(d2, h2, b, m2, e2)))) } ## Starting parameters have to be in a list start <- as.list(start) nlsArgs <- list("formula" = as.formula(model$formula), "start" = start, "data" = data) ## Pick out args for NLS function from all ellipsis arguments extraArgs <- as.list(substitute(list(...)))[-1L] nlsArgs <- c(nlsArgs, extraArgs[names(extraArgs) %in% formalArgs(nlsfn)]) fit <- do.call(nlsfn, args = nlsArgs) ## If there were any constraints, coefficients need to be re-arranged if (is.null(model$constraints)) { coefs <- coef(fit) } else { ## Rearrange parameters coefs <- rep(0, length(model$order)) names(coefs) <- model$order coefs[match(names(coef(fit)), names(coefs))] <- coef(fit) coefs[model$nonfree] <- model$constraints$vector - model$constraints$matrix %*% coefs } list("coef" = coefs, "sigma" = summary(fit)$sigma, "df" = df.residual(fit), "data" = dataU, "transforms" = transforms, "vcov" = vcov(fit), "model" = model, "shared_asymptote" = as.logical(coefs["m1"] == coefs["m2"]), "extraArgs" = extraArgs) } #' Fit two 4-parameter log-logistic functions with common baseline #' #' This function is an alternative to non-linear least squares and #' provides optimization framework with \code{\link{optim}} function. #' It is however noticeably slower than NLS methods and can be especially #' time consuming in large datasets, in particular if bootstrap statistics #' are calculated. #' #' #' @param ... Further parameters passed to \code{\link[stats]{optim}} function #' @inheritParams fitMarginals #' @inheritParams marginalNLS #' @return Variance-covariance matrix which is returned by \code{\link{optim}} #' is based on the fact that minimization of sum-of-squared residuals leads #' essentially to a maximum likelihood estimator and so variance-covariance #' matrix can be estimated using inverse Hessian evaluated at the optimal #' parameters. In some cases, so obtained variance-covariance matrix might not #' be positive-definite which probably means that estimates are unstable #' because of either a poor choice of initial values or poor properties of the #' data itself. #' @importFrom numDeriv hessian #' @importFrom stats optim marginalOptim <- function(data, transforms = NULL, start, model, ...) 
{ dataU <- data if (is.null(transforms)) { PowerT <- function(x) x BiolT <- function(x) x } else { PowerT <- function(x) transforms$PowerT(x, transforms$compositeArgs) BiolT <- function(x) transforms$BiolT(x, transforms$compositeArgs) ## Power-transform response data$effect <- PowerT(data$effect) } fn <- function(pars) { pred <- apply(data[, c("d1", "d2")], 1, function(x) { PowerT(BiolT(ifelse(x[2] == 0, L4(x[1], pars["h1"], pars["b"], pars["m1"], pars["e1"]), L4(x[2], pars["h2"], pars["b"], pars["m2"], pars["e2"])))) }) data$effect - pred } ## Starting parameters have to be in a list start <- as.list(start) opt_fn <- function(pars) { ## Rearrange parameters in case of constraints if (!is.null(model$constraints)) { pars_new <- rep(0, length(model$order)) names(pars_new) <- model$order pars_new[match(names(pars), names(pars_new))] <- pars pars_new[model$nonfree] <- model$constraints$vector - model$constraints$matrix %*% pars_new pars <- pars_new } sum(fn(pars)^2) / 2 } fit <- optim(start, opt_fn, ...) ## If there were any constraints, coefficients need to be re-arranged if (!is.null(model$constraints)) { ## Rearrange parameters coefs <- rep(0, length(model$order)) names(coefs) <- model$order coefs[match(model$free, names(coefs))] <- fit$par coefs[model$nonfree] <- model$constraints$vector - model$constraints$matrix %*% coefs } else { coefs <- fit$par } resid <- fn(coefs) df <- length(resid) - length(coefs) sigma.sq <- sum(resid^2) / df vcov <- tryCatch({ sigma.sq * solve(hessian(fn, fit$par)) }, error = function(e) "Failed to compute inverse Hessian.") list("coef" = coefs, "sigma" = sqrt(sigma.sq), "df" = df, "data" = dataU, "transforms" = transforms, "vcov" = vcov, "model" = model, "shared_asymptote" = as.logical(coefs["m1"] == coefs["m2"]), "extraArgs" = as.list(substitute(list(...)))[-1L] ) }
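## Illustrative sketch (not part of the package code): how a `fixed` argument
## such as fixed = c("m1" = 1, "h1" = 1) is converted into a constraint matrix
## and vector inside fitMarginals() above, before being handed to
## constructFormula(). The fixed values are arbitrary examples.
fixedSketch <- c("m1" = 1, "h1" = 1)
parOrder <- c("h1", "h2", "b", "m1", "m2", "e1", "e2")
fixedInd <- match(names(fixedSketch), parOrder)
consSketch <- list("matrix" = diag(1, 7)[fixedInd, ], "vector" = fixedSketch)
constructFormula(consSketch$matrix, consSketch$vector)$formula
## expected along the lines of "effect ~ fn(h1 = 1, h2, b, m1 = 1, m2, e1, e2, d1, d2)"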
## File: BIGL/R/fitMarginals.R
#' Fit response surface model and compute meanR and maxR statistics #' #' This function computes predictions for off-axis dose combinations according #' to the BIGL or HSA null model and, if required, computes appropriate meanR #' and maxR statistics. Function requires as input dose-response dataframe and #' output of \code{\link{fitMarginals}} containing estimates for the monotherapy #' model. If transformation functions were used in monotherapy estimation, these #' should also be provided. #' #' Please see the example vignette \code{vignette("analysis", package = "BIGL")} #' and the report "Lack of fit test for detecting synergy" included in the #' \code{papers} folder for further details on the test statistics used: #' \code{system.file("papers", "newStatistics.pdf", package = "BIGL")} #' #' @param data Dose-response dataframe. #' @param fitResult Monotherapy (on-axis) model fit, e.g. produced by #' \code{\link{fitMarginals}}. It has to be a \code{"MarginalFit"} object or a #' list containing \code{df}, \code{sigma}, \code{coef}, #' \code{shared_asymptote} and \code{method} elements for, respectively, #' marginal model degrees of freedom, residual standard deviation, named #' vector of coefficient estimates, logical value of whether shared asymptote #' is imposed and method for estimating marginal models during bootstrapping #' (see \code{\link{fitMarginals}}). If biological and power transformations #' were used in marginal model estimation, \code{fitResult} should contain #' \code{transforms} elements with these transformations. Alternatively, these #' can also be specified via \code{transforms} argument. #' @param effect Name of the response column in the data ("effect") #' @param d1 Name of the column with doses of the first compound ("d1") #' @param d2 Name of the column with doses of the second compound ("d2") #' @param transforms Transformation functions. If non-null, \code{transforms} is #' a list containing 5 elements, namely biological and power transformations #' along with their inverse functions and \code{compositeArgs} which is a list #' with argument values shared across the 4 functions. See vignette for more #' information. #' @param statistic Which statistics should be computed. This argument can take #' one of the values from \code{c("none", "meanR", "maxR", "both")}. #' @param cutoff Cut-off to use in maxR procedure for declaring non-additivity #' (default is 0.95). #' @param B.CP Number of bootstrap iterations to use for CP matrix estimation #' @param B.B Number of iterations to use in bootstrapping null distribution for #' either meanR or maxR statistics. #' @param nested_bootstrap When statistics are calculated, if #' \code{nested_bootstrap = TRUE}, \code{CP} matrix is recalculated at each #' bootstrap iteration of \code{B.B} using \code{B.CP} iterations. Using such #' nested bootstrap may however significantly increase computational time. If #' \code{nested_bootstrap = FALSE}, \code{CP} bootstrapped data reuses #' \code{CP} matrix calculated from the original data. #' @param error Type of error for resampling in the bootstrapping procedure. #' This argument will be passed to \code{\link{generateData}}. If \code{error #' = 4} (default), the error terms for generating distribution of the null #' will be resampled from the vector specified in \code{sampling_errors}. If #' \code{error = 1}, normal errors are added. If \code{error = 2}, errors are #' sampled from a mixture of two normal distributions. 
If \code{error = 3}, #' errors are generated from a rescaled chi-square distribution. #' @param sampling_errors Sampling vector to resample errors from. Used only if #' \code{error} is 4 and is passed as argument to \code{\link{generateData}}. #' If \code{sampling_errors = NULL} (default), mean residuals at off-axis #' points between observed and predicted response are taken. #' @param parallel Whether parallel computing should be used for bootstrap. This #' parameter can take either integer value to specify the number of threads to #' be used or logical \code{TRUE/FALSE}. If \code{parallel = TRUE}, then #' \code{max(1, detectCores()-1)} is set to be the number of threads. If #' \code{parallel = FALSE}, then a single thread is used and cluster object #' is not created. #' @param CP Prediction covariance matrix. If not specified, it will be estimated #' by bootstrap using \code{B.CP} iterations. #' @param progressBar A boolean, should progress of bootstraps be shown? #' @param method What assumption should be used for the variance of on- and #' off-axis points. This argument can take one of the values from #' \code{c("equal", "model", "unequal")}. With the value \code{"equal"} as the #' default. \code{"equal"} assumes that both on- and off-axis points have the #' same variance, \code{"unequal"} estimates a different parameter for on- and #' off-axis points and \code{"model"} predicts variance based on the average #' effect of an off-axis point. If no transformations are used the #' \code{"model"} method is recommended. If transformations are used, only the #' \code{"equal"} method can be chosen. #' @param confInt a boolean, should confidence intervals be returned? #' @param digits Numeric value indicating the number of digits used for numeric values in confidence intervals #' @param bootRS a boolean, should bootstrapped response surfaces be used in the #' calculation of the confidence intervals? #' @param trans,invtrans the transformation function for the variance and its #' inverse, possibly as strings #' @param rescaleResids a boolean indicating whether to rescale residuals, #' or else normality of the residuals is assumed. #' @param newtonRaphson A boolean, should Newton-Raphson be used to find Loewe #' response surfaces? May be faster but also less stable to switch on #' @param asymptotes Number of asymptotes. It can be either \code{1} #' as in standard Loewe model or \code{2} as in generalized Loewe model. #' @param bootmethod The resampling method to be used in the bootstraps. Defaults to the same as method #' @param wild_bootType Type of distribution to be used for wild bootstrap. If \code{wild_bootstrap = TRUE}, #' errors are generated from "rademacher", "gamma", "normal" or "two-point" distribution. #' @param control If \code{control = "FCR"} then algorithm controls false coverage rate, if \code{control = "dFCR"} then #' algorithm controls directional false coverage rate, if \code{control = "FWER"} then #' algorithm controls family wise error rate #' @inheritParams generateData #' @importFrom parallel makeCluster clusterSetRNGStream detectCores stopCluster parLapply #' @importFrom progress progress_bar #' @importFrom stats aggregate #' @return This function returns a \code{ResponseSurface} object with estimates #' of the predicted surface. \code{ResponseSurface} object is essentially a #' list with appropriately named elements. 
#' #' Elements of the list include input data, monotherapy model coefficients and #' transformation functions, null model used to construct the surface as well #' as estimated CP matrix, occupancy level at #' each dose combination according to the generalized Loewe model and #' \code{"offAxisTable"} element which contains observed and predicted effects #' as well as estimated z-scores for each dose combination. #' #' If statistical testing was done, returned object contains \code{"meanR"} #' and \code{"maxR"} elements with output from \code{\link{meanR}} and #' \code{\link{maxR}} respectively. #' @examples #' \dontrun{ #' data <- subset(directAntivirals, experiment == 4) #' ## Data should contain d1, d2 and effect columns #' transforms <- list("PowerT" = function(x, args) with(args, log(x)), #' "InvPowerT" = function(y, args) with(args, exp(y)), #' "BiolT" = function(x, args) with(args, N0 * exp(x * time.hours)), #' "InvBiolT" = function(y, args) with(args, 1/time.hours * log(y/N0)), #' "compositeArgs" = list(N0 = 1, time.hours = 72)) #' fitResult <- fitMarginals(data, transforms) #' surf <- fitSurface(data, fitResult, statistic = "meanR") #' summary(surf) #' } #' @export fitSurface <- function(data, fitResult, transforms = fitResult$transforms, null_model = c("loewe", "hsa", "bliss", "loewe2"), effect = "effect", d1 = "d1", d2 = "d2", statistic = c("none", "meanR", "maxR", "both"), CP = NULL, B.CP = 50, B.B = NULL, nested_bootstrap = FALSE, error = 4, sampling_errors = NULL, wild_bootstrap = FALSE, wild_bootType = "normal", control = "FWER", cutoff = 0.95, parallel = FALSE, progressBar = TRUE, method = c("equal", "model", "unequal"), confInt = TRUE, digits = 9, bootRS = TRUE, trans = "identity", rescaleResids = FALSE, invtrans = switch(trans, "identity" = "identity", "log" = "exp"), newtonRaphson = FALSE, asymptotes = 2, bootmethod = method) { ## Argument matching null_model <- match.arg(null_model) statistic <- match.arg(statistic) method <- match.arg(method) transFun = match.fun(trans); invTransFun = match.fun(invtrans) if (method %in% c("model", "unequal") && (!is.null(transforms) || !is.null(fitResult$transforms))) { stop("No transformations can be used when choosing the method 'model' or 'unequal'") } ## Verify column names of input dataframe if (!all(c("effect", "d1", "d2") %in% colnames(data))) stop("effect, d1 and d2 arguments must be column names of data") id <- match(c("effect", "d1", "d2"), colnames(data)) colnames(data)[id] <- c("effect", "d1", "d2") data$d1d2 = apply(data[, c("d1", "d2")], 1, paste, collapse = "_") sigma0 <- fitResult$sigma df0 <- fitResult$df MSE0 <- sigma0^2 #Off-axis data and predictions data_off = with(data, data[d1 & d2, , drop = FALSE]) uniqueDoses <- with(data, list("d1" = sort(unique(d1)), "d2" = sort(unique(d2)))) doseGrid <- expand.grid(uniqueDoses) offAxisFit = fitOffAxis(fitResult, null_model = null_model, doseGrid = doseGrid, newtonRaphson = newtonRaphson) offAxisPred = predictOffAxis(fitResult, null_model = null_model, doseGrid = doseGrid, transforms = transforms, fit = offAxisFit) #Retrieve all off-axis points idOffDoseGrid = with(doseGrid, d1 & d2) doseGridOff = doseGrid[idOffDoseGrid,] d1d2off = apply(doseGridOff, 1, paste, collapse = "_") rownames(doseGridOff) = d1d2off idUnique = d1d2off[match(data_off$d1d2, d1d2off)] offAxisPredAll <- offAxisPred[idUnique] offaxisZTable <- cbind(data_off[, c("d1", "d2", "effect", "d1d2"), drop = FALSE], "predicted" = offAxisPredAll) if(!is.null(transforms)) offaxisZTable$effect <- with(transforms, 
PowerT(offaxisZTable$effect, compositeArgs)) offAxisTable <- cbind(offaxisZTable, "z.score" = with(offaxisZTable, (effect - predicted) / sigma0)) if(null_model == "loewe"){ occupancy = offAxisFit$occupancy startvalues = offAxisFit$oc } else if(null_model == "loewe2"){ startvalues = offAxisFit occupancy = NULL } else { occupancy = startvalues = NULL } ### Computation of MeanR/MaxR statistics ## Bootstrap sampling vector if (is.null(sampling_errors)) { ## Ensure errors are generated from transformed data if applicable dataT <- data[, c("d1", "d2", "effect", "d1d2")] if (!is.null(transforms)) { dataT$effect <- with(transforms, PowerT(dataT$effect, compositeArgs)) } mean_effects <- aggregate(data = dataT, effect ~ d1d2, mean) names(mean_effects) = c("d1d2", "meaneffect") Total <- merge(dataT, mean_effects, by = c("d1d2")) sampling_errors <- Total$effect - Total$meaneffect } ## NB: mean responses are taken R = with(offaxisZTable, tapply(effect-predicted, d1d2, mean)) reps <- with(offaxisZTable, tapply(effect-predicted, d1d2, length)) if (all(reps == 1) && method %in% c("model", "unequal")) { stop("Replicates are required when choosing the method 'model' or 'unequal'") } #Check predicted variances if(method == "model"){ off_mean <- with(data_off, tapply(effect, d1d2, mean)) off_var = with(data_off, tapply(effect, d1d2, var)) Coef = lm.fit(transFun(off_var), x = cbind(1, off_mean))$coef #Don't allow negative variances if(Coef[2]<0){ #warning("Variance was found to decrease with mean, check mean-variance trend!") } predVar = invTransFun(Coef[1] + Coef[2]*off_mean) if(any(predVar < 0)){ #warning("Negative variances modelled on real data!\nCheck mean-variance trend with plotMeanVarFit and consider transforming the variance!") } model = c(Coef, "min" = min(off_var[off_var>0]), "max" = max(off_var)) #Store smallest observed variance } else model = NULL B = if(is.null(B.B)) B.CP else max(B.B, B.CP) #Number of bootstraps #If any bootstraps needed, do them first if(is.null(CP) && (is.null(B))){ stop("No covariance matrix supplied, and no bootstraps required.\n Please provide either of both!") } else if(!is.null(B)){ ## Setup parallel computation if ((is.logical(parallel) & parallel) | is.numeric(parallel)) { nCores <- ifelse(is.logical(parallel), max(1, detectCores() - 1), parallel) clusterObj <- makeCluster(nCores, outfile="") clusterSetRNGStream(clusterObj) } else { clusterObj <- NULL } #Progess bar if(progressBar && !is.null(B)){ pb <- progress_bar$new(format = "(bootstraps): [:bar]:percent", total = B, width = 60) pb$tick(0) } else {pb = NULL} #Start bootstrapping paramsBootstrap <- list("data" =data, "fitResult" = fitResult, "transforms" = transforms, "null_model" = null_model, "error" = error, "sampling_errors" = sampling_errors, "wild_bootstrap" = wild_bootstrap, "bootmethod" = bootmethod, "method" = method, "doseGrid" = doseGrid, "startvalues" = startvalues, "pb" = pb, "progressBar" = progressBar, "model" = model, "means" = Total$meaneffect, "rescaleResids" = rescaleResids, "invTransFun" = invTransFun, "newtonRaphson" = newtonRaphson, "asymptotes" = asymptotes) bootStraps = if(is.null(clusterObj)) { lapply(integer(B), bootFun, args = paramsBootstrap) } else { parLapply(clusterObj, integer(B), bootFun, args = paramsBootstrap) } } else {bootStraps = clusterObj = NULL} ## If not provided, compute prediction covariance matrix by bootstrap if (is.null(CP)) CP <- getCP(bootStraps, null_model, transforms, sigma0 = sigma0, doseGrid = doseGrid) CP = CP[names(R), names(R)] #Calculate test statistics 
paramsStatistics = list("bootStraps" = bootStraps, "CP" = CP, "cutoff" = cutoff, "data_off" = data_off, "fitResult" = fitResult, "null_model" = null_model, "transforms" = transforms, "doseGrid" = doseGrid, "reps" = reps, "R" = R, "idUnique" = idUnique, "B.B" = B.B, "Total" = Total, "n1" = length(R), "method" = method, "respS" = offAxisPredAll, "bootRS" = bootRS, "doseGridOff" = doseGridOff[names(R),], "transFun" = transFun, "invTransFun" = invTransFun, "model" = model, "rescaleResids" = rescaleResids, "wild_bootstrap" = wild_bootstrap, "wild_bootType" = wild_bootType, "control" = control, "digits" = digits) statObj <- NULL if (statistic %in% c("meanR", "both")) statObj <- c(statObj, list("meanR" = do.call(meanR, paramsStatistics))) if (statistic %in% c("maxR", "both")) statObj <- c(statObj, list("maxR" = do.call(maxR, paramsStatistics))) if(confInt && is.null(B.B) && is.null(B.CP)){ warning("Confidence intervals only available with the bootstrap") confInt = FALSE } if(confInt) statObj <- c(statObj, list("confInt" = do.call(bootConfInt, paramsStatistics))) retObj <- c(list("data" = data, "fitResult" = fitResult, "transforms" = transforms, "null_model" = null_model, "method" = method, "offAxisTable" = offAxisTable, "asymptotes" = asymptotes, "occupancy" = occupancy, "CP" = CP, "cutoff" = cutoff), statObj) if (!is.null(clusterObj)) stopCluster(clusterObj) # add compound names from marginal fit retObj$names <- fitResult$names class(retObj) <- append(class(retObj), "ResponseSurface") return(retObj) }
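## Illustrative sketch (not part of the package code): the mean-variance fit
## used in fitSurface() above when method = "model" and trans = "identity".
## Per off-axis dose combination, the replicate variance is regressed on the
## replicate mean, and the fitted line is then used to predict variances.
## The mean and variance values below are made up.
offMeanSketch <- c(10, 20, 40, 80)
offVarSketch <- c(2, 5, 9, 21)
coefSketch <- lm.fit(x = cbind(1, offMeanSketch), y = offVarSketch)$coef
coefSketch[1] + coefSketch[2] * offMeanSketch  # predicted variance per mean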
## File: BIGL/R/fitSurface.R
#' Compute combined predicted response from drug doses according to standard or #' generalized Loewe model. #' #' @param doseInput Dose-response dataframe containing \code{"d1"} and #' \code{"d2"} columns #' @param parmInput Numeric vector or list with appropriately named #' parameter inputs. Typically, it will be coefficients from a #' \code{MarginalFit} object. #' @inheritParams fitSurface #' @param newtonRaphson a boolean, is Newton raphson used for finding the #' response surface? May be faster but also less stable #' @param ... Further arguments that are currently unused #' @inheritParams simulateNull #' @importFrom nleqslv nleqslv #' @importFrom stats uniroot generalizedLoewe <- function (doseInput, parmInput, asymptotes = 2, startvalues = NULL, newtonRaphson = FALSE, ...) { parmInput[c("h1", "h2")] = abs(parmInput[c("h1", "h2")]) ## Need good accuracy here: solve for -logit(o) solver <- function(dose, par){ fun0 <- function(x){ res <- exp(dose[1] + x/par["h1"]) + exp(dose[2] + x/par["h2"]) - 1 if(is.finite(res)) res else 1 } gr0 = function(x){ exp(dose[1] + x/par["h1"])/par["h1"] + exp(dose[2] + x/par["h2"])/par["h2"] } if(newtonRaphson) nleqslv(fn = fun0, x = dose[3], jac = gr0)$x else uniroot(fun0, c(-5000, 5000), tol = .Machine$double.eps)$root } ## Remove observations where both drugs are dosed at zero allZero <- !rowSums(doseInput != 0) dose <- doseInput[!allZero,, drop = FALSE] logDoseMinE = log(dose) - rep(parmInput[c("e1", "e2")], each = nrow(dose)) ## In case of a single asymptote, use an artificial one for the second drug ## equal to the first one. if (asymptotes == 1) { parm <- c(parmInput[1:4], parmInput[4], parmInput[5:6]) names(parm)[4:5] <- c("m1", "m2") } else { parm <- parmInput } increasing <- parm["m1"] >= parm["b"] && parm["m2"] >= parm["b"] decreasing <- parm["m1"] <= parm["b"] && parm["m2"] <= parm["b"] ## If agonist and antagonist, give a warning if (!(increasing || decreasing)) { warning("Marginal curves are diverging. The synergy/antagonism calls may be reversed") } logDoseMinE = cbind(logDoseMinE, "start" = if(is.null(startvalues)) integer(nrow(logDoseMinE)) else startvalues) ## For each combination of Compound 1 and Compound 2, find transformed ## occupancy rate, i.e. -logit(o*), which satisfies Loewe model equation. oc <- apply(logDoseMinE, 1, solver, parmInput) if (all(is.na(oc))) stop("genLoewe: no roots found between starting parameters") if (any(is.na(oc))) warning("genLoewe: some roots not found") LogOccupancy <- function(d, e, o, h) log(d) - e + abs(1/h) * o xf <- with(as.list(parm), { logO1 <- LogOccupancy(dose[["d1"]], e1, oc, h1) logO2 <- LogOccupancy(dose[["d2"]], e2, oc, h2) rvInt <- (m1 - b) * exp(logO1) + (m2 - b) * exp(logO2) rv <- b + rvInt/(exp(oc) + 1) rv }) ## Set baseline response for observations where both doses are zero. ## Otherwise, use the estimate above. if (any(allZero)) { rv <- rep(NA, length(allZero)) rv[!allZero] <- xf rv[allZero] <- parm["b"] xf <- rv } return(list("response" = xf, "oc" = oc, "occupancy" = cbind(dose, "occupancy" = 1/(exp(oc)+1)))) }
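## Illustrative sketch (not part of the package code): evaluating the
## generalized Loewe null surface on a small made-up dose grid. The
## monotherapy coefficients are arbitrary example values (with two distinct
## asymptotes m1 and m2).
parsLoeweSketch <- c("h1" = 1, "h2" = 1.5, "b" = 0,
                     "m1" = 1, "m2" = 2, "e1" = 0.5, "e2" = 0.1)
gridLoeweSketch <- expand.grid("d1" = c(0, 0.5, 1), "d2" = c(0, 0.5, 1))
loeweSketch <- generalizedLoewe(gridLoeweSketch, parsLoeweSketch)
cbind(gridLoeweSketch, "loewe" = loeweSketch$response)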
## File: BIGL/R/generalizedLoewe.R
#' A function to get the d1d2 identifier
#'
#' @param dat the data frame containing d1 and d2 entries
#' @return a vector of d1d2 identifiers
getd1d2 <- function(dat) {
  apply(dat[, c("d1", "d2")], 1, paste, collapse = "_")
}
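## Illustrative sketch (not part of the package code): the "d1_d2" identifier
## returned by getd1d2(), on a made-up two-row dose data frame.
getd1d2(data.frame(d1 = c(0, 0.5), d2 = c(1, 2.5)))  # "0_1" "0.5_2.5"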
## File: BIGL/R/getd1d2.R
# Inverse 4-parameter log-logistic (response-dose) function invL4 <- function(y, h, b, m) { ((y-b)/(m-y))^(1/h) } invL4deriv <- function(y, h, b, m) { ((y-b)/(m-y))^(-1/h-1)*(1/h*(1/(m-y)+(y-b)/(m-y)^2)) } # calculation of d/D in the additivity/synergy equation doseRatio <- function(response, d, h, b, m, expe, lower, upper) { # In case of different asymptotes, response can be outside of [lower, upper] # range, this needs to be handled separately (otherwise formula gives NaN) if(!d){ return(0) } else if(response <= lower){ return(if(b < m) Inf else 0) } else if (response >= upper){ return(if(b < m) 0 else Inf) } else { return(d/(invL4(response, h, b, m)*expe)) } } doseRatioGr = function(response, d, h, b, m, expe, lower, upper){ if(!d || response < lower || response > upper){ return(0) } else { return(d/expe* (-invL4deriv(response, h, b, m))) } } #' Alternative Loewe generalization #' #' @inheritParams generalizedLoewe harbronLoewe <- function (doseInput, parmInput, asymptotes = 2, startvalues = NULL, newtonRaphson = FALSE, ...) { parmInput[c("h1", "h2")] = abs(parmInput[c("h1", "h2")]) ## In case of a single asymptote, use an artificial one for the second drug ## equal to the first one. if (asymptotes == 1) { parm <- c(parmInput[1:4], parmInput[4], parmInput[5:6]) names(parm)[4:5] <- c("m1", "m2") } else { parm <- parmInput } increasing <- parm["m1"] >= parm["b"] && parm["m2"] >= parm["b"] decreasing <- parm["m1"] <= parm["b"] && parm["m2"] <= parm["b"] ## If agonist and antagonist, give an error if (!(increasing || decreasing)) { stop("Alternative Loewe generalization does not work for diverging marginal curves.") } expE1 = exp(parm["e1"]);expE2 = exp(parm["e2"]) lower1 <- min(parm[c("b", "m1")]);upper1 <- max(parm[c("b", "m1")]) lower2 <- min(parm[c("b", "m2")]);upper2 <- max(parm[c("b", "m2")]) lower = min(lower1, lower2); upper = max(upper1, upper2) solver <- function(dose, par) { fun0 <- function(y) { res <- doseRatio(y, dose[1], par["h1"], par["b"], par["m1"], expE1, lower1, upper1) + doseRatio(y, dose[2], par["h2"], par["b"], par["m2"], expE2, lower2, upper2) - 1 if(is.finite(res)) res else 1 } gr0 = function(y){ doseRatioGr(y, dose[1], par["h1"], par["b"], par["m1"], expE1, lower1, upper1) + doseRatioGr(y, dose[2], par["h2"], par["b"], par["m2"], expE2, lower2, upper2) } if(newtonRaphson){ out = nleqslv(fn = fun0, x = dose[3], jac = gr0, control = list(ftol = .Machine$double.eps))$x out = if(out <= lower) {lower} else if(out >= upper) {upper} else {out} return(out) } else { parRange <- range(par[c("b", "m1", "m2")]) if (length(unique(parRange)) == 1) # special case of 2 flat profiles return(par["b"]) uniroot(fun0, parRange, tol = .Machine$double.eps)$root } } ## Remove observations where both drugs are dosed at zero allZero <- !rowSums(doseInput != 0) dose <- doseInput[!allZero, , drop = FALSE] parRange <- range(parm[c("b", "m1", "m2")]) dose = cbind(dose, if(is.null(startvalues)) { mean(parm[c("b", "m1", "m2")]) } else { startvalues[!allZero]}) res = if (length(unique(parRange)) == 1){ rep(parm["b"], nrow(dose)) } else {# special case of 2 flat profiles apply(dose, 1, solver, parm) } if (all(is.na(res))) stop("Alternative Loewe generalization: no roots found between starting parameters") if (anyNA(res)) warning("Alternative Loewe generalization: some roots not found") ## Set baseline response for observations where both doses are zero. ## Otherwise, use the estimate above. 
if (any(allZero)) { rv <- rep(NA, length(allZero)) rv[!allZero] <- res rv[allZero] <- parm["b"] res <- rv } return(res) }
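## Illustrative sketch (not part of the package code): invL4() above inverts
## the four-parameter log-logistic curve, returning dose/EC50 for a given
## response. Round trip with an inline 4PL written out explicitly (the same
## formula as the L4() helper elsewhere in the package); all parameter values
## are made up.
fourPLSketch <- function(dose, h, b, m, logEC50) {
  b + (m - b) / (1 + (exp(logEC50) / dose)^h)
}
ySketch <- fourPLSketch(dose = 2, h = 1.2, b = 0, m = 1, logEC50 = 0)
invL4(ySketch, h = 1.2, b = 0, m = 1) * exp(0)  # recovers the dose, here 2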
## File: BIGL/R/harbronLoewe.R
#' 4-parameter logistic dose-response function
#'
#' @param dose Dose level
#' @param b Hill's coefficient (slope of the curve)
#' @param L Baseline effect (at zero dose)
#' @param U Asymptote effect (at infinite dose)
#' @param logEC50 Point of inflection (in logarithmic terms)
L4 <- function(dose, b, L, U, logEC50) {
  denum <- 1 + (exp(logEC50) / dose)^(abs(b))
  return(L + (U - L) / denum)
}

#' R color to RGB (red/green/blue) conversion.
#' @param cname vector of any of the three kinds of R color specifications,
#'   i.e., either a color name (as listed by \code{\link{colors}}()), a
#'   hexadecimal string of the form "#rrggbb" or "#rrggbbaa" (see
#'   \code{\link{rgb}}), or a positive integer i meaning
#'   \code{\link{palette}}()[i].
#' @param alpha logical value indicating whether the alpha channel (opacity)
#'   values should be returned.
#' @importFrom grDevices rgb col2rgb
col2hex <- function(cname, alpha = FALSE) {
  colMat <- col2rgb(cname)
  rgb(red = colMat[1, ]/255, green = colMat[2, ]/255, blue = colMat[3, ]/255)
}
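## Illustrative sketch (not part of the package code): the L4() curve at a few
## made-up dose levels, and col2hex() converting R colour names to hex codes.
L4(dose = c(0.1, 1, 10), b = 1, L = 0, U = 100, logEC50 = 0)  # 100/11, 50, 1000/11
col2hex(c("steelblue", "tomato3"))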
## File: BIGL/R/helper.R
#' Highest Single Agent model #' #' This function returns response levels for when these are based on #' Highest Single Agent (HSA) model. #' #' @inheritParams generalizedLoewe hsa <- function(doseInput, parmInput, ...) { pars <- parmInput increasing <- pars["m1"] >= pars["b"] & pars["m2"] >= pars["b"] decreasing <- pars["m1"] <= pars["b"] & pars["m2"] <= pars["b"] ## If agonist and antagonist, try to determine the leading compound and emit a ## warning. if (!(increasing | decreasing)) { warning("Marginal curves are diverging. HSA might be flawed.") lead <- which.max(c(abs(pars["m1"] - pars["b"]), abs(pars["m2"] - pars["b"]))) if (lead == 1) applyFunction <- if (pars["m1"] > pars["b"]) max else min else if (lead == 2) applyFunction <- if (pars["m2"] > pars["b"]) max else min } else { applyFunction <- if (increasing) max else min } pred1 <- L4(doseInput[["d1"]], pars["h1"], pars["b"], pars["m1"], pars["e1"]) pred2 <- L4(doseInput[["d2"]], pars["h2"], pars["b"], pars["m2"], pars["e2"]) apply(cbind(pred1, pred2), 1, applyFunction) }
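## Illustrative sketch (not part of the package code): the Highest Single
## Agent prediction is the stronger of the two monotherapy effects at each
## dose pair. Coefficients and doses are made-up example values; L4() from
## helper.R above is used internally.
parsHsaSketch <- c("h1" = 1, "h2" = 1.5, "b" = 0,
                   "m1" = 1, "m2" = 2, "e1" = 0.5, "e2" = 0.1)
gridHsaSketch <- expand.grid("d1" = c(0.5, 1), "d2" = c(0.5, 1))
hsa(gridHsaSketch, parsHsaSketch)  # row-wise maximum of the two L4 predictions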
## File: BIGL/R/hsa.R
#' Estimate initial values for fitting marginal dose-response curves #' #' This is a wrapper function which, when a dose-response dataframe is provided, #' returns start value estimates for both compounds that could be supplied to #' \code{\link{fitMarginals}} function. This function is also used by #' \code{\link{fitMarginals}} if no initials values were supplied. #' #' Note that this function returns \code{e1} and \code{2} which are #' log-transformed inflection points for respective compounds. #' #' @param ... Further parameters that are currently not used #' @return Named vector with parameter estimates. Parameter names are consistent #' with parameter names in \code{\link{fitMarginals}}. \code{h1} and \code{h2} #' are Hill's slope coefficients for each of the compounds, \code{m1} and #' \code{m2} are their maximal response levels whereas \code{b} is the shared #' baseline. Lastly, \code{e1} and \code{e2} are log-transformed EC50 values. #' @note Returns starting value for e = log(EC50). #' @inheritParams fitMarginals #' @inheritParams fitSurface #' @export #' @examples #' data <- subset(directAntivirals, experiment == 1) #' ## Data must contain d1, d2 and effect columns #' transforms <- getTransformations(data) #' initialMarginal(data, transforms) initialMarginal <- function(data, transforms = NULL, ...) { ## Extract marginal data for each of the compounds df1 <- with(data[data$d2 == 0,], data.frame("dose" = d1, "effect" = effect)) df2 <- with(data[data$d1 == 0,], data.frame("dose" = d2, "effect" = effect)) sg1 <- GetStartGuess(df1, transforms) sg2 <- GetStartGuess(df2, transforms) mean.sg.cc <- mean(c(sg1["cc"], sg2["cc"])) mean.sg.dd <- mean(c(sg1["dd"], sg2["dd"])) pg <- c(h1 = sg1[["bb"]], h2 = sg2[["bb"]], b = mean.sg.cc, m1 = sg1[["dd"]], m2 = sg2[["dd"]], e1 = sg1[["ee"]], e2 = sg2[["ee"]]) pg[is.na(pg)] <- 1e-05 pg } #' Estimate initial values for dose-response curve fit #' #' @param df Dose-response dataframe containing \code{"dose"} and #' \code{"effect"} columns #' @inheritParams fitSurface #' @importFrom MASS rlm #' @importFrom stats lm GetStartGuess <- function(df, transforms = NULL) { x <- df$dose resp <- df$effect ind <- !is.na(resp) resp <- resp[ind] x <- x[ind] ox <- order(x) resp <- resp[ox] x <- x[ox] ## Work out whether response is increasing or decreasing with increasing ## dose of the drug if (!is.null(transforms$PowerT)) resp <- with(transforms, PowerT(resp, compositeArgs)) mod <- lm((resp - resp[1]) ~ I(seq_along(x) - 1) - 1) ## Based on positive/negative relationship, assign asymptotes accordingly if (coef(mod) > 0){ ## Maximum response at x = Inf c0 <- min(resp) d0 <- max(resp) } else { ## Maximum response at x = 0 c0 <- max(resp) d0 <- min(resp) } ## Given previous min/max values, response variable is squeezed into 0/1 so ## as to treat 0/1 as asymptotes. It is also slightly shrunk to allow ## inclusion of 1 and 0 and then logit-transformed. ## r <- (resp - min(x) * 0.99)/(max(resp * 1.01) - min(resp) * 0.99) r <- (resp - c0)/(d0 - c0) r <- r*0.999 + 0.0005 r <- log(abs(r/(1 - r))) ## bb = Hill coefficient / slope ## cc = baseline response ## dd = asymptote ## ee = inflection point / EC50 d <- log(x) ## ee represents the midpoint of the logistic curve that is fit to the ## monotherapy curve. bb is the slope coefficient of the curve theFit <- suppressWarnings( rlm(r ~ d, subset = !is.na(r) & is.finite(r) & !is.na(d) & is.finite(d))$coef) bb <- theFit[[2]] ee <- -theFit[[1]]/bb ## If the midpoint is beyond range, set the slope coefficient to 1. 
if (exp(ee) > max(x)) { theFit <- suppressWarnings( rlm(r ~ offset(d), subset = !is.na(r) & is.finite(r) & !is.na(d) & is.finite(d))$coef) bb <- 1 ee <- -theFit[[1]] } ## Use previous min/max for baseline and asymptote values cc <- c0 # median(resp[x == min(x)]) dd <- d0 # median(resp[x == max(x)]) ## If transformation functions were provided, baseline and asymptote ## parameters are accordingly transformed. if (!is.null(transforms$PowerT)) { cc <- with(transforms, InvPowerT(cc, compositeArgs)) dd <- with(transforms, InvPowerT(dd, compositeArgs)) } if (!is.null(transforms)) { eps <- min(abs(resp)[abs(resp) > 0])/2 ## Transform baseline response cc0 <- with(transforms, InvBiolT(cc, compositeArgs)) if (!is.finite(cc0)) { cc <- with(transforms, InvBiolT(cc + eps, compositeArgs)) } else cc <- cc0 ## Transform asymptotic response dd0 <- with(transforms, InvBiolT(dd, compositeArgs)) if (!is.finite(dd0)) { dd <- with(transforms, InvBiolT(dd + eps, compositeArgs)) } else dd <- dd0 } return(c("bb" = abs(bb), "cc" = cc, "dd" = dd, "ee" = ee)) }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/initialMarginal.R
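## A minimal, stand-alone sketch of the starting-value idea in GetStartGuess() above,
## assuming a single decreasing monotherapy curve: squeeze the response into (0, 1),
## logit-transform it and regress on log(dose) with MASS::rlm(). The slope then
## approximates the Hill coefficient and -intercept/slope approximates log(EC50).
## The dose and effect values below are invented purely for illustration.
library(MASS)
dose <- c(0.1, 0.3, 1, 3, 10, 30)
effect <- c(98, 95, 80, 50, 20, 8)      # hypothetical decreasing monotherapy data
c0 <- max(effect); d0 <- min(effect)    # baseline and asymptote guesses
r <- (effect - c0) / (d0 - c0)          # squeeze response into [0, 1]
r <- r * 0.999 + 0.0005                 # avoid exact 0 and 1 before the logit
r <- log(r / (1 - r))
startFit <- rlm(r ~ log(dose))
bb <- coef(startFit)[2]                 # slope ~ Hill coefficient
ee <- -coef(startFit)[1] / bb           # ~ log(EC50)
exp(ee)                                 # EC50 on the original dose scale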
#' Isobologram of the response surface predicted by the null model #' #' If transformation functions are used, then the isobologram response levels #' will be plotted on the transformed scale. #' #' @param x Output of \code{\link{fitSurface}} #' @param grid.len Number of concentrations to plot for each compound in the #' contour plot. An evenly spaced grid of doses will be generated for each #' compound given its respective observed minimum and maximum doses. Note that #' \code{grid.len^2} computations will be needed later so this number should #' stay reasonably low. #' @param logScale If \code{logScale = TRUE}, then grid of doses is evenly #' spaced in the logarithmic scale. #' @param greyScale If \code{greyScale = TRUE}, then plot is in grey scale, #' otherwise in colour. #' @param ... Further parameters that are not used at this moment. #' @import ggplot2 #' @export isobologram <- function(x, grid.len = 100, logScale = TRUE, greyScale = FALSE, ...) { ## Generate evenly spaced grid either on a linear or a log-scale genSeq <- function(doses) { if (logScale) { ## Log-scale removed zero dose doses <- setdiff(doses, 0) seq.range <- log(range(doses)) c(0, exp(seq(seq.range[1], seq.range[2], length.out = grid.len - 1))) } else { ## Linear scale seq.range <- range(doses) seq(seq.range[1], seq.range[2], length.out = grid.len) } } coefs <- coef(x$fitResult) ## Generate a grid of doses for Compound 1 and predict the response doses1 <- genSeq(unique(x$data$d1)) resp1 <- L4(doses1, coefs["h1"], coefs["b"], coefs["m1"], coefs["e1"]) ## Generate a grid of doses for Compound 2 and predict the response doses2 <- genSeq(unique(x$data$d2)) resp2 <- L4(doses2, coefs["h2"], coefs["b"], coefs["m2"], coefs["e2"]) ## Combine both compounds and their marginal predictions data <- rbind(data.frame("d1" = doses1, "d2" = 0, "effect" = resp1), data.frame("d1" = 0, "d2" = doses2, "effect" = resp2)) ## Based on marginal data, generate null model predictions uniqueDoses <- with(data, list("d1" = sort(unique(d1)), "d2" = sort(unique(d2)))) doseGrid <- expand.grid(uniqueDoses) predSurface <- predictOffAxis(doseGrid, x$fitResult, null_model = x$null_model, asympotes = x$asymptotes) melt.surface <- data.frame(doseGrid[with(doseGrid, d1&d2),], "effect" = as.numeric(predSurface)) labnames <- c("Response", if (!is.null(x$names)) x$names else c("Compound 1", "Compound 2")) if (!is.null(attr(x$data, "orig.colnames"))) { labnames <- unlist(attr(x$data, "orig.colnames")) } if(greyScale){ colourPalette <- c("#F0F0F0", "#D9D9D9", "#BDBDBD", "#969696", "#737373", "#525252", "#252525", "#000000") #"#FFFFFF" white is too white } else { colourPalette <- c("steelblue", "lightsteelblue", "lightblue", "floralwhite", "beige", "khaki", "orange1", "tomato3", "red") } p <- ggplot(melt.surface, aes(x = .data$d1, y = .data$d2, z = .data$effect, fill = .data$effect)) + theme_bw() + geom_tile() + labs(x = labnames[2], y = labnames[3]) + scale_fill_gradientn(labnames[1], colours = colourPalette) + geom_contour(bins = 7, col = "black", linewidth = 0.2) xBreaks <- unique(x$data$d1) yBreaks <- unique(x$data$d2) if (logScale) { p <- p + scale_x_log10(breaks = xBreaks) + scale_y_log10(breaks = yBreaks) } else { p <- p + scale_x_continuous(breaks = xBreaks) + scale_y_continuous(breaks = yBreaks) } p }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/isobologram.R
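## Hypothetical usage of isobologram() defined above: fit the monotherapy curves,
## build the null response surface and draw its contour plot. The experiment
## subset, null model and grid size are arbitrary choices for illustration.
data <- subset(directAntivirals, experiment == 1)
fitResult <- fitMarginals(data)
surf <- fitSurface(data, fitResult, null_model = "loewe", statistic = "none")
isobologram(surf, grid.len = 50, logScale = TRUE)   # contour of the predicted surface
isobologram(surf, greyScale = TRUE)                  # grey-scale variant for print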
#' Summary of maxR object #' #' @param object Object of \code{"maxR"} class #' @param ... Further arguments #' @export summary.maxR <- function(object, ...) { ans <- list() ans$call <- object$Call ans$points <- outsidePoints(object$Ymean) ans$totals <- data.frame("Call" = object$Call, "Syn" = sum(object$Ymean$call == "Syn"), "Ant" = sum(object$Ymean$call == "Ant"), "Total" = nrow(object$Ymean)) class(ans) <- append("summary.maxR", class(ans)) ans } #' Print summary of maxR object #' #' @param x Summary of \code{"maxR"} object #' @inheritParams summary.maxR #' @export print.summary.maxR <- function(x, ...) { if (x$call != "None") { x$points$`p-value` <- ifelse(x$points$`p-value` < 2e-16, "<2e-16", round(x$points$`p-value`, 5)) cat("\nEvidence for effects in data: ", x$call, "\n", sep="") cat("Points with significant deviations from the null: \n") print(x$points) rownames(x$totals) <- "" cat("\nOverall maxR summary:\n") print(x$totals) } else { cat("MaxR test did not detect any deviations from the null.\n") } } #' Plot of maxR object #' #' @param x Output of \code{\link{maxR}}. This can also be \code{"maxR"} #' element in the output of \code{\link{fitSurface}}. #' @param plevels Probability levels used to generate a color scale #' @param cutoff Probability cutoff to use for range of colors #' @param maxshow Forced value for range of colors #' @param ... Further arguments that are passed to \code{\link{format}} function #' for formatting of axis labels #' @inheritParams plotResponseSurface #' @inheritParams contour.ResponseSurface #' @inheritParams graphics::title #' @importFrom graphics axis filled.contour points title #' @importFrom grDevices extendrange rgb #' @export plot.maxR <- function(x, main = "Contour plot for maxR", xlab = "Dose (Compound 1)", ylab = "Dose (Compound 2)", colorPalette = c("blue", "white", "red"), logScale = TRUE, zTransform = function(z) { z }, plevels = c(0.7, 0.8, 0.9, 0.95, 0.99, 0.999), cutoff = max(plevels), maxshow = NULL, reverse.x = FALSE, reverse.y = FALSE, swapAxes = FALSE, ...) 
{ uniqueDoses <- with(x$Ymean, list("d1" = sort(unique(d1)), "d2" = sort(unique(d2)))) doseGrid <- expand.grid(uniqueDoses) log10T <- function(z) log10(z + 0.5 * min(z[z != 0])) transformF <- if (logScale) log10T else function(z) z maxRvalues <- x$Ymean$R maxRvalues <- maxRvalues[match(with(doseGrid, paste(d1, d2)), with(x$Ymean, paste(d1, d2)))] maxRvalues[is.na(maxRvalues)] <- 0 if (is.null(maxshow)) { ## We show all values above cutoff in the same color maxshow <- quantile(attr(x$Ymean, "distr"), cutoff) if (is.null(maxshow)) { maxshow <- 3.5 warning("No `maxshow` parameter specified, so 3.5 is used.") } } origMaxRvalues <- maxRvalues # Keep unchanged for point sizes maxRvalues[maxRvalues > maxshow] <- maxshow maxRvalues[maxRvalues < -maxshow] <- -maxshow ## Levels to show on color-scale zlevels <- sort(quantile(attr(x$Ymean, "distr"), c(plevels[plevels < cutoff], cutoff)) %o% c(-1,1)) if(swapAxes){ if(reverse.x) xlim <- rev(extendrange(transformF(uniqueDoses$d2))) else xlim <- extendrange(transformF(uniqueDoses$d2)) if(reverse.y) ylim <- rev(extendrange(transformF(uniqueDoses$d1))) else ylim <- extendrange(transformF(uniqueDoses$d1)) filled.contour( x = transformF(uniqueDoses$d2), y = transformF(uniqueDoses$d1), z = t(matrix(maxRvalues, sapply(uniqueDoses, length))), levels = zlevels, color.palette = colorRampPalette(colorPalette, space = "rgb"), plot.axes = { axis(1, at = transformF(uniqueDoses$d2), labels = format(uniqueDoses$d2, ...), cex.axis = 0.8) axis(2, at = transformF(uniqueDoses$d1), labels = format(uniqueDoses$d1, ...), cex.axis = 0.8) points(expand.grid(x = transformF(uniqueDoses$d2), y = transformF(uniqueDoses$d1)), pch = 20, col = rgb(0, 0, 0, 0.3), ## Size proportional to maxR statistic cex = t(matrix(abs(origMaxRvalues)/2, sapply(uniqueDoses, length)))) }, key.axes = { axis(4, at = c(0, zlevels), tck = -0.1, mgp = c(3, 0.3, 0), cex.axis = 0.7, labels = c( paste0("\u2191", if (colorPalette[1] %in% c("blue", "white")) "Antagonism" else "Synergy", "\n\n \n\n", "\u2193", if (colorPalette[1] %in% c("blue", "white")) "Synergy" else "Antagonism"), paste0("\u2264", 1 - cutoff), 1 - rev(plevels[plevels < cutoff]), 1 - plevels[plevels < cutoff], paste0("\u2264", 1 - cutoff)) ) }, key.title = title(main = "p-values", line = 1, cex.main = 1), xlim = xlim, ylim = ylim, zlim = maxshow*c(-1, 1), main = main, xlab = ylab, ylab = xlab, bty = "n" ) } else { if(reverse.x) xlim <- rev(extendrange(transformF(uniqueDoses$d1))) else xlim <- extendrange(transformF(uniqueDoses$d1)) if(reverse.y) ylim <- rev(extendrange(transformF(uniqueDoses$d2))) else ylim <- extendrange(transformF(uniqueDoses$d2)) filled.contour( x = transformF(uniqueDoses$d1), y = transformF(uniqueDoses$d2), z = matrix(maxRvalues, sapply(uniqueDoses, length)), levels = zlevels, color.palette = colorRampPalette(colorPalette, space = "rgb"), plot.axes = { axis(1, at = transformF(uniqueDoses$d1), labels = format(uniqueDoses$d1, ...), cex.axis = 0.8) axis(2, at = transformF(uniqueDoses$d2), labels = format(uniqueDoses$d2, ...), cex.axis = 0.8) points(expand.grid(x = transformF(uniqueDoses$d1), y = transformF(uniqueDoses$d2)), pch = 20, col = rgb(0, 0, 0, 0.3), ## Size proportional to maxR statistic cex = matrix(abs(origMaxRvalues)/2, sapply(uniqueDoses, length))) }, key.axes = { axis(4, at = c(0, zlevels), tck = -0.1, mgp = c(3, 0.3, 0), cex.axis = 0.7, labels = c( paste0("\u2191", if (colorPalette[1] %in% c("blue", "white")) "Antagonism" else "Synergy", "\n\n \n\n", "\u2193", if (colorPalette[1] %in% c("blue", "white")) 
"Synergy" else "Antagonism"), paste0("\u2264", 1 - cutoff), 1 - rev(plevels[plevels < cutoff]), 1 - plevels[plevels < cutoff], paste0("\u2264", 1 - cutoff)) ) }, key.title = title(main = "p-values", line = 1, cex.main = 1), xlim = xlim, ylim = ylim, zlim = maxshow*c(-1, 1), main = main, xlab = xlab, ylab = ylab, bty = "n" ) } } #' List non-additive points #' #' List all points with corresponding p-values declared non-additive by the #' maxR statistical test. #' #' @param maxR maxR statistics table returned by \code{Ymean} component from the #' output of \code{\link{maxR}} function. This can also be \code{"maxR"} #' element in the output of \code{\link{fitSurface}} function. #' @param B Iterations to use for the distribution of the maxR statistic. This #' is only used if \code{Ymean} dataframe does not have a \code{"distr"} attribute #' attached as is normally done when using \code{\link{fitSurface}} or \code{\link{maxR}} #' function. #' @return Returns a dataframe listing only dose combinations that exhibit #' significant deviations from the expected response surface. #' @export #' @examples #' \donttest{ #' data <- subset(directAntivirals, experiment == 2) #' ## Data must contain d1, d2 and effect columns #' fitResult <- fitMarginals(data) #' surf <- fitSurface(data, fitResult, statistic = "maxR") #' outsidePoints(surf$maxR$Ymean) #' } outsidePoints <- function(maxR, B = 10000) { if (is.null(attr(maxR, "distr"))) { n1 <- nrow(maxR) df0 <- attr(maxR, "df0") stopifnot(length(n1) == 1) stopifnot(length(df0) == 1) sim1 <- abs(matrix(rt(B*n1, df = df0), ncol = n1, byrow = TRUE)) distr <- apply(sim1, 1, max) #q1 <- quantile(apply(sim1, 1, max), cutoff) f <- ecdf(distr) } else f <- attr(maxR, "distr") resDF <- maxR[maxR$sign, ] resDF$`p-value` <- 1-f(resDF$absR) if (nrow(resDF) > 0) resDF[, c(1:2, 4, 8, 7)] else NULL }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/maxR-functions.R
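## Typical use of the maxR methods defined above on a fitSurface() result,
## following the package's own example data; experiment number and cutoff are arbitrary.
data <- subset(directAntivirals, experiment == 2)
fitResult <- fitMarginals(data)
surf <- fitSurface(data, fitResult, statistic = "maxR")
summary(surf$maxR)                  # overall call plus significant dose combinations
outsidePoints(surf$maxR$Ymean)      # the significant points as a data frame
plot(surf$maxR, cutoff = 0.95)      # contour plot of the local maxR statistics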
#' Compute maxR statistic for each off-axis dose combination #' #' \code{\link{maxR}} computes maxR statistics for each off-axis dose #' combination given the data provided. It provides a summary with results #' indicating whether a given point is estimated to be synergetic or #' antagonistic. These can be based either on normal approximation or a #' fully bootstrapped distribution of the statistics. #' #' @param doseGridOff dose grid for off-axis points #' @inheritParams fitSurface #' @inheritParams meanR #' @importFrom stats rt ecdf #' @return This function returns a \code{maxR} object with estimates for the #' maxR statistical test. \code{maxR} object is essentially a list with #' appropriately named elements. #' #' In particular, \code{maxR} object contains \code{"Ymean"} element which is #' a summary table of maxR test results for each dose combination. This table #' contains mean deviation from the predicted surface, normalized deviation #' (\code{"absR"}) as well as a statistical call whether this deviation is #' significant. Distributional information on which these calls are made can #' be retrieved from the attributes of the \code{"Ymean"} dataframe. #' #' Also, \code{maxR} object contains \code{"Call"} element which indicates the #' general direction of the deviation of the observed surface from the null. #' This call is based on the strongest local deviation in the \code{"Ymean"} #' table. 4 values are available here: \code{"Syn"}, \code{"Ant"}, #' \code{"None"}, \code{"Undefined"}. If one compound acts as an agonist while #' another one is an antagonist, then a deviation from the null is classified #' as \code{"Undefined"}. If both compounds act in the same direction, then a #' stronger than individual effect is classified as synergy while a weaker #' effect would be classified as antagonism. maxR <- function(data_off, fitResult, transforms = fitResult$transforms, null_model = c("loewe", "hsa", "bliss", "loewe2"), R, CP, reps, nested_bootstrap = FALSE, B.B = NULL, cutoff = 0.95, cl = NULL, B.CP = NULL, method = c("equal", "model", "unequal"), bootStraps, idUnique, n1, doseGridOff, transFun, invTransFun, ...) { ## Argument matching null_model <- match.arg(null_model) method <- match.arg(method) FStat <- getMaxRF(data_off, fitResult, method, CP, reps, transforms, null_model, R, n1, transFun = transFun, invTransFun = invTransFun) Ymean = data.frame(doseGridOff, R = FStat, absR = abs(FStat), "effect - predicted" = R) df0 = fitResult$df ## Use normal approximation if B.B is not provided. ## Otherwise, bootstrap the procedure to find the distribution. 
if (is.null(B.B)) { ## MN: find distribution & overall call & points B <- 1e5 # iterations to find distribution of M under null sim1 <- abs(matrix(rt(B*n1, df = df0), ncol = n1, byrow = TRUE)) M <- apply(sim1, 1, max) q <- quantile(M, cutoff) Rnull <- NULL } else { Rnull <- vapply(bootStraps, FUN.VALUE = FStat, function(x){ if(nested_bootstrap){ paramsBootstrap <- list("data" = x$data, "fitResult" = x$simFit, "transforms" = transforms, "null_model" = null_model) nestedBootstraps = lapply(integer(B.CP), bootFun, args = paramsBootstrap) CP = getCP(nestedBootstraps, null_model, transforms) } getMaxRF(data = x$data[x$data$d1 & x$data$d2,], fitResult = x$simFit, method = method, CP = CP, reps = reps, transforms = transforms, null_model = null_model, n1 = n1, idUnique = idUnique, respS = x$respS, transFun = transFun, invTransFun = invTransFun) }) M <- apply(abs(Rnull), 2, max) q <- quantile(M, cutoff) } coefFit = fitResult$coef eq1 <- coefFit["m1"] == coefFit["b"] eq2 <- coefFit["m2"] == coefFit["b"] inc1 <- coefFit["m1"] >= coefFit["b"] inc2 <- coefFit["m2"] >= coefFit["b"] dec1 <- coefFit["m1"] <= coefFit["b"] dec2 <- coefFit["m2"] <= coefFit["b"] call <- { if (max(Ymean$absR) > q) { invertCall <- Ymean$R[which.max(Ymean$absR)] < 0 if (eq1 & eq2) "Undefined" else if (inc1 & inc2) c("Syn", "Ant")[1 + invertCall] else if (dec1 & dec2) c("Ant", "Syn")[1 + invertCall] else "Undefined" } else "None" } Ymean$sign <- Ymean$absR > q Ymean$call <- "None" ## MN: here make call based on the parameters Ymean$call[Ymean$sign] <- if (eq1 & eq2) { "Undefined" } else if (inc1 & inc2) { c("Syn", "Ant")[1+(Ymean$R[Ymean$sign] < 0)] } else if (dec1 & dec2) { c("Ant", "Syn")[1+(Ymean$R[Ymean$sign] < 0)] } else "Undefined" attr(Ymean, "df0") <- df0 attr(Ymean, "cutoff") <- cutoff attr(Ymean, "q") <- q attr(Ymean, "distr") <- ecdf(M) ans <- list("Call" = call, "Ymean" = Ymean) class(ans) <- append(class(ans), "maxR") ans }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/maxR.R
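## Stand-alone illustration of the normal-approximation null distribution used in
## maxR() above: the threshold q is the `cutoff` quantile of the maximum of n1
## absolute t-variates with df0 degrees of freedom. n1, df0 and cutoff are made up.
set.seed(1)
n1 <- 20; df0 <- 40; cutoff <- 0.95; B <- 1e5
sim1 <- abs(matrix(rt(B * n1, df = df0), ncol = n1, byrow = TRUE))
M <- apply(sim1, 1, max)     # null distribution of the maxR statistic
quantile(M, cutoff)          # any |R| above this value is flagged as significant
1 - ecdf(M)(3.5)             # p-value for a hypothetical observed statistic of 3.5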
#' Summary of meanR object #' #' @param object Output from \code{\link{meanR}} #' @param ... Further arguments #' @export summary.meanR <- function(object, ...) { ans <- list() ans$bootstrapped <- !is.null(object$FDist) ans$dfs <- c("n1" = object$n1, "df0" = object$df0) ans$res <- data.frame("F" = object$FStat, "p-value" = object$p.value, check.names = FALSE) class(ans) <- append("summary.meanR", class(ans)) ans } #' Print summary of meanR object #' #' @param x Summary of meanR object #' @inheritParams summary.meanR #' @export print.summary.meanR <- function(x, ...) { type <- if (x$bootstrapped) "Bootstrapped" else "Exact" if (x$res$`p-value` < 2e-16) x$res$`p-value` <- "< 2e-16" else x$res$`p-value` <- paste("=", round(x$res$`p-value`, 4)) cat(paste(type, "meanR test (H0 = no synergy/antagonism):\n")) cat("\tF(", x$dfs["n1"], ",", x$dfs["df0"], ") = ", round(x$res$F, 4), " (p-value ", x$res$`p-value`, ")\n", sep="") } #' Plot bootstrapped cumulative distribution function of meanR null distribution #' #' @param x Output from \code{\link{meanR}} #' @inheritParams summary.meanR #' @export plot.meanR <- function(x, ...) { if (!is.null(x$FDist)) plot(x$FDist) else stop("CDF plotting only available for bootstrapped meanR.") }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/meanR-functions.R
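## Hypothetical usage of the meanR methods defined above: the exact test is the
## default, while supplying B.B to fitSurface() yields a bootstrapped null
## distribution whose ECDF can be plotted. Experiment and B.B are arbitrary.
data <- subset(directAntivirals, experiment == 1)
fitResult <- fitMarginals(data)
surfExact <- fitSurface(data, fitResult, statistic = "meanR")
summary(surfExact$meanR)      # exact F-test
surfBoot <- fitSurface(data, fitResult, statistic = "meanR", B.B = 200)
summary(surfBoot$meanR)       # bootstrapped p-value
plot(surfBoot$meanR)          # ECDF of the bootstrapped F statistics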
#' Compute meanR statistic for the estimated model #' #' \code{\link{meanR}} computes the meanR statistic for the provided model #' and returns the computed F-statistic and the estimated p-value. p-value #' can be calculated either by assuming an exact distribution or using #' bootstrapping procedure. In the latter case, null distribution of #' bootstrapped F-statistics is also returned. #' #' @param R Numeric vector containing mean deviation of predicted response #' surface from the observed one at each of the off-axis points. If missing, #' it will be calculated automatically from output of #' \code{\link{predictOffAxis}} function. #' @param CP Matrix which is part of covariance matrix for the \code{R} argument #' @param reps Numeric vector containing number of replicates for each off-axis #' dose combination. If missing, it will be calculated automatically from output #' of \code{\link{predictOffAxis}} function. #' @param cl If parallel computations are desired, \code{cl} should be a cluster #' object created by \code{\link[parallel]{makeCluster}}. If parallel #' computing is active, progress reporting messages are not necessarily #' ordered as it should be expected. #' @param bootStraps precomputed bootstrap objects #' @param paramsBootstrap parameters for the nested bootstrap #' @param idUnique unique combinations of on-axis points, a character vector #' @param n1 the number of off-axis points #' @param data_off data frame with off -axis information #' @param transFun,invTransFun the transformation and inverse transformation functions for the variance #' @param ... Further arguments that will be later passed to #' \code{\link{generateData}} function during bootstrapping #' @inheritParams fitSurface #' @importFrom progress progress_bar #' @importFrom stats ecdf pf #' @return This function returns a \code{meanR} object with estimates for the #' meanR statistical test. \code{meanR} object is essentially a list with #' appropriately named elements. #' #' \code{meanR} object list includes notably the calculated F-statistic, #' p-value and degrees of freedom (\code{"n1"} and \code{"df0"} respectively) #' used to find the critical value of the F-distribution under the null. #' #' If \code{\link{meanR}} test is run with bootstrapping, then p-value #' estimate is based on bootstrapped null distribution of test statistic and an #' additional element \code{"FDist"} (of class \code{"ecdf"}) is returned. meanR <- function(data_off, fitResult, transforms = fitResult$transforms, null_model = c("loewe", "hsa", "bliss", "loewe2"), R, CP, reps, nested_bootstrap = FALSE, B.B = NULL, B.CP = NULL, cl = NULL, method = c("equal", "model", "unequal"), bootStraps, paramsBootstrap, idUnique, n1, transFun, invTransFun, ...) 
{ ## Argument matching null_model <- match.arg(null_model) method <- match.arg(method) df0 = fitResult$df if (all(reps == 1) && method %in% c("model", "unequal")) { stop("Replicates are required when choosing the method 'model' or 'unequal'") } FStat <- getMeanRF(data_off, fitResult, method, CP, reps, transforms, null_model, R, n1, idUnique, transFun = transFun, invTransFun = invTransFun) if (is.null(B.B)) { ans <- list("FStat" = FStat, "p.value" = pf(FStat, n1, df0, lower.tail = FALSE), "n1" = n1, "df0" = df0) class(ans) <- append("meanR", class(ans)) return(ans) } FStatb <- vapply(bootStraps, FUN.VALUE = FStat, function(x) { if(nested_bootstrap){ paramsBootstrap <- list("data" = x$data, "fitResult" = x$simFit, "transforms" = transforms, "null_model" = null_model) nestedBootstraps = lapply(integer(B.CP), bootFun, args = paramsBootstrap) CP = getCP(nestedBootstraps, null_model, transforms) } getMeanRF(data = x$data[x$data$d1 & x$data$d2,], fitResult = x$simFit, method = method, CP = CP, reps = reps, transforms = transforms, null_model = null_model, n1 = n1, idUnique = idUnique, respS = x$respS, transFun = transFun, invTransFun = invTransFun) }) pvalb <- mean(FStatb >= FStat) ans <- list("FStat" = FStat, "FDist" = ecdf(FStatb), "p.value" = pvalb, "n1" = n1, "df0" = df0) class(ans) <- append("meanR", class(ans)) ans }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/meanR.R
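## Toy numeric illustration of the two p-value routes in meanR() above: the exact
## F(n1, df0) reference distribution versus an empirical bootstrap distribution.
## FStat, n1, df0 and the stand-in bootstrap sample below are invented.
FStat <- 2.4; n1 <- 20; df0 <- 40
pf(FStat, n1, df0, lower.tail = FALSE)   # exact p-value
set.seed(1)
FStatb <- rf(1000, n1, df0)              # stand-in for bootstrapped F statistics
mean(FStatb >= FStat)                    # bootstrap p-value, as computed in meanR()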
#' Calculate model variance, assuming variance increases linearly with mean
#' @importFrom stats lm.fit
#' @param dat_off off-axis points data
#' @param transFun,invTransFun the transformation and inverse transformation functions for the variance
#' @return the predicted model variance
modelVar = function(dat_off, transFun, invTransFun){
    off_mean <- with(dat_off, tapply(effect, d1d2, mean))
    off_var <- with(dat_off, tapply(effect, d1d2, var))
    ## Regress the (transformed) per-combination variances on the means and keep
    ## the observed range of variances as bounds for the predictions
    linmod <- c(lm.fit(y = transFun(off_var), x = cbind(1, off_mean))$coef,
                "min" = min(off_var), "max" = max(off_var))
    predictVar(off_mean, linmod, invTransFun)
}
/scratch/gouwar.j/cran-all/cranData/BIGL/R/modelVar.R
#' Make a mean-variance plot
#' @param data a dataset or matrix with d1, d2 and effect column
#' @param trans,invtrans the transformation function for the variance and its inverse, possibly as strings
#' @param main the title of the plot
#' @param log log-transform of the axes, as in plot()
#' @param ... passed on to plot()
#' @return Plots the mean-variance trend
#' @export
#' @importFrom graphics lines
#' @details This is a crucial graphical check for deciding on the variance
#' model (the \code{method} argument) to be used in \code{\link{fitSurface}}.
plotMeanVarFit = function(data, trans = "identity",
                          invtrans = switch(trans, "identity" = "identity", "log" = "exp"),
                          main = paste(switch(trans, "identity" = "No", "log" = "log"), "transformation"),
                          log = switch(trans, "identity" = "", "log" = "y", ""), ...){
    transFun = match.fun(trans); invtransFun = match.fun(invtrans)
    if(!all(c("d1", "d2", "effect") %in% colnames(data)))
        stop("Data must contain d1, d2 and effect columns!")
    dat_off = data[data$d1 & data$d2, ]
    dat_off$d1d2 = getd1d2(dat_off)
    off_mean <- with(dat_off, tapply(effect, d1d2, mean))
    off_var <- with(dat_off, tapply(effect, d1d2, var))
    predVar <- modelVar(dat_off, transFun, invtransFun)
    if(any(predVar < 0)) warning("Negative variances modelled!\n")
    plot(off_mean, off_var, log = log, ylab = "Variance", xlab = "Mean", main = main, ...)
    lines(sort(off_mean), sort(predVar))
}
/scratch/gouwar.j/cran-all/cranData/BIGL/R/plotMeanVarFit.R
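## Hypothetical diagnostic use of plotMeanVarFit() defined above, assuming the
## selected experiment contains replicated off-axis measurements (the variance per
## dose combination must be estimable): compare the untransformed and
## log-transformed variance fits before choosing method = "model" in fitSurface().
data <- subset(directAntivirals, experiment == 1)
plotMeanVarFit(data)                   # variance modelled on the original scale
plotMeanVarFit(data, trans = "log")    # variance modelled on the log scale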
#' Plot response surface #' #' Plot the 3-dimensional response surface predicted by one of the null #' models. This plot allows for a visual comparison between the null #' model prediction and observed points. This function is mainly used #' as the workhorse of \code{\link{plot.ResponseSurface}} method. #' #' @param data Object "data" from the output of \code{\link{fitSurface}} #' @param fitResult Object "fitResult" from the output of \code{\link{fitSurface}} #' @param transforms Object "transforms" from the output of \code{\link{fitSurface}} #' @param predSurface Vector of all predicted responses based on #' \code{expand.grid(uniqueDoses)}. If not supplied, it will be computed #' with \code{\link{predictOffAxis}} function. #' @param null_model If \code{predSurface} is not supplied, it is computed using #' one of the available null models, i.e. \code{"loewe"}, \code{"hsa"}, #' \code{"bliss"} and \code{"loewe2"}. See also \code{\link{fitSurface}}. #' @param breaks Numeric vector with numerical breaks. To be used in conjunction #' with \code{colorPalette} argument. If named, the labels will be displayed in the legend #' @param colorPalette Vector of color names for surface #' @param colorPaletteNA Color used in the matrix of colours when the combination of doses doesn't exist (NA) #' @param colorBy This parameter determines values on which coloring is based #' for the 3-dimensional surface. If matrix or a data frame with \code{d1} and #' \code{d2} columns is supplied, dose combinations from \code{colorBy} will be #' matched automatically to the appropriate dose combinations in \code{data}. #' Unmatched dose combinations will be set to 0. This is especially useful for #' plotting results for off-axis estimates only, e.g. off-axis Z-scores or #' maxR test statistics. If \code{colorBy = "colors"}, surface will be colored #' using colors in \code{colorPalette} argument. #' @param addPoints Boolean whether the dose points should be included #' @param colorPoints Colors for off-axis and on-axis points. Character vector #' of length four with colors for 1) off-axis points; 2) on-axis points of the #' first drug (i.e. second drug is dosed at zero); 3) on-axis points of the #' second drug; 4) on-axis points where both drugs are dosed at zero. #' @param radius Size of spheres (default is 4) #' @param logScale Draw doses on log-scale (setting zeroes to be finite constant) #' @param zTransform Optional transformation function for z-axis. By default, #' identity function is used. #' @param main Fixed non-moving title for the 3D plot #' @param legend Whether legend should be added (default FALSE) #' @param add (deprecated) Add the predicted response surface to an existing plot. Will not #' draw any points, just the surface. Must be called after another call to #' \code{\link{plotResponseSurface}}. #' @param xat x-axis ticks: "pretty", "actual" or a numeric vector #' @param yat y-axis ticks: "pretty", "actual" or a numeric vector #' @param colorfun If replicates in \code{colorBy} variable are present, these #' will be aggregated using \code{colorfun} function. This can also be a #' custom function returning a scalar. #' @param plotfun If replicates for dose combinations in \code{data} are #' available, points can be aggregated using \code{plotfun} function. #' Typically, it will be \code{\link{mean}}, \code{\link[stats]{median}}, #' \code{\link{min}} or \code{\link{max}} but a custom-defined function #' returning a scalar from a vector is also possible. 
#' @param gradient Boolean indicating whether colours should be interpolated between breaks (default TRUE). #' If FALSE, \code{colorPalette} must contain length(breaks)-1 colours #' @param width Width in pixels (optional, defaults to 800px). #' @param height Height in pixels (optional, defaults to 800px). #' @param title String title (default "") #' @param digitsFunc Function to be applied to the axis values #' @param reverse Boolean indicating whether colours should be reversed (default FALSE). #' @param ... Further arguments to format axis labels #' @return Plotly plot #' @importFrom stats median predict quantile #' @importFrom grDevices axisTicks colorRampPalette colors terrain.colors #' @importFrom graphics plot.new #' @importFrom plotly plot_ly add_surface add_markers layout config #' @export #' @examples #' \dontrun{ #' data <- subset(directAntivirals, experiment == 1) #' ## Data must contain d1, d2 and effect columns #' fitResult <- fitMarginals(data) #' data_mean <- aggregate(effect ~ d1 + d2, data = data[, c("d1", "d2", "effect")], #' FUN = mean) #' #' ## Construct the surface from marginal fit estimates based on HSA #' ## model and color it by mean effect level #' plotResponseSurface(data, fitResult, null_model = "hsa", #' colorBy = data_mean, breaks = 10^(c(0, 3, 4, 6)), #' colorPalette = c("grey", "blue", "green")) #' #' ## Response surface based on Loewe additivity model and colored with #' ## rainbow colors. #' plotResponseSurface(data, fitResult, null_model = "loewe", breaks = c(-Inf, 0, Inf), #' colorBy = "colors", colorPalette = rainbow(6)) #' } plotResponseSurface <- function(data, fitResult = NULL, transforms = fitResult$transforms, predSurface = NULL, null_model = c("loewe", "hsa", "bliss", "loewe2"), colorPalette = c("red", "grey70", "blue"), colorPaletteNA = "grey70", colorBy = "none", addPoints = TRUE, colorPoints = c("black", "sandybrown", "brown", "white"), breaks, # = c(-Inf, 0, Inf) radius = 4, logScale = TRUE, colorfun = median, zTransform = function(x) x, add = FALSE, main = "", legend = FALSE, xat = "actual", yat = "actual", plotfun = NULL, gradient = TRUE, width = 800, height = 800, title = "", digitsFunc = function(x) {x}, reverse = FALSE, ... ) { ## Argument matching null_model <- match.arg(null_model) if (missing(fitResult) & missing(predSurface)) stop("Marginals fit result or predicted surface need to be supplied.") if (is.character(colorBy) & all(colorBy %in% colors())) { colorPalette <- colorBy colorBy <- "colors" } if(!is.null(names(colorPalette))){ colorPalette <- colorPalette[c("Ant", "None", "Syn")] } ## Calculate extra arguments uniqueDoses <- with(data, list("d1" = sort(unique(d1)), "d2" = sort(unique(d2)))) doseGrid <- expand.grid(uniqueDoses) logT <- function(z) log(z + 0.5 * min(z[z != 0])) log10T <- function(z) log10(z + 0.5 * min(z[z != 0])) ## Transform function for doses transformF <- if (logScale) log10T else function(z) z zGrid <- predSurface ## If marginal fit information is provided, response surface can be ## automatically calculated. if (is.null(predSurface)) { respSurface <- predictResponseSurface(doseGrid, fitResult, null_model = null_model, transforms = transforms) if (!is.null(transforms)) { predSurface <- with(transforms, InvPowerT(respSurface, compositeArgs)) } else { predSurface <- respSurface } zGrid <- predSurface } ## If colorVec is a matrix with d1 and d2 columns, reorder it so that it ## matches the data ordering. 
if (inherits(colorBy, c("matrix", "data.frame"))) { stopifnot(c("d1", "d2") %in% colnames(colorBy)) colorVec <- colorBy colorBy <- "asis" coloredBy <- colorVec cols <- colnames(coloredBy) pCols <- which(!(cols %in% c("d1", "d2"))) if (length(pCols) > 1) pCols <- min(pCols) if (any(duplicated(coloredBy[, c("d1", "d2")]))) { coloredBy <- aggregate(coloredBy[, pCols], by = coloredBy[, c("d1", "d2")], FUN = colorfun) } colorVec <- rep(NA, nrow(doseGrid)) for (i in 1L:nrow(doseGrid)) { ind <- match(paste(doseGrid[i,], collapse=";"), apply(coloredBy[, c("d1", "d2")], 1, function(x) paste(x, collapse=";"))) if("effectCall" %in% names(coloredBy)){ if (!is.na(ind)) colorVec[i] <- as.character(coloredBy[[pCols]][ind]) } else { if (!is.na(ind)) colorVec[i] <- coloredBy[[pCols]][ind] } } } dataOffAxis <- with(data, data[d1 & d2, , drop = FALSE]) predOffAxis <- predSurface[cbind(match(dataOffAxis$d1, uniqueDoses$d1), match(dataOffAxis$d2, uniqueDoses$d2))] if (nrow(dataOffAxis) == 0) { warning("No off-axis observations were found. Surface won't be custom colored..") colorBy <- "none" } surfaceColors <- colorRampPalette(colorPalette)(length(breaks) - 1) getFF = function(response){ if(is.numeric(response)) { cut(response, breaks = breaks, include.lowest = TRUE) } else if(is.factor(response)){ response } else if(is.character(response)){ if (reverse) { # b >= m1, m2 decreasing curves so labels are changed; if b < m1, m2 colours preserved factor(response, levels = c("Syn", "None", "Ant"), labels = c("Syn", "None", "Ant"), ordered = TRUE) } else { factor(response, levels = c("Ant", "None", "Syn"), labels = c("Ant", "None", "Syn"), ordered = TRUE) } } } surfaceColor <- function(response) { ff <- getFF(response) zcol <- surfaceColors[ff] return(zcol) } getLabels <- function(response) { ff <- getFF(response) labels <- gsub(",", ", ", levels(ff)) return(labels) } ## Generate colors for the surface plot if (colorBy == "asis") { if(is.numeric(colorVec)) colorVec[is.na(colorVec)] <- 0 zcol <- surfaceColor(colorVec) labels <- getLabels(colorVec) } else if (colorBy == "colors") { ## Use specified colors and recycle if necessary zcol <- rep(colorPalette, length(zGrid)) } else { ## Generate colors from terrain.colors by looking at range of zGrid zGridFloor <- floor(100 * zGrid) col <- terrain.colors(diff(range(zGridFloor, na.rm = TRUE))) zcol <- col[zGridFloor - min(zGridFloor, na.rm = TRUE) + 1] } ## ## 3-dimensional surface plotting ## labnames <- c("Response", if (!is.null(fitResult$names)) fitResult$names else c("Compound 1", "Compound 2")) if (!is.null(attr(data, "orig.colnames"))) labnames <- attr(data, "orig.colnames") ## Plot with no axes/labels if (!is.null(plotfun)) data <- aggregate(effect ~ d1 + d2, data, FUN = plotfun)[, names(data)] ## Get x and y ticks ## TODO: use scales:log_breaks()(range(x)) if (!is.numeric(xat)) { xat <- match.arg(xat, c("pretty", "actual")) if (xat == "pretty") { xlab <- axisTicks(range(transformF(uniqueDoses$d1)), log = logScale, nint = 3) if (logScale && length(xlab) > 4) xlab <- xlab[!(log10(xlab) %% 1)] } else xlab <- uniqueDoses$d1 xat <- transformF(xlab) } else { xlab <- xat xat <- transformF(xat) } if (!is.numeric(yat)) { yat <- match.arg(yat, c("pretty", "actual")) if (yat == "pretty") { ylab <- axisTicks(range(transformF(uniqueDoses$d2)), log = logScale, nint = 3) if (logScale && length(ylab) > 4) ylab <- ylab[!(log10(ylab) %% 1)] } else ylab <- uniqueDoses$d2 yat <- transformF(ylab) } else { ylab <- yat yat <- transformF(yat) } MatrixForColor <- 
matrix(as.numeric(factor(zcol, levels = unique(surfaceColors))), nrow = nrow(zGrid), ncol = ncol(zGrid)) if (any(is.na(MatrixForColor))) { if (missing(colorPaletteNA)) colorPaletteNA <- "grey70" if (!colorPaletteNA %in% colorPalette) stop("Please indicate a colour for `colorPaletteNA` that is present in the `colorPalette` list") MatrixForColor[is.na(MatrixForColor)] <- which(colorPalette %in% colorPaletteNA)[1] } # Layout setting (x, y and z axis) axx <- list( backgroundcolor = "rgb(250, 250, 250)", gridcolor = "rgb(150, 150, 150)", showbackground = TRUE, ticketmode = 'array', ticktext = digitsFunc(as.numeric(xlab)), tickvals = xat, title = fitResult$names[1] ) axy <- list( backgroundcolor = "rgb(250, 250, 250)", gridcolor = "rgb(150, 150, 150)", showbackground = TRUE, ticketmode = 'array', ticktext = digitsFunc(as.numeric(ylab)), tickvals = yat, title = fitResult$names[2] ) axz <- list( backgroundcolor = "rgb(250, 250, 250)", gridcolor = "rgb(150, 150, 150)", showbackground = TRUE, title = "Response" ) data$color <- colorPoints[1 + 1 * (data$d2 == 0) + 2 * (data$d1 == 0)] data$color <- factor(data$color, levels = colorPoints) data$color <- droplevels(data$color) # Plot the main surface p <- plotly::plot_ly( height = height, width = width, colors = unique(colorPalette), type = "surface", showlegend = FALSE ) if (gradient) { p <- plotly::add_surface( p, x = transformF(uniqueDoses$d1), y = transformF(uniqueDoses$d2), #NOTE: Plotly requires to transpose the zGrid matrix (and consequently the MatrixForColor) z = zTransform(t(as.matrix(zGrid))), opacity = 0.8, surfacecolor = t(MatrixForColor), cauto = FALSE, colors = unique(surfaceColors), cmin = 1, cmax = length(unique(surfaceColors)), text = "", hoverinfo = 'text', showlegend = FALSE, showscale = FALSE ) } else { colorPaletteHex <- col2hex(unique(colorPalette)) colorVecAlt <- colorPaletteHex[sort(unique(as.numeric(MatrixForColor)))] mz <- seq(0,1, length.out = length(colorVecAlt)+1) if (length(mz) > 2) mz <- c(mz[1], rep(mz[2:(length(mz)-1)], each = 2), mz[length(mz)]) custom_colors <- data.frame( z = mz, col = rep(colorVecAlt, each = 2) ) p <- plotly::add_surface( p, x = transformF(uniqueDoses$d1), y = transformF(uniqueDoses$d2), #NOTE: Plotly requires to transpose the zGrid matrix (and consequently the MatrixForColor) z = zTransform(t(as.matrix(zGrid))), # zauto = FALSE, # opacity = 0.8, surfacecolor = t(MatrixForColor), colorscale = custom_colors, # cauto = FALSE, text = "", hoverinfo = 'text', showlegend = FALSE, showscale = FALSE ) } if (legend) { # Categorical legend (only if colorPalette was named) if (!is.null(names(colorPalette))) { uColorPalette <- colorPalette[!duplicated(colorPalette)] for (xcolor in names(uColorPalette)) { p <- add_markers( p, x = -900, y = -900, color = I(uColorPalette[as.character(xcolor)]), size = 10, legendgroup = "call", name = xcolor, opacity = 1, showlegend = TRUE, marker = list(symbol = "square") ) } p <- layout( p, xaxis = list( showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE ), yaxis = list( showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE ), legend = list( title = "Call:", itemsizing = 'constant', font = list(size = 10), orientation = "h", # show entries horizontally xanchor = "center", # use center of legend as anchor x = 0.5, y = 0 ) ) } else { #TODO Add legend for continuous variables } } if (addPoints) { # Legend names pointNames <- c( paste0("Combination \n", paste(fitResult$names, collapse = " and ")), fitResult$names, "Dose 0 for both compounds" ) names(pointNames) <- 
colorPoints ## add points (spheres) for (xcolor in levels(data$color)) { df <- data.frame( d1_transf = transformF(data$d1)[data$color == xcolor], d1 = data$d1[data$color == xcolor], d2_transf = transformF(data$d2)[data$color == xcolor], d2 = data$d2[data$color == xcolor], effect = zTransform(data$effect)[data$color == xcolor] ) p <- plotly::add_markers( p = p, opacity = 1, x = df$d1_transf, y = df$d2_transf, z = df$effect, marker = list(color = xcolor, size = radius, line = list(color = "#333", width = 1)), showlegend = legend, text = paste0("d1: ", digitsFunc(df$d1), "\nd2: ", digitsFunc(df$d2), "\neffect: ", digitsFunc(df$effect)), hoverinfo = "text", name = pointNames[xcolor], legendgroup = "points" ) } # set legend layout p <- plotly::layout( p = p, title = title, scene = list( xaxis = axx, yaxis = axy, zaxis = axz, aspectratio = list(x = 1, y = 1, z = 1.1) ), legend = list( itemsizing = 'constant', font = list(size = 10), orientation = "h", # show entries horizontally xanchor = "center", # use center of legend as anchor x = 0.5, y = 0 ) ) } p <- layout( p, scene = list( xaxis = axx, yaxis = axy, zaxis = axz, aspectratio = list(x = 1, y = 1, z = 1.1) ), title = title ) p <- config( p, displaylogo = FALSE, modeBarButtonsToRemove = c("orbitRotation", "resetCameraLastSave3d", "hoverClosest3d") ) p }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/plotReponseSurface.R
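## Hypothetical example of colouring the predicted surface by the local maxR
## statistics from fitSurface(); the breaks and colour palette are arbitrary choices.
data <- subset(directAntivirals, experiment == 1)
fitResult <- fitMarginals(data)
surf <- fitSurface(data, fitResult, null_model = "loewe", statistic = "maxR")
plotResponseSurface(data, fitResult, null_model = "loewe",
                    colorBy = surf$maxR$Ymean[, c("d1", "d2", "R")],
                    breaks = c(-Inf, -2, 2, Inf),
                    colorPalette = c("blue", "grey70", "red"))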
#' Compute off-axis predictions #' #' Given a dataframe with dose-response data, this function uses coefficient #' estimates from the marginal (on-axis) monotherapy model to compute the #' expected values of response at off-axis dose combinations using a provided #' null model. #' #' @param doseGrid A dose grid with unique combination of doses #' @param fit a pre-calculated off-axis fit #' @param ... Further arguments passed on to the Loewe fitters #' @inheritParams fitSurface #' @return This functions returns a named vector with predicted off-axis points #' @export #' @examples #' data <- subset(directAntivirals, experiment == 1) #' ## Data must contain d1, d2 and effect columns #' transforms <- getTransformations(data) #' fitResult <- fitMarginals(data, transforms) #' uniqueDoses <- with(data, list("d1" = sort(unique(data$d1)), #' "d2" = sort(unique(data$d2)))) #' doseGrid <- expand.grid(uniqueDoses) #' predictOffAxis(fitResult, null_model = "hsa", doseGrid = doseGrid) predictOffAxis <- function(doseGrid, fitResult, transforms = fitResult$transforms, null_model = c("loewe", "hsa", "bliss", "loewe2"), fit = NULL,...) { nm = match.arg(null_model) if(is.null(fit)){ fit = fitOffAxis(doseGrid, fitResult, nm, ...) } out = (if(nm %in% c("loewe")) fit$response else fit)[doseGrid$d1 & doseGrid$d2] names(out) = getd1d2(doseGrid[doseGrid$d1 & doseGrid$d2, ]) if (!is.null(transforms)) { CompositeT <- with(transforms, function(y, args) PowerT(BiolT(y, args), args)) out <- with(transforms, CompositeT(out, compositeArgs)) } return(out) } fitOffAxis = function(doseGrid, fitResult, null_model = c("loewe", "hsa", "bliss", "loewe2"), ...){ nm = match.arg(null_model) switch(nm, "loewe" = generalizedLoewe(doseGrid, fitResult$coef, ...), "hsa" = hsa(doseGrid, fitResult$coef), "bliss" = Blissindependence(doseGrid, fitResult$coef), "loewe2" = harbronLoewe(doseGrid, fitResult$coef,...) ) }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/predictOffAxis.R
#' Predict the entire response surface, so including on-axis points, and return
#' the result as a matrix. For plotting purposes.
#' @inheritParams predictOffAxis
#' @inheritParams fitSurface
predictResponseSurface = function(doseGrid, fitResult, null_model,
                                  transforms = fitResult$transforms){
    fit = fitOffAxis(doseGrid, fitResult, null_model, startvalues = NULL)
    vec = (if(null_model %in% c("loewe")) fit$response else fit)
    names(vec) = getd1d2(doseGrid)
    if (!is.null(transforms)) {
        CompositeT <- with(transforms, function(y, args) PowerT(BiolT(y, args), args))
        vec <- with(transforms, CompositeT(vec, compositeArgs))
    }
    out = matrix(0, length(unique(doseGrid$d1)), length(unique(doseGrid$d2)),
                 dimnames = list(sort(unique(doseGrid$d1)), sort(unique(doseGrid$d2))))
    for(n in names(vec)){
        foo = strsplit(n, split = "_")[[1]]
        out[foo[1], foo[2]] = vec[n]
    }
    out
}
/scratch/gouwar.j/cran-all/cranData/BIGL/R/predictResponseSurface.R
#' Run the BIGL application for demonstrating response surfaces
#'
#' @param ... Pass parameters to \code{\link[shiny]{runApp}}
#' @export
#' @examples
#' \dontrun{
#'   runBIGL()
#' }
runBIGL <- function(...){
  if (requireNamespace("shiny", quietly = TRUE))
    shiny::runApp(appDir = system.file("ui", package = "BIGL"), ...)
  else
    stop("shiny package needs to be installed for the interactive application to work.")
}
/scratch/gouwar.j/cran-all/cranData/BIGL/R/runBIGL.R
#' Functions for scaling, and rescaling residuals. May lead to unstable behaviour in practice #' @details Residuals are calculated with respect to the average observation on #' the off-axis point, so replicates are required! #' @param sampling_errors A vector of raw residuals #' @param ... passed on to predictVar scaleResids = function(sampling_errors, ...){ predVar = predictVar(...) sampling_errors/sqrt(predVar) } #' Backscale residuals #' @param scaledResids scaled residuals #' @inheritParams scaleResids backscaleResids = function(scaledResids, ...){ predVar = predictVar(...) scaledResids*sqrt(predVar) } #'Predict variance #' @param means a vector of means #' @inheritParams generateData predictVar = function(means, model, invTransFun){ predVar = invTransFun(model[1] + model[2]*means) if(model["min"] == 0){ predVar[predVar<=0] = 0.000001 #Correct for negative variances } else { predVar[predVar<=0] = model["min"] #Correct for negative variances } predVar[predVar > model["max"]] = model["max"] #Upper bound predVar } #' Add residuals by adding to mean effects #' @inheritParams scaleResids #' @inheritParams predictVar addResids = function(means, ...){ means + sampleResids(means, ...) } #' Sample residuals according to a new model #' @inheritParams fitSurface #' @inheritParams predictVar #' @inheritParams scaleResids #' @return sampled residuals sampleResids = function(means, sampling_errors, method, rescaleResids,...){ if(method %in% c("equal", "unequal")){ return(sample(sampling_errors, size = length(means), replace = TRUE)) } else if(method == "model"){ resids = if(rescaleResids){ scaledResids = scaleResids(sampling_errors, means, ...) sampledResids = sample(scaledResids, replace = TRUE) backscaleResids(sampledResids, means, ...) } else{ rnorm(length(means), sd = sqrt(predictVar(means, ...))) } return(resids) } else{} } #' Sample residuals according to a new model #' @inheritParams fitSurface #' @inheritParams predictVar #' @inheritParams scaleResids #' @inheritParams bootConfInt #' @importFrom stats rgamma #' @return sampled residuals wildbootAddResids <- function(means, sampling_errors, method, rescaleResids, model, invTransFun, wild_bootstrap, wild_bootType,...){ if(wild_bootstrap){ errors = switch(wild_bootType, # Rademacher "rademacher" = {sampling_errors*(2*rbinom(length(means), size = 1, prob = 0.5)-1)}, # Rademacher distribution "gamma" = {sampling_errors*(rgamma(length(means),shape = 4, scale = 0.5)-2)}, # Gamma distribution # Normal "normal" = {noff <- length(means) mu1 <- 0.5*(sqrt(17/6)+sqrt(1/6)) mu2 <- 0.5*(sqrt(17/6)-sqrt(1/6)) W1 = rnorm(noff,mu1,sqrt(0.5)) Z1 = rnorm(noff,mu2,sqrt(0.5)) sampling_errors*(W1*Z1-mu1*mu2)}, # Normal distribution # Two-point distribution by Mammen (1993) "two-point" = {vals <- c(-(sqrt(5)-1)/2, (sqrt(5)+1)/2) probs <- rev(abs(vals)/sqrt(5)) sampling_errors*sample(vals, size = length(means), replace = TRUE, prob = probs)}) }else{ errors <- sampleResids(means, sampling_errors, method, rescaleResids, model, invTransFun,...) } means+errors }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/scaleResids.R
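## Stand-alone sketch of the Rademacher wild bootstrap scheme used in
## wildbootAddResids() above: each residual keeps its magnitude but receives a
## random sign, so heteroscedasticity across dose combinations is preserved.
## The means and residuals below are invented for illustration.
set.seed(123)
means <- c(50, 40, 30)                # predicted off-axis means
sampling_errors <- c(2.5, -1.0, 4.2)  # observed raw residuals
signs <- 2 * rbinom(length(means), size = 1, prob = 0.5) - 1
means + sampling_errors * signs       # one wild-bootstrap resample of the responses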
globalVariables(c("loewe", "loewe2", "hsa", "bliss", "predicted", "d2", "effect")) #' Plot 2D cross section of response surface #' @param ls list of results objects obtained from \code{\link{fitSurface}}. Names of list objects #' expected to be one of the null model options i.e. loewe, loewe2, hsa, bliss #' @param xlab label for x-axis #' @param ylab label for y-axis #' @param color plot lines in colour? Defaults to FALSE #' @param plotBy compound name to be used for order of plotting. If plotBy = "Compound 1" then plots are split by #' concentrations in Compound 1 and concentrations in Compound 2 are shown on the x-axis. #' @author Mohammed Ibrahim #' @examples #' \dontrun{ #' data <- subset(directAntivirals, experiment == 1) #' transforms <- list("PowerT" = function(x, args) with(args, log(x)), #' "InvPowerT" = function(y, args) with(args, exp(y)), #' "BiolT" = function(x, args) with(args, N0 * exp(x * time.hours)), #' "InvBiolT" = function(y, args) with(args, 1/time.hours * log(y/N0)), #' "compositeArgs" = list(N0 = 1, time.hours = 72)) #' fitResult <- fitMarginals(data, transforms) #' nullModels <- c("loewe", "loewe2", "bliss", "hsa") #' rs_list <- Map(fitSurface, null_model = nullModels, MoreArgs = list( #' data = data, fitResult = fitResult, B.CP = 50, statistic = "none")) #' synergy_plot_bycomp(ls = rs_list, plotBy = "Compound 1", color = TRUE) #' synergy_plot_bycomp(ls = rs_list, plotBy = "Compound 2", color = TRUE) #' } #' @export synergy_plot_bycomp <- function(ls, xlab = NULL, ylab = NULL, color = FALSE, plotBy = NULL) { ls <- Filter(function(x) { !inherits(x, "try-error") && !is.null(x) }, ls) nmes <- names(ls) tmp <- lapply(nmes, function(nn) { res <- ls[[nn]]$offAxisTable[, c("d1", "d2", "effect", "predicted")] if(!is.null(ls[[nn]]$transforms)){ res$effect <- ls[[nn]]$transforms$InvPowerT(res$effect, args = ls[[nn]]$transforms$compositeArgs) res$predicted <- ls[[nn]]$transforms$InvPowerT(res$predicted, args = ls[[nn]]$transforms$compositeArgs) } colnames(res)[colnames(res) == "predicted"] <- nn res }) plot_df <- tmp[[1]] if (length(tmp) > 1) { for (i in 2:length(tmp)) { plot_df <- merge(plot_df, tmp[[i]], by = c("d1", "d2", "effect")) } } if (is.null(plotBy)) { plotBy <- ls[[1]]$names[1] } else { if (!plotBy %in% ls[[1]]$names) { warning("Unrecognized name in `plotBy`, ", toString(ls[[1]]$names[[1]]), " will be used.") plotBy <- ls[[1]]$names[1] } else if (plotBy == ls[[1]]$names[2]) { names(plot_df)[1:2] <- names(plot_df)[2:1] if (is.null(xlab)) { xlab <- paste(ls[[1]]$names[1], "Concentration") } } } if (is.null(xlab)) { xlab <- paste(ls[[1]]$names[2], "Concentration") } facet_d1 <- unique(plot_df$d1) facet_d1 <- facet_d1[order(facet_d1)] plot_df$d1 <- factor(plot_df$d1, levels = facet_d1, labels = formatC(facet_d1, format = "fg", digits = 2)) allLabels <- c("Bliss" = "bliss", "HSA" = "hsa", "Generalized Loewe" = "loewe", "Alternative Loewe" = "loewe2") allValues <- c("Bliss" = 1, "HSA" = 2, "Generalized Loewe" = 3, "Alternative Loewe" = 4) labels <- allLabels[which(allLabels %in% nmes)] values <- allValues[which(allLabels %in% nmes)] breaks <- unique(plot_df$d2) breakLabels <- trimws(formatC(breaks, format = "fg", digits = 1)) if (color) { color_vec <- c( "loewe2" = "gold3", #"Alternative Loewe" "bliss" = "green3", #"Bliss" "loewe" = "coral2", #"Generalized Loewe" "hsa" = "cornflowerblue" #"HSA" ) color_vec <- color_vec[names(color_vec) %in% labels] labels <- labels[match(names(color_vec), labels)] p <- ggplot(plot_df, aes(x = d2, y = effect)) + geom_point() + 
facet_wrap(~d1) + scale_x_continuous(trans = "log10", breaks = breaks, labels = breakLabels) + #scale_color_manual(values = values, labels = names(labels)) + scale_color_manual(values = as.character(color_vec[labels]), labels = names(labels)) + theme_bw() + theme( panel.grid = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5), legend.position = "top", legend.justification = c(0.5,0), legend.title = element_text(hjust = 0.5), legend.background = element_rect(linewidth = 0.25, color = "black")) + labs(x = xlab, y = ylab, color = "Model") + guides(color = guide_legend(nrow = 1, byrow = T)) if ("bliss" %in% labels) { p <- p + geom_line(aes(y = bliss, color = "Bliss")) } if ("hsa" %in% labels) { p <- p + geom_line(aes(y = hsa, color = "HSA")) } if ("loewe" %in% labels) { p <- p + geom_line(aes(y = loewe, color = "Generalized Loewe")) } if ("loewe2" %in% labels) { p <- p + geom_line(aes(y = loewe2, color = "Alternative Loewe")) } } else { line_vec <- c( "Alternative Loewe" = 1, #"loewe2" "Bliss" = 2, #"bliss" "Generalized Loewe" = 3, #"loewe" "HSA" = 4 #"hsa" ) line_vec <- line_vec[names(line_vec) %in% names(labels)] labels <- labels[match(names(line_vec), names(labels))] p <- ggplot(plot_df, aes(x = d2, y = effect)) + geom_point() + facet_wrap(~d1) + scale_x_continuous(trans = "log10", breaks = breaks, labels = breakLabels) + scale_linetype_manual(values = line_vec, labels = names(labels)) + theme_bw() + theme( panel.grid = element_blank(), axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5), legend.position = "top", legend.justification = c(0.5,0), legend.title = element_text(hjust = 0.5), legend.background = element_rect(linewidth = 0.25, color = "black") ) + labs(x = xlab, y = ylab, linetype = "Model") + guides(linetype = guide_legend(nrow = 1, byrow = T)) if ("bliss" %in% labels) { p <- p + geom_line(aes(y = bliss, linetype = "Bliss")) } if ("hsa" %in% labels) { p <- p + geom_line(aes(y = hsa, linetype = "HSA")) } if ("loewe" %in% labels) { p <- p + geom_line(aes(y = loewe, linetype = "Generalized Loewe")) } if ("loewe2" %in% labels) { p <- p + geom_line(aes(y = loewe2, linetype = "Alternative Loewe")) } } p }
/scratch/gouwar.j/cran-all/cranData/BIGL/R/splitPlot.R
#' Helper functions for the test statistics
#' @param idUnique id of unique off-axis points
#' @param data the dataset
#' @param respS the evaluated response surface
#' @inheritParams predictOffAxis
getR = function(data, idUnique, transforms, respS){
    if(!is.null(transforms)){
        data$effect <- with(transforms, PowerT(data$effect, compositeArgs))
    }
    ## Mean deviation from the predicted surface per off-axis dose combination
    tapply(data$effect - respS[idUnique], data$d1d2, mean)
}
#' @inheritParams predictOffAxis
#' @inheritParams getR
getMeanRF = function(data, fitResult, method, CP, reps, transforms, null_model,
                     R, n1, idUnique, respS, transFun, invTransFun){
    if(missing(R)){
        R = getR(data = data, idUnique = idUnique, transforms = transforms, respS = respS)
    }
    A <- getA(data, fitResult, method, CP, reps, n1, transFun, invTransFun)
    FStat <- max(0, as.numeric(crossprod(R, solve(A)) %*% R / n1))
    return(FStat)
}
getMaxRF = function(data, fitResult, method, CP, reps, transforms, null_model,
                    R, n1, idUnique, respS, transFun, invTransFun){
    if(missing(R)){
        R = getR(data = data, idUnique = idUnique, transforms = transforms, respS = respS)
    }
    A <- getA(data, fitResult, method, CP, reps, n1, transFun, invTransFun)
    ## Studentize R with the inverse symmetric square root of A
    E <- eigen(A)
    V <- E$values
    Q <- E$vectors
    Amsq <- Q %*% tcrossprod(diag(1/sqrt(V)), Q)
    RStud <- crossprod(R, Amsq)
    return(as.numeric(RStud))
}
getA = function(dat_off, fitResult, method, CP, reps, n1, transFun, invTransFun){
    MSE0 <- fitResult$sigma^2
    mse_off <- switch(method,
        "equal" = MSE0,
        "model" = c(modelVar(dat_off, transFun, invTransFun)),
        "unequal" = mean(with(dat_off, tapply(effect, d1d2, var)))
    )
    A <- MSE0*CP + mse_off*diag(1/reps, nrow = n1)
    return(A)
}
/scratch/gouwar.j/cran-all/cranData/BIGL/R/statistics.R
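## Toy illustration of the studentization performed in getMaxRF() above: the mean
## deviations R are multiplied by the inverse symmetric square root of A, obtained
## from an eigendecomposition. R and A below are invented values, not package output.
R <- c(0.8, -0.3, 1.1)
A <- matrix(c(0.5, 0.1, 0.0,
              0.1, 0.4, 0.1,
              0.0, 0.1, 0.6), nrow = 3)
E <- eigen(A)
Amsq <- E$vectors %*% tcrossprod(diag(1 / sqrt(E$values)), E$vectors)
RStud <- as.numeric(crossprod(R, Amsq))   # studentized deviations
max(abs(RStud))                           # the maxR statistic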
#' Apply two-parameter Box-Cox transformation #' #' @param y Numeric vector #' @param lambda Power parameter in power transform #' @param alpha Shift paramater in 2-parameter power transform. Defaults to #' \code{0} which implies a 1-parameter Box-Cox transform. #' @return Power-transformed data boxcox.transformation <- function(y, lambda, alpha = 0){ if (lambda == 0) { log(y + alpha) } else { ((y + alpha)^lambda - 1)/lambda } } #' Summarize data by factor #' #' @param value data to sumamrize #' @param fac factor to summarize by #' @importFrom stats median na.omit sd get.summ.data <- function(value, fac){ data <- data.frame("value" = value, "rep" = fac) aggs <- aggregate(data$value, by = list(data$rep), FUN = function(x) c("median" = median(x, na.rm = TRUE), "mean" = mean(x, na.rm = TRUE), "sd" = sd(x, na.rm = TRUE), "N" = length(x))) aggs <- data.frame("Rep" = aggs[["Group.1"]], aggs$x) data <- na.omit(data) # Remove observations with n_i < 2 return(aggs) } #' Return absolute t-value, used in optimization call in #' \code{\link{optim.boxcox}} #' #' @param value data #' @param fac factor #' @param lambda box-cox parameter #' @param zero.add2 2nd box-cox parameter #' @importFrom robustbase lmrob #' @importFrom graphics plot abline get.abs_tval <- function(value, fac, lambda, zero.add2 = 0){ value <- boxcox.transformation(value, lambda, zero.add2) data <- get.summ.data(value, fac) if (all(data$mean > 0)) absmin <- 0 else absmin <- abs(min(data$mean)) + 1e-05 * diff(range(data$mean)) ## We check whether variance and mean in power-transformed data ## appear to be related m <- lmrob(log10(data$sd + zero.add2) ~ log10(data$mean + absmin)) abs.tval <- abs(summary(m)$coefficients[2,3]) abs.tval } #' Find optimal Box-Cox transformation parameters #' #' @param value Response variable in the data, e.g. \code{"effect"} column #' @param fac Factor indicating groups of replicates, e.g. #' \code{interaction(d1,d2)} #' @param shift Whether to use 2-parameter Box-Cox transformation. Input may be #' \code{TRUE/FALSE} or a numeric value indicating the shift parameter to use. #' If \code{FALSE}, shift parameter is set to zero. #' @importFrom stats optim optimize #' @return Numeric vector with power and shift parameter in that order. #' @examples #' data <- subset(directAntivirals, experiment == 1) #' optim.boxcox(data$effect, interaction(data$d1, data$d2)) #' @export optim.boxcox <- function(value, fac, shift = FALSE){ ## If shift parameter is optimized, absmin would be its ## lower bound in order to guarantee positivity of the ## numerator. if (all(value > 0)) absmin <- 0 else absmin <- abs(min(value)) + 1e-05 * diff(range(value)) if (isTRUE(shift)) { ## Optimize over two parameters fn2 <- function(p) get.abs_tval(value, fac, p[1], p[2]) optim(c(0.5, 0), fn2, method = "L-BFGS-B", lower = c(0.1, absmin), upper = c(0.9, Inf))$par } else { ## Optimize over a single parameter if (!is.numeric(shift)) shift <- 0 fn1 <- function(p) get.abs_tval(value, fac, p, shift) c(optimize(fn1, c(0.1, 0.9))[["minimum"]], shift) } } #' Return a list with transformation functions #' #' This function takes in response data from a dose-response model and attempts #' to find an optimal Box-Cox power transform based on #' \code{\link{optim.boxcox}} function. It then returns a list of transformation #' functions which contains this power transform and its inverse which can be #' subsequently used in \code{\link{fitMarginals}} and \code{\link{fitSurface}}. 
#' #' Additionally, returned list contains biological transform and its inverse #' based on a simple exponential growth model, especially useful when response #' data is provided in cell counts. User can additionally provide arguments for #' these biological transforms where \code{N0} stands for initial cell count and #' \code{time.hours} indicates number in hours after which response data was #' measured. #' #' @param shift If \code{TRUE} or is a numeric value, then a two-parameter #' Box-Cox transformation is assumed. This parameter will be passed on to #' \code{\link{optim.boxcox}} function. #' @param args List with elements that are added to the list of transformation #' function and which can be used by these functions. In particular, this #' list should be of type \code{args = list("N0" = 1, "time.hours" = 1)} where #' \code{N0} and \code{time.hours} are arguments used for the biological #' transform. #' @inheritParams fitSurface #' @details \code{\link{getTransformations}} relies on #' \code{\link{optim.boxcox}} to obtain the optimal Box-Cox transformation #' parameters. However, \code{\link{optim.boxcox}} optimizes for the power #' parameter only within the interval (0.1, 0.9). Hence, if obtained power #' parameter is close to 0.1, then a logarithmic transformation is applied #' instead. #' @return This function returns a list with transformation functions. These #' include power transformation (\code{"PowerT"}) and its inverse #' (\code{"InvPowerT"}) as well as biological transformation (\code{"BiolT"}) #' and its inverse (\code{"InvBiolT"}). #' #' Power transformation is a 1-parameter Box-Cox transformation. If #' \code{shift = TRUE}, then power transformation is a 2-parameter Box-Cox #' transformation. Optimal values for power and shift operators are selected #' by means of \code{\link{optim.boxcox}} function. #' #' Biological transformation \code{y = N0 * exp(x * t)} where \code{N0} is the #' initial cell count and \code{t} is the incubation time. If response/effect #' variable (\code{y}) is given in terms of cell counts, biological #' transformation ensures that modelisation is done for the growth rate #' instead (\code{x}). #' #' Returned list also contains \code{"compositeArgs"} elements shared by all #' the transformation functions. These arguments include initial cell count #' (\code{"N0"}) and incubation time (\code{"time.hours"}). 
#' @examples
#' data <- subset(directAntivirals, experiment == 1)
#' ## Data must contain d1, d2 and effect columns
#' getTransformations(data)
#' @export
getTransformations <- function(data, shift = FALSE,
                               args = list("N0" = 1, "time.hours" = 1)) {

  if (any(data$effect <= 0))
    stop("Non-positive effect values are not allowed by the power transform.")

  bcPars <- optim.boxcox(data$effect, interaction(data$d1, data$d2), shift)
  if (shift == FALSE) shift <- 0

  if (shift == FALSE) {
    if (abs(bcPars[1] - 0.1) < 1e-2) {
      PowerT <- function(x) log(x)
      InvPowerT <- function(y) exp(y)
    } else {
      PowerT <- function(x) (x^bcPars[1] - 1) / bcPars[1]
      InvPowerT <- function(y) (y*bcPars[1] + 1)^(1/bcPars[1])
    }
  } else {
    if (abs(bcPars[1] - 0.1) < 1e-2) {
      PowerT <- function(x) log(x + bcPars[2])
      InvPowerT <- function(y) exp(y) - bcPars[2]
    } else {
      PowerT <- function(x) ((x + bcPars[2])^bcPars[1] - 1) / bcPars[1]
      InvPowerT <- function(y) (y*bcPars[1] + 1)^(1/bcPars[1]) - bcPars[2]
    }
  }

  transforms <- list(
    "PowerT" = function(x, args) with(args, PowerT(x)),
    "InvPowerT" = function(x, args) with(args, InvPowerT(x)),
    "BiolT" = function(x, args) with(args, N0*exp(x*time.hours)),
    "InvBiolT" = function(y, args) with(args, log(y/N0) / time.hours),
    "compositeArgs" = args
  )
  return(transforms)
}
/scratch/gouwar.j/cran-all/cranData/BIGL/R/transformations.R
## ----init, message = FALSE---------------------------------------------------- library(BIGL) library(knitr) library(ggplot2) set.seed(12345) if (!requireNamespace("rmarkdown", quietly = TRUE) || !rmarkdown::pandoc_available("1.14")) { warning(call. = FALSE, "These vignettes assume rmarkdown and pandoc version 1.14. These were not found. Older versions will not work.") knitr::knit_exit() } ## ----settings----------------------------------------------------------------- nExp <- 4 # Dataset has 11 experiments, we consider only 4 cutoff <- 0.95 # Cutoff for p-values to use in plot.maxR() function ## ----data--------------------------------------------------------------------- data("directAntivirals", package = "BIGL") head(directAntivirals) ## ----------------------------------------------------------------------------- subsetData <- function(data, i) { ## Subset data to a single experiment and, optionally, select the necessary ## columns only subset(data, experiment == i)[, c("effect", "d1", "d2")] } ## ----subset, out.width="100%"------------------------------------------------- i <- 4 data <- subsetData(directAntivirals, i) ## ----transformations---------------------------------------------------------- ## Define forward and reverse transform functions transforms <- list( "BiolT" = function(y, args) with(args, N0*exp(y*time.hours)), "InvBiolT" = function(T, args) with(args, 1/time.hours*log(T/N0)), "PowerT" = function(y, args) with(args, log(y)), "InvPowerT" = function(T, args) with(args, exp(T)), "compositeArgs" = list(N0 = 1, time.hours = 72) ) ## ----autotransform, eval=FALSE------------------------------------------------ # transforms_auto <- getTransformations(data) # fitMarginals(data, transforms = transforms_auto) # # ## In the case of 1-parameter Box-Cox transformation, it is easy # ## to retrieve the power parameter by evaluating the function at 0. # ## If parameter is 0, then it is a log-transformation. # with(transforms_auto, -1 / PowerT(0, compositeArgs)) ## ----marginalFit-------------------------------------------------------------- ## Fitting marginal models marginalFit <- fitMarginals(data, transforms = transforms, method = "nls", names = c("Drug A", "Drug B")) summary(marginalFit) ## ----marginalPlot, fig.align="center", fig.height = 4, fig.width = 6---------- ## Plotting marginal models plot(marginalFit) + ggtitle(paste("Direct-acting antivirals - Experiment" , i)) ## ----marginalFitC, eval = FALSE----------------------------------------------- # ## Parameter ordering: h1, h2, b, m1, m2, e1, e2 # ## Constraint 1: m1 = m2. Constraint 2: b = 0.1 # constraints <- list("matrix" = rbind(c(0, 0, 0, -1, 1, 0, 0), # c(0, 0, 1, 0, 0, 0, 0)), # "vector" = c(0, 0.1)) # # ## Parameter estimates will now satisfy equality: # ## constraints$matrix %*% pars == constraints$vector # fitMarginals(data, transforms = transforms, # constraints = constraints) ## ----marginalFitFixed, eval = FALSE------------------------------------------- # ## Set baseline at 0.1 and maximal responses at 0. 
# fitMarginals(data, transforms = transforms, # fixed = c("m1" = 0, "m2" = 0, "b" = 0.1)) ## ----fallback, eval = FALSE--------------------------------------------------- # nlslmFit <- tryCatch({ # fitMarginals(data, transforms = transforms, # method = "nlslm") # }, warning = function(w) w, error = function(e) e) # # if (inherits(nlslmFit, c("warning", "error"))) # optimFit <- tryCatch({ # fitMarginals(data, transforms = transforms, # method = "optim") # }) ## ----eval=FALSE--------------------------------------------------------------- # customMarginalFit <- list("coef" = c("h1" = 1, "h2" = 2, "b" = 0, # "m1" = 1.2, "m2" = 1, "e1" = 0.5, "e2" = 0.5), # "sigma" = 0.1, # "df" = 123, # "model" = constructFormula(), # "shared_asymptote" = FALSE, # "method" = "nlslm", # "transforms" = transforms) # class(customMarginalFit) <- append(class(customMarginalFit), "MarginalFit") ## ----analysis, message=FALSE, comment = NA------------------------------------ rs <- fitSurface(data, marginalFit, null_model = "loewe", B.CP = 50, statistic = "none", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rs) ## ----image, warning=FALSE, comment = NA, fig.width = 6, fig.height = 4, fig.align = "center"---- isobologram(rs) ## ----plot3d, warning=FALSE, fig.align="center", fig.height=7, fig.width=7----- plot(rs, legend = FALSE, main = "") ## ----analysis_hsa, message=FALSE, comment = NA-------------------------------- rsh <- fitSurface(data, marginalFit, null_model = "hsa", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsh) ## ----analysis_bliss, message=FALSE, comment = NA------------------------------ rsb <- fitSurface(data, marginalFit, null_model = "bliss", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsb) ## ----analysis_loewe2, message=FALSE, comment = NA----------------------------- rsl2 <- fitSurface(data, marginalFit, null_model = "loewe2", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsl2) ## ----plot_2d_cross_section, message=FALSE, comment = NA, fig.width = 8, fig.height = 6---- nullModels <- c("loewe", "loewe2", "bliss", "hsa") rs_list <- Map(fitSurface, null_model = nullModels, MoreArgs = list( data = data, fitResult = marginalFit, B.CP = 50, statistic = "none", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") ) synergy_plot_bycomp(rs_list, ylab = "Response", plotBy = "Drug A", color = TRUE) ## ----meanrnorm, message = FALSE----------------------------------------------- meanR_N <- fitSurface(data, marginalFit, statistic = "meanR", CP = rs$CP, B.B = NULL, parallel = FALSE) ## ----meanrnonnorm, message = FALSE-------------------------------------------- meanR_B <- fitSurface(data, marginalFit, statistic = "meanR", CP = rs$CP, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") ## ----meanresults, echo=FALSE-------------------------------------------------- MeanR_both <- rbind("Normal errors" = c(meanR_N$meanR$FStat, meanR_N$meanR$p.value), "Bootstrapped errors" = c(meanR_B$meanR$FStat, meanR_B$meanR$p.value)) colnames(MeanR_both) <- c("F-statistic", "p-value") kable(MeanR_both) ## ----maxboth, message = FALSE------------------------------------------------- maxR_N <- fitSurface(data, marginalFit, statistic = "maxR", CP = rs$CP, B.B = NULL, parallel 
= FALSE) maxR_B <- fitSurface(data, marginalFit, statistic = "maxR", CP = rs$CP, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") maxR_both <- rbind(summary(maxR_N$maxR)$totals, summary(maxR_B$maxR)$totals) ## ----printmax, echo = FALSE--------------------------------------------------- rownames(maxR_both) <- c("Normal errors", "Bootstrapped errors") kable(maxR_both) ## ----maxoutside, results="asis"----------------------------------------------- outPts <- outsidePoints(maxR_B$maxR$Ymean) kable(outPts, caption = paste0("Non-additive points for Experiment ", i)) ## ----maxcontour, fig.align="center", fig.width=6, fig.height=5---------------- contour(maxR_B, colorPalette = c("blue", "white", "red"), main = paste0(" Experiment ", i, " contour plot for maxR"), scientific = TRUE, digits = 3, cutoff = cutoff ) ## ----plot3dmax, warning=FALSE, fig.height=7, fig.width=7---------------------- plot(maxR_B, color = "maxR", legend = FALSE, main = "") ## ----summarySingleConfInt----------------------------------------------------- summary(maxR_B$confInt) ## ----plotSingleConfInt, fig.height=5, fig.width=8----------------------------- plotConfInt(maxR_B, color = "effect-size") ## ----contour_effectsize, warning=FALSE, fig.align="center", fig.width=6, fig.height=5, message=FALSE, comment = NA---- contour( maxR_B, colorPalette = c("Syn" = "blue", "None" = "white", "Ant" = "red"), main = paste0(" Experiment ", i, " contour plot for effect size"), colorBy = "effect-size", scientific = TRUE, digits = 3, cutoff = cutoff ) ## ----plot3d_effectsize, warning=FALSE, fig.height=7, fig.width=7, message=FALSE, comment = NA---- plot(maxR_B, color = "effect-size", legend = FALSE, main = "", gradient = FALSE, colorPalette = c("Ant" = "red", "None" = "white", "Syn" = "blue"), colorPaletteNA = "white") ## ----heterogenanalysis, fig.width=6, fig.height=5----------------------------- marginalFit <- fitMarginals(data, transforms = NULL) summary(marginalFit) resU <- fitSurface(data, marginalFit, method = "unequal", statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(resU) ## ----modelVariancePlot, fig.width=6, fig.height=5----------------------------- plotMeanVarFit(data) plotMeanVarFit(data, log = "xy") #Clearer on the log-scale plotMeanVarFit(data, trans = "log") #Thresholded at maximum observed variance ## ----modelVarianceSum, fig.width=6, fig.height=5------------------------------ resM <- fitSurface(data, marginalFit, method = "model", statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") ## ----modelVarianceSumLogTransform, fig.width=6, fig.height=5, eval = FALSE---- # resL <- fitSurface(data, marginalFit, method = "model", trans = "log", # statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE, # wild_bootstrap = TRUE, wild_bootType = "normal", # control = "dFCR") ## ----resM--------------------------------------------------------------------- summary(resM) ## ----fullanalysis, message=FALSE---------------------------------------------- marginalFits <- list() datasets <- list() respSurfaces <- list() maxR.summary <- list() for (i in seq_len(nExp)) { ## Select experiment data <- subsetData(directAntivirals, i) ## Fit joint marginal model marginalFit <- fitMarginals(data, transforms = transforms, method = "nlslm") ## Predict response surface based on generalized Loewe model respSurface <- fitSurface(data, marginalFit, 
statistic = "maxR", B.CP = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR" ) datasets[[i]] <- data marginalFits[[i]] <- marginalFit respSurfaces[[i]] <- respSurface maxR.summary[[i]] <- summary(respSurface$maxR)$totals } ## ----maxrfull, echo=FALSE----------------------------------------------------- allMaxR <- do.call(rbind, maxR.summary) rownames(allMaxR) <- paste("Experiment", 1:nrow(allMaxR)) kable(allMaxR, row.names = TRUE) ## ----tabs, echo = FALSE, results = "asis"------------------------------------- i <- 4 genCaption <- function(k) paste("Non-additive points for Experiment", k) outPts <- outsidePoints(respSurfaces[[i]]$maxR$Ymean) print(kable(outPts, caption = genCaption(i))) ## ----fullcontour, echo=FALSE, fig.align = "center", fig.width = 6, fig.height = 5---- i <- 4 contour(respSurfaces[[i]], main = paste("Experiment", i), scientific = TRUE, digits = 3, cutoff = cutoff)
/scratch/gouwar.j/cran-all/cranData/BIGL/inst/doc/analysis.R
--- title: "Synergy analysis" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true fig_caption: yes vignette: > %\VignetteIndexEntry{Synergy analysis} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- We first load the necessary packages and set some pre-defined values needed to replicate the analysis. ```{r init, message = FALSE} library(BIGL) library(knitr) library(ggplot2) set.seed(12345) if (!requireNamespace("rmarkdown", quietly = TRUE) || !rmarkdown::pandoc_available("1.14")) { warning(call. = FALSE, "These vignettes assume rmarkdown and pandoc version 1.14. These were not found. Older versions will not work.") knitr::knit_exit() } ``` ```{r settings} nExp <- 4 # Dataset has 11 experiments, we consider only 4 cutoff <- 0.95 # Cutoff for p-values to use in plot.maxR() function ``` ## Process and clean the data The data for the analysis must come in a data-frame with required columns `d1`, `d2` and `effect` for doses of two compounds and observed cell counts respectively. The `effect` column may represent also a type of normalized data and subsequent transformation functions should be adjusted. We will use sample data included in the package - `directAntivirals`. ```{r data} data("directAntivirals", package = "BIGL") head(directAntivirals) ``` This data consists of 11 experiments that can be processed separately. For initial illustration purposes we choose just one experiment and retain only the columns of interest. We define a simple function to do just that. ```{r} subsetData <- function(data, i) { ## Subset data to a single experiment and, optionally, select the necessary ## columns only subset(data, experiment == i)[, c("effect", "d1", "d2")] } ``` Now let us only pick `Experiment 4` to illustrate the functionality of the package. ```{r subset, out.width="100%"} i <- 4 data <- subsetData(directAntivirals, i) ``` Dose-response data for `Experiment 4` will be used for a large share of the analysis presented here, therefore this subset is stored in a dataframe called `data`. Later, we will run the analysis for some other experiments as well. ## Data transformation If raw data is measured in cell counts, data transformation might be of interest to improve accuracy and interpretation of the model. Of course, this will depend on the model specification. For example, if a generalized Loewe model is assumed on the growth rate of the cell count, the appropriate conversion should be made from the observed cell counts. The formula used would be $$y = N_0\exp\left(kt\right)$$ where $k$ is a growth rate, $t$ is time (fixed) and $y$ is the observed cell count. If such a transformation is specified, it is referred to as the biological transformation. In certain cases, variance-stabilizing transformations (Box-Cox) can also be useful. We refer to these transformations as power transformations. In many cases, a simple logarithmic transformation can be sufficient but, if desired, a helper function `optim.boxcox` is available to automate the selection of Box-Cox transformation parameters. In addition to specifying biological and power transformations, users are also asked to specify their inverses. These are later used in the bootstrapping procedure and plotting methods. As an example, we might define a `transforms` list that will be passed to the fitting functions. It contains both biological growth rate and power transformations along with their inverses. 
```{r transformations} ## Define forward and reverse transform functions transforms <- list( "BiolT" = function(y, args) with(args, N0*exp(y*time.hours)), "InvBiolT" = function(T, args) with(args, 1/time.hours*log(T/N0)), "PowerT" = function(y, args) with(args, log(y)), "InvPowerT" = function(T, args) with(args, exp(T)), "compositeArgs" = list(N0 = 1, time.hours = 72) ) ``` `compositeArgs` contains the initial cell counts (`N0`) and incubation time (`time.hours`). In certain cases, the `getTransformations` wrapper function can be employed to automatically obtain a prepared list with biological growth rate and power transformations based on results from `optim.boxcox`. Its output will also contain the inverses of these transforms. ```{r autotransform, eval=FALSE} transforms_auto <- getTransformations(data) fitMarginals(data, transforms = transforms_auto) ## In the case of 1-parameter Box-Cox transformation, it is easy ## to retrieve the power parameter by evaluating the function at 0. ## If parameter is 0, then it is a log-transformation. with(transforms_auto, -1 / PowerT(0, compositeArgs)) ``` # Analysis Once dose-response dataframe is correctly set up, we may proceed onto synergy analysis. We will use `transforms` as defined above with a logarithmic transformation. If not desired, `transforms` can be set to `NULL` and would be ignored. Synergy analysis is quite modular and is divided into 3 parts: 1. Determine marginal curves for each of the compounds. These curves are computed based on monotherapy data, i.e. those observations where one of the compounds is dosed at 0. 2. Compute expected effects for a chosen null model given the previously determined marginal curves at various dose combinations. 3. Compare the expected response with the observed effect using statistical testing procedures. ## Fitting marginal (on-axis) data The first step of the fitting procedure will consist in treating marginal data only, i.e. those observations within the experiment where one of the compounds is dosed at zero. For each compound the corresponding marginal doses are modelled using a 4-parameter logistic model. The marginal models will be estimated together using non-linear least squares estimation procedure. Estimation of both marginal models needs to be simultaneous since it is assumed they share a common baseline that also needs to be estimated. The `fitMarginals` function and other marginal estimation routines will automatically extract marginal data from the dose-response data frame. Before proceeding onto the estimation, we get a rough guess of the parameters to use as starting values in optimization and then we fit the model. `marginalFit`, returned by the `fitMarginals` routine, is an object of class `MarginalFit` which is essentially a list containing the main information about the marginal models, in particular the estimated coefficients. The optional `names` argument allows to specify the names of the compounds to be shown on the plots and in the summary. If not defined, the defaults ("Compound 1" and "Compound 2") are used. ```{r marginalFit} ## Fitting marginal models marginalFit <- fitMarginals(data, transforms = transforms, method = "nls", names = c("Drug A", "Drug B")) summary(marginalFit) ``` `marginalFit` object retains the data that was supplied and the transformation functions used in the fitting procedure. It also has a `plot` method which allows for a quick visualization of the fitting results. 
```{r marginalPlot, fig.align="center", fig.height = 4, fig.width = 6} ## Plotting marginal models plot(marginalFit) + ggtitle(paste("Direct-acting antivirals - Experiment" , i)) ``` Note as well that the `fitMarginals` function allows specifying linear constraints on parameters. This provides an easy way for the user to impose asymptote equality, specific baseline value and other linear constraints that might be useful. See `help(constructFormula)` for more details. ```{r marginalFitC, eval = FALSE} ## Parameter ordering: h1, h2, b, m1, m2, e1, e2 ## Constraint 1: m1 = m2. Constraint 2: b = 0.1 constraints <- list("matrix" = rbind(c(0, 0, 0, -1, 1, 0, 0), c(0, 0, 1, 0, 0, 0, 0)), "vector" = c(0, 0.1)) ## Parameter estimates will now satisfy equality: ## constraints$matrix %*% pars == constraints$vector fitMarginals(data, transforms = transforms, constraints = constraints) ``` The `fitMarginals` function allows an alternative user-friendly way to specify one or more fixed-value constraints using a named vector passed to the function via `fixed` argument. ```{r marginalFitFixed, eval = FALSE} ## Set baseline at 0.1 and maximal responses at 0. fitMarginals(data, transforms = transforms, fixed = c("m1" = 0, "m2" = 0, "b" = 0.1)) ``` By default, no constraints are set, thus asymptotes are not shared and so a generalized Loewe model will be estimated. ### Optimization algorithms We advise the user to employ the `method = "nlslm"` argument which is set as the default in monotherapy curve estimation. It is based on `minpack.lm::nlsLM` function with an underlying Levenberg-Marquardt algorithm for non-linear least squares estimation. This algorithm is known to be more robust than `method = "nls"` and its Gauss-Newton algorithm. In cases with nice sigmoid-shaped data, both methods should however lead to similar results. `method = "optim"` is a simple sum-of-squared-residuals minimization driven by a default Nelder-Mead algorithm from `optim` minimizer. It is typically slower than non-linear least squares based estimation and can lead to a significant increase in computational time for larger datasets and bootstrapped statistics. In nice cases, Nelder-Mead algorithm and non-linear least squares can lead to rather similar estimates but this is not always the case as these algorithms are based on different techniques. In general, we advise that in automated batch processing whenever `method = "nlslm"` does not converge fast enough and/or emits a warning, user should implement a fallback to `method = "optim"` and re-do the estimation. If none of these suggestions work, it might be useful to fiddle around and slightly perturb starting values for the algorithms as well. By default, these are obtained from the `initialMarginal` function. ```{r fallback, eval = FALSE} nlslmFit <- tryCatch({ fitMarginals(data, transforms = transforms, method = "nlslm") }, warning = function(w) w, error = function(e) e) if (inherits(nlslmFit, c("warning", "error"))) optimFit <- tryCatch({ fitMarginals(data, transforms = transforms, method = "optim") }) ``` Note as well that additional arguments to `fitMarginals` passed via `...` ellipsis argument will be passed on to the respective solver function, i.e. `minpack.lm::nlsLM`, `nls` or `optim`. ### Custom marginal fit While `BIGL` package provides several routines to fit 4-parameter log-logistic dose-response models, some users may prefer to use their own optimizers to estimate the relevant parameters. 
It is rather easy to integrate this into the workflow by constructing a custom `MarginalFit` object. It is in practice a simple list with * `coef`: named vector with coefficient estimates * `sigma`: standard deviation of residuals * `df`: degrees of freedom from monotherapy curve estimates * `model`: model of the marginal estimation which allows imposing linear constraints on parameters. If no constraints are necessary, it can be left out or assigned the output of `constructFormula` function with no inputs. * `shared_asymptote`: whether estimation is constrained to share the asymptote. During the estimation, this is deduced from `model` object. * `method`: method used in dose-response curve estimation which will be re-used in bootstrapping * `transforms`: power and biological transformation functions (and their inverses) used in monotherapy curve estimation. This should be a list in a format described above. If `transforms` is unspecified or `NULL`, no transformations will be used in statistical bootstrapping unless the user asks for it explicitly via one of the arguments to `fitSurface`. Other elements in the `MarginalFit` are currently unused for evaluating synergy and can be disregarded. These elements, however, might be necessary to ensure proper working of available methods for the `MarginalFit` object. As an example, the following code generates a custom `MarginalFit` object that can be passed further to estimate a response surface under the null hypothesis. ```{r eval=FALSE} customMarginalFit <- list("coef" = c("h1" = 1, "h2" = 2, "b" = 0, "m1" = 1.2, "m2" = 1, "e1" = 0.5, "e2" = 0.5), "sigma" = 0.1, "df" = 123, "model" = constructFormula(), "shared_asymptote" = FALSE, "method" = "nlslm", "transforms" = transforms) class(customMarginalFit) <- append(class(customMarginalFit), "MarginalFit") ``` Note that during bootstrapping this would use `minpack.lm::nlsLM` function to re-estimate parameters from data following the null. A custom optimizer for bootstrapping is currently not implemented. ## Compute expected response for off-axis data Five types of null models are available for calculating expected response surfaces. * Generalized Loewe model is used if maximal responses are not constrained to be equal, i.e. `shared_asymptote = FALSE`, in the marginal fitting procedure and `null_model = "loewe"` in response calculation. * Classical Loewe model is used if constraints are such that `shared_asymptote = TRUE` in the marginal fitting procedure and `null_model = "loewe"` in response calculation. * Highest Single Agent is used if `null_model = "hsa"` irrespective of the value of `shared_asymptote`. * Bliss independence model is used when `null_model = "bliss"`. In the situations when maximal responses are constrained to be equal, the classical Bliss independence approach is used, when they are not equal, the Bliss independence calculation is performed on responses rescaled to the maximum range (i.e. absolute difference between baseline and maximal response). * Alternative Loewe Generalization is used when `null_model = "loewe2"`. If the asymptotes are constrained to be equal, this reduces to the classical Loewe. Note that if `shared_asymptote = TRUE` constraints are used, this also reduces to classical Loewe model. 
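To build some intuition for what these null models predict, the sketch below (purely illustrative and not evaluated; it is not how the package computes the null surface, which is done by `fitSurface` with the corresponding `null_model`) assembles the Highest Single Agent prediction by hand from the marginal parameter estimates, assuming the `h1, h2, b, m1, m2, e1, e2` naming used throughout this vignette. When `transforms` were supplied to `fitMarginals`, these predictions live on the transformed scale.

```{r hsaSketch, eval = FALSE}
## Illustrative only: HSA prediction assembled by hand from the marginal fit.
## The supported way is fitSurface(..., null_model = "hsa").
pars <- marginalFit$coef
mono <- function(d, h, m, e) {
  ## 4-parameter logistic monotherapy curve; e is the log of EC50
  pars[["b"]] + (m - pars[["b"]]) / (1 + (exp(e) / d)^abs(h))
}
offAxis <- subset(data, d1 > 0 & d2 > 0)
pred1 <- mono(offAxis$d1, pars[["h1"]], pars[["m1"]], pars[["e1"]])
pred2 <- mono(offAxis$d2, pars[["h2"]], pars[["m2"]], pars[["e2"]])
## HSA: minimum of the two monotherapy responses for decreasing curves,
## maximum for increasing curves
hsaPred <- if (pars[["m1"]] < pars[["b"]]) pmin(pred1, pred2) else pmax(pred1, pred2)
head(cbind(offAxis[, c("d1", "d2")], hsa = hsaPred))
```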
Three methods are available to control for errors * Family wise error rate is used if `control = "FWER"` * False coverage rate is used if `control = "FCR"` * Directional false coverage rate is used if `control = "dFCR"` ### (Generalized) Loewe model If transformation functions were estimated using `fitMarginals`, these will be automatically recycled from the `marginalFit` object when doing calculations for the response surface fit. Alternatively, transformation functions can be passed by a separate argument. Since the `marginalFit` object was estimated without the shared asymptote constraint, the following will compute the response surface based on the generalized Loewe model. ```{r analysis, message=FALSE, comment = NA} rs <- fitSurface(data, marginalFit, null_model = "loewe", B.CP = 50, statistic = "none", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rs) ``` The occupancy matrix used in the expected response calculation for the Loewe models can be accessed with `rs$occupancy`. For off-axis data and a fixed dose combination, the Z-score for that dose combination is defined to be the standardized difference between the observed effect and the effect predicted by a generalized Loewe model. If the observed effect differs significantly from the prediction, it might be due to the presence of synergy or antagonism. If multiple observations refer to the same combination of doses, then a mean is taken over these multiple standardized differences. The following plot illustrates the isobologram of the chosen null model. Coloring and contour lines within the plot should help the user distinguish areas and dose combinations that generate similar response according to the null model. Note that the isobologram is plotted by default on a logarithmically scaled grid of doses. ```{r image, warning=FALSE, comment = NA, fig.width = 6, fig.height = 4, fig.align = "center"} isobologram(rs) ``` The plot below illustrates the above considerations in a 3-dimensional setting. In this plot, points refer to the observed effects whereas the surface is the model-predicted response. The surface is colored according to the median Z-scores where blue coloring indicates possible synergistic effects (red coloring would indicate possible antagonism). ```{r plot3d, warning=FALSE, fig.align="center", fig.height=7, fig.width=7} plot(rs, legend = FALSE, main = "") ``` ### Highest Single Agent For the Highest Single Agent null model to work properly, it is expected that both marginal curves are either decreasing or increasing. Equivalent `summary` and `plot` methods are also available for this type of null model. ```{r analysis_hsa, message=FALSE, comment = NA} rsh <- fitSurface(data, marginalFit, null_model = "hsa", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsh) ``` <!-- Occupancy estimates provided with HSA response surface still rely on the (generalized) Loewe model. --> ### Bliss Independence Also for the Bliss independence null model to work properly, it is expected that both marginal curves are either decreasing or increasing. Equivalent `summary` and `plot` methods are also available for this type of null model. 
```{r analysis_bliss, message=FALSE, comment = NA}
rsb <- fitSurface(data, marginalFit,
                  null_model = "bliss",
                  B.CP = 50, statistic = "both", parallel = FALSE,
                  wild_bootstrap = TRUE, wild_bootType = "normal",
                  control = "dFCR")
summary(rsb)
```

<!-- Occupancy estimates provided with Bliss response surface still rely on the (generalized) Loewe model. -->

### Alternative Loewe Generalization

Also for the Alternative Loewe Generalization null model to work properly, it
is expected that both marginal curves are either decreasing or increasing.
Equivalent `summary` and `plot` methods are also available for this type of
null model.

```{r analysis_loewe2, message=FALSE, comment = NA}
rsl2 <- fitSurface(data, marginalFit,
                   null_model = "loewe2",
                   B.CP = 50, statistic = "both", parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
summary(rsl2)
```

<!-- Occupancy estimates provided still rely on the (generalized) Loewe model. -->

## Plot 2D cross section of response surface

A 2-dimensional predicted response surface plot can be generated for a group of
null models. In the plot, the points refer to the observed effects whereas the
colored lines are the model-predicted responses for the different null models.
The panels correspond to concentration levels of one compound and the x-axis
shows the concentration levels of the second compound. The user has the option
to define which compound will be shown in the panels and which on the x-axis.

```{r plot_2d_cross_section, message=FALSE, comment = NA, fig.width = 8, fig.height = 6}
nullModels <- c("loewe", "loewe2", "bliss", "hsa")
rs_list <- Map(fitSurface, null_model = nullModels,
               MoreArgs = list(
                   data = data, fitResult = marginalFit,
                   B.CP = 50, statistic = "none", parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
)
synergy_plot_bycomp(rs_list, ylab = "Response", plotBy = "Drug A", color = TRUE)
```

## Statistical testing

Presence of synergistic or antagonistic effects can be formalized by means of
statistical tests. Two types of tests are considered here and are discussed in
more detail in the [methodology vignette](methodology.html) as well as the
[accompanying paper](https://dx.doi.org/10.1038/s41598-017-18068-5).

* The `meanR` test evaluates how the predicted response surface based on a
  specified null model differs from the observed one. If the null hypothesis is
  rejected, this test suggests that at least some dose combinations may exhibit
  synergistic or antagonistic behaviour. The `meanR` test is not designed to
  pinpoint which combinations produce these effects nor what type of deviating
  effect is present.
* The `maxR` test evaluates the presence of synergistic/antagonistic effects
  for each dose combination and as such provides a point-by-point
  classification.

Both of the above test statistics have a well-specified null distribution under
a set of assumptions, namely normality of Z-scores. If this assumption is not
satisfied, the distribution of these statistics can be estimated using the
bootstrap. The normal approximation is significantly faster, whereas the
bootstrapped distribution of critical values is likely to be more accurate in
many practical cases.

### meanR

Here we will use the previously computed `CP` covariance matrix to speed up the
process.

* normal errors

```{r meanrnorm, message = FALSE}
meanR_N <- fitSurface(data, marginalFit, statistic = "meanR",
                      CP = rs$CP, B.B = NULL, parallel = FALSE)
```

* non-normal errors

The previous piece of code assumes normal errors.
If we drop this assumption, we can use bootstrap methods to resample from the observed errors. Other parameters for bootstrapping, such as additional distribution for errors, wild bootstrapping to account for heteroskedasticity, are also available. See `help(fitSurface)`. ```{r meanrnonnorm, message = FALSE} meanR_B <- fitSurface(data, marginalFit, statistic = "meanR", CP = rs$CP, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") ``` Both tests use the same calculated F-statistic but compare it to different null distributions. In this particular case, both tests lead to identical results. ```{r meanresults, echo=FALSE} MeanR_both <- rbind("Normal errors" = c(meanR_N$meanR$FStat, meanR_N$meanR$p.value), "Bootstrapped errors" = c(meanR_B$meanR$FStat, meanR_B$meanR$p.value)) colnames(MeanR_both) <- c("F-statistic", "p-value") kable(MeanR_both) ``` ### maxR The `meanR` statistic can be complemented by the `maxR` statistic for each of available dose combinations. We will do this once again by assuming both normal and non-normal errors similar to the computation of the `meanR` statistic. ```{r maxboth, message = FALSE} maxR_N <- fitSurface(data, marginalFit, statistic = "maxR", CP = rs$CP, B.B = NULL, parallel = FALSE) maxR_B <- fitSurface(data, marginalFit, statistic = "maxR", CP = rs$CP, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") maxR_both <- rbind(summary(maxR_N$maxR)$totals, summary(maxR_B$maxR)$totals) ``` Here is the summary of `maxR` statistics. It lists the total number of dose combinations listed as synergistic or antagonistic for Experiment `r i` given the above calculations. ```{r printmax, echo = FALSE} rownames(maxR_both) <- c("Normal errors", "Bootstrapped errors") kable(maxR_both) ``` By using the `outsidePoints` function, we can obtain a quick summary indicating which dose combinations in Experiment `r i` appear to deviate significantly from the null model according to the `maxR` statistic. ```{r maxoutside, results="asis"} outPts <- outsidePoints(maxR_B$maxR$Ymean) kable(outPts, caption = paste0("Non-additive points for Experiment ", i)) ``` Synergistic effects of drug combinations can be depicted in a bi-dimensional contour plot where the `x-axis` and `y-axis` represent doses of `Compound 1` and `Compound 2` respectively and each point is colored based on the *p*-value and sign of the respective `maxR` statistic. ```{r maxcontour, fig.align="center", fig.width=6, fig.height=5} contour(maxR_B, colorPalette = c("blue", "white", "red"), main = paste0(" Experiment ", i, " contour plot for maxR"), scientific = TRUE, digits = 3, cutoff = cutoff ) ``` Previously, we had colored the 3-dimensional predicted response surface plot based on its Z-score, i.e. deviation of the predicted versus the observed effect. We can also easily color it based on the computed `maxR` statistic to account for additional statistical variation. ```{r plot3dmax, warning=FALSE, fig.height=7, fig.width=7} plot(maxR_B, color = "maxR", legend = FALSE, main = "") ``` ### Effect sizes and confidence interval The BIGL package also yields effect sizes and corresponding confidence intervals with respect to any response surface. The overall effect size and confidence interval is output in the summary of the `ResponseSurface`, but can also be called directly: ```{r summarySingleConfInt} summary(maxR_B$confInt) ``` In addition, a contour plot can be made with pointwise confidence intervals. 
Contour plot colouring can be defined according to the effect sizes or
according to maxR results.

```{r plotSingleConfInt, fig.height=5, fig.width=8}
plotConfInt(maxR_B, color = "effect-size")
```

You can also customize the coloring of the contour plot and 3-dimensional
predicted response surface plot based on effect sizes:

```{r contour_effectsize, warning=FALSE, fig.align="center", fig.width=6, fig.height=5, message=FALSE, comment = NA}
contour(
    maxR_B,
    colorPalette = c("Syn" = "blue", "None" = "white", "Ant" = "red"),
    main = paste0(" Experiment ", i, " contour plot for effect size"),
    colorBy = "effect-size",
    scientific = TRUE, digits = 3, cutoff = cutoff
)
```

```{r plot3d_effectsize, warning=FALSE, fig.height=7, fig.width=7, message=FALSE, comment = NA}
plot(maxR_B, color = "effect-size", legend = FALSE, main = "",
     gradient = FALSE,
     colorPalette = c("Ant" = "red", "None" = "white", "Syn" = "blue"),
     colorPaletteNA = "white")
```

# Analysis in case of variance heterogeneity

Starting from the package version `1.2.0` the variance can be estimated
separately for on-axis (monotherapy) and off-axis points using the `method`
argument to `fitSurface`. The possible values for `method` are:

* `"equal"`, equal variances assumed (as above, default),
* `"unequal"`, variance is estimated separately for on-axis and off-axis points,
* `"model"`, the variance is modelled as a function of the mean.

Please see the [methodology vignette](methodology.html) for details. Below we
show an example analysis in such a case. Note that transformations are not
possible if variances are not assumed equal.

```{r heterogenanalysis, fig.width=6, fig.height=5}
marginalFit <- fitMarginals(data, transforms = NULL)
summary(marginalFit)
resU <- fitSurface(data, marginalFit, method = "unequal",
                   statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
summary(resU)
```

For the variance model, an exploratory plotting function is available to
explore the relationship between the mean and the variance.

```{r modelVariancePlot, fig.width=6, fig.height=5}
plotMeanVarFit(data)
plotMeanVarFit(data, log = "xy") #Clearer on the log-scale
plotMeanVarFit(data, trans = "log") #Thresholded at maximum observed variance
```

The linear fit seems fine in this case.

```{r modelVarianceSum, fig.width=6, fig.height=5}
resM <- fitSurface(data, marginalFit, method = "model",
                   statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
```

If the log transformation yielded a better fit, then this could be achieved by
using the following option.

```{r modelVarianceSumLogTransform, fig.width=6, fig.height=5, eval = FALSE}
resL <- fitSurface(data, marginalFit, method = "model", trans = "log",
                   statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
```

Negative variances were modelled, but the variance model uses the smallest
observed variance as a minimum, so we can proceed.

```{r resM}
summary(resM)
```

# Analysis of multiple experiments

In order to proceed with multiple experiments, we repeat the same procedure as
previously. We collect all the necessary objects for which estimations do not
have to be repeated to generate `meanR` and `maxR` statistics in a simple list.
```{r fullanalysis, message=FALSE} marginalFits <- list() datasets <- list() respSurfaces <- list() maxR.summary <- list() for (i in seq_len(nExp)) { ## Select experiment data <- subsetData(directAntivirals, i) ## Fit joint marginal model marginalFit <- fitMarginals(data, transforms = transforms, method = "nlslm") ## Predict response surface based on generalized Loewe model respSurface <- fitSurface(data, marginalFit, statistic = "maxR", B.CP = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR" ) datasets[[i]] <- data marginalFits[[i]] <- marginalFit respSurfaces[[i]] <- respSurface maxR.summary[[i]] <- summary(respSurface$maxR)$totals } ``` We use the `maxR` procedure with a chosen p-value cutoff of `r cutoff`. If `maxR` statistic falls outside the `r cutoff*100`th percentile of its distribution (either bootstrapped or not), the respective off-axis dose combination is said to deviate significantly from the generalized Loewe model and the algorithm determines whether it deviates in a synergistic or antagonistic way. Below is the summary of overall calls and number of deviating points for each experiment. ```{r maxrfull, echo=FALSE} allMaxR <- do.call(rbind, maxR.summary) rownames(allMaxR) <- paste("Experiment", 1:nrow(allMaxR)) kable(allMaxR, row.names = TRUE) ``` Previous summarizing and visual analysis can be repeated on each of the newly defined experiments. For example, `Experiment 4` indicates a total of 16 combinations that were called synergistic according to the `maxR` test. ```{r tabs, echo = FALSE, results = "asis"} i <- 4 genCaption <- function(k) paste("Non-additive points for Experiment", k) outPts <- outsidePoints(respSurfaces[[i]]$maxR$Ymean) print(kable(outPts, caption = genCaption(i))) ``` Consequently, above table for `Experiment 4` can be illustrated in a contour plot. ```{r fullcontour, echo=FALSE, fig.align = "center", fig.width = 6, fig.height = 5} i <- 4 contour(respSurfaces[[i]], main = paste("Experiment", i), scientific = TRUE, digits = 3, cutoff = cutoff) ```
/scratch/gouwar.j/cran-all/cranData/BIGL/inst/doc/analysis.Rmd
--- title: "Methodology" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Methodology} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- \newcommand{\MSE} {\text{MSE}} \newcommand{\prob}[1] {\text{P}\left\{#1\right\}} \newcommand{\diag}[1] {\text{diag} \left( #1 \right)} \newcommand{\mb}[1] {\boldsymbol{#1}} \newcommand{\cov}[1] {\mbox{Cov}\left\{#1\right\}} \newcommand{\covf}[2] {\mbox{Cov}_{#1}\left\{#2\right\}} \newcommand{\var}[1] {\mbox{Var}\left\{#1\right\}} \newcommand{\varf}[2] {\mbox{Var}_{#1}\left\{#2\right\}} Methodology described in this vignette is adapted from the article *"BIGL: Biochemically Intuitive Generalized Loewe null model for prediction of the expected combined effect compatible with partial agonism and antagonism"* (2017) by K. Van der Borght, A. Tourny, R. Bagdziunas, O. Thas, M. Nazarov, H. Turner, B. Verbist and H. Ceulemans ([doi:10.1038/s41598-017-18068-5](https://dx.doi.org/10.1038/s41598-017-18068-5)) as well as its technical supplement. We advise the reader to consult it for a deeper understanding of the procedure described next. Further chapters were added as extensions on top of the original article regarding variance heterogeneity, Bliss independence and alternative Loewe generalization. ## Marginal monotherapy curves First, a monotherapy model is described by the following equation. $$ y\left(d\right) = b + \dfrac{m - b}{1 + \left(\frac{\operatorname{EC50}}{d}\right)^{|h|}} $$ where $y$ is the response (or effect), $d$ is the dose (or concentration) of the compound, $h$ is the Hill's coefficient and $b$ and $m$ are respectively baseline and maximum response for that compound. Lastly, $\textrm{EC50}$ stands for the dose level of the compound needed to attain the midpoint effect, i.e. $$y\left(\textrm{EC50}\right) = b + \frac{m - b}{2}$$ Note that $m > b$ if and only if the response is increasing with the dose of the compound. If the response is decreasing, then $m < b$. This monotherapy equation is estimated for both compounds with the constraint that $b$, the baseline level, is shared across compounds. This baseline level is denoted by `b` in the parameter vector. Additionally, `m1` and `m2` in the parameter vector stand for estimates of maximal responses $m_{1}$ and $m_{2}$, respectively, whereas `h1` and `h2` are Hill's coefficients (slope) of the monotherapy curve for each compound. Lastly, `e1` and `e2` are log-transformed inflection points, i.e. `e1` $= \log\left(\textrm{EC50}_{1}\right)$ and `e2` $= \log\left(\textrm{EC50}_{2}\right)$. ## Null models of no synergy ### Occupancy Define the occupancy level $\textrm{occup}$, i.e. the fractional (enzymatic) effect or observed effect relative to maximal effect, for both compounds at given dose levels as $$ \textrm{occup}_{1}\left(d_{1}\right) = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{1}}{d_{1}}\right)^{h_{1}}} $$ $$ \textrm{occup}_{2}\left(d_{2}\right) = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{2}}{d_{2}}\right)^{h_{2}}} $$ Alternatively, the above equations can be rearranged to express dose in terms of occupancy so that $$ d_{1} = \operatorname{EC50}_{1} \left(\frac{1}{\operatorname{occup_{1}}} - 1 \right)^{-1/h_{1}} $$ $$ d_{2} = \operatorname{EC50}_{2} \left(\frac{1}{\operatorname{occup_{2}}} - 1 \right)^{-1/h_{2}} $$ Although the occupancy was considered here in the marginal case, it is equally well-defined when compounds are combined and is understood as the fraction of enzyme bound to any compound. 
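To make these relationships concrete, here is a minimal R sketch (with made-up parameter values; this is not package code) that evaluates the monotherapy curve and the occupancy at a given dose, and inverts the occupancy back to a dose:

```r
## Illustrative sketch with made-up parameter values (not package code)
b <- 1; m <- 0.2; EC50 <- 0.5; h <- 1.5

response  <- function(d) b + (m - b) / (1 + (EC50 / d)^abs(h))
occupancy <- function(d) 1 / (1 + (EC50 / d)^h)
dose      <- function(occ) EC50 * (1 / occ - 1)^(-1 / h)  # inverse of occupancy

d <- 2
response(d)         # response at dose d
occupancy(d)        # fraction of enzyme bound at dose d
dose(occupancy(d))  # recovers d
```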
Occupancy can thus be used to re-express the classical Loewe additivity
equations.

### Classical Loewe model

In the classical Loewe model where both marginal models share upper $(m)$ and
lower $(b)$ asymptotes, occupancy is defined as the solution to this additivity
equation for each dose combination $(d_{1}, d_{2})$, namely

$$\frac{d_1\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}}+ \frac{d_2\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}} = 1$$

Once occupancy is computed, in the classical Loewe model the predicted response
at dose combination $(d_{1}, d_{2})$ can be calculated to be

\begin{equation}
\begin{split}
y & = b + \left(m - b\right) \times \textrm{occup} = \\
& = b + \left(m - b\right) \times \textrm{occup} \times \left[\frac{d_{1}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{d_{2}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}}\right] = \\
& = b + \textrm{occup} \times \left[\frac{ \left(m - b\right) d_{1}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{ \left(m - b\right) d_{2}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}}\right]
\end{split}
\end{equation}

### Generalized Loewe model

The generalized Loewe model extends the classical Loewe model by allowing
compounds to have different upper asymptotes, so that the above predicted
response is written instead as

$$ y = b + \textrm{occup} \times \left[\frac{\left(m_{1} - b\right) d_{1}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{\left(m_{2} - b\right) d_{2}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}}\right]$$

In particular, if $m_{1} = m_{2}$, then generalized Loewe is equivalent to the
classical Loewe.
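The additivity equation above has no closed form in general, but it can be solved numerically for the occupancy, as done in the calculation procedure described further below. A minimal sketch of this idea (made-up parameter values; not the package implementation):

```r
## Minimal sketch (made-up parameter values, not the package implementation):
## solve the additivity equation for the occupancy at one dose combination and
## plug it into the generalized Loewe response.
b <- 1; m1 <- 0.2; m2 <- 0.4              # shared baseline, different maxima
EC50_1 <- 0.5; EC50_2 <- 2; h1 <- 1.5; h2 <- 1
d1 <- 1; d2 <- 1                          # an off-axis dose combination

additivity <- function(occ) {
  d1 * (1 / occ - 1)^(1 / h1) / EC50_1 +
    d2 * (1 / occ - 1)^(1 / h2) / EC50_2 - 1
}
occ <- uniroot(additivity, c(1e-12, 1 - 1e-12))$root

## Generalized Loewe predicted response at (d1, d2)
y <- b + occ * ((m1 - b) * d1 * (1 / occ - 1)^(1 / h1) / EC50_1 +
                (m2 - b) * d2 * (1 / occ - 1)^(1 / h2) / EC50_2)
c(occupancy = occ, response = y)
```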
### Highest Single Agent

A null model based on the Highest Single Agent (HSA) model does not attempt to
model interaction effects at all and the predicted effect of a combination is
either the minimum (if marginal curves are decreasing) or the maximum (if
marginal curves are increasing) of both monotherapy curves.

### Bliss independence model

Bliss independence implies that two agents do not cooperate, i.e. act
independently of each other.

<!-- In the classical Bliss independence model where marginal models share baseline and maximum response, the predicted response at dose combination $(d_{1}, d_{2})$ is based on the fractional effects of the marginal responses: -->

Additionally, the assumption is that decreasing monotherapy curves express the
fractions of _unaffected_ control populations, while increasing curves express
the fractions of _affected_ control populations.

The Bliss independence model is formulated for the fractional responses $f$
("fraction affected"), where the predicted response $f_{12}$ at dose
combination $(d_{1}, d_{2})$ is defined as:

$$ f_{12}(d_1, d_2) = f_1(d_1) + f_2(d_2) - f_1(d_1)f_2(d_2), $$

with

$$f_1(d_1) = \frac{y\left(d_1\right) - b}{m_1 - b} = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{1}}{d_{1}}\right)^{|h_{1}|}}$$

$$f_2(d_2) = \frac{y\left(d_2\right) - b}{m_2 - b} = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{2}}{d_{2}}\right)^{|h_{2}|}}$$

In the classical Bliss independence model, marginal models share baseline and
maximum response. To allow the compounds to have different maximal responses,
the fractional responses are rescaled to the maximum range (i.e. absolute
difference between baseline and maximal response). Then the predicted response
is defined as:

$$ y = b + (m_{max}-b) \left[ \tilde{f_1}(d_1) + \tilde{f_2}(d_2) - \tilde{f_1}(d_1)\tilde{f_2}(d_2) \right], $$

where $m_{max}$ is one of $m_1$ or $m_2$, for which the value of $|m_i - b|$ is
larger, and

$$ \tilde{f_i} = f_i\frac{m_i-b}{m_{max}-b}~~\text{for}~~i = 1, 2.$$

This implementation of Bliss independence supports both compounds with
decreasing and increasing monotherapy profiles. However, using one compound
with a decreasing profile and another with an increasing profile in combination
is not supported.

### Alternative Loewe Generalization

An alternative generalization of Loewe Additivity for the case of different
asymptotes can be defined as a combination of Loewe and HSA approaches as
follows:

In a classical Loewe equation, the predicted response $y$ at a given dose
combination $(d_1, d_2)$ can be found by solving the equation:

$$ \frac{d_1}{D_1(y)} + \frac{d_2}{D_2(y)} = 1, $$

where $D_i(y) = \operatorname{EC50}_{i}\left(\frac{y-b}{m_i-y}\right)^{\frac{1}{|h_i|}}$,
for $i = 1, 2$, is the dose of the $i$-th compound that gives response $y$.
Note that here $D_i$ is properly defined only if $y$ is between $b$ and $m_i$.
For the case of different asymptotes, say when $y > m_1$ (increasing curve) or
$y < m_1$ (decreasing curve), we set $D_1(y) = +\infty,$ so that $y$ is
determined from the equation $d_2 = D_2(y)$, replicating what is done in the
HSA approach.

### Calculation procedure

In order to evaluate any of the null models described above, the `fitSurface`
function will use the monotherapy parameter estimates from the previous step.
The idea is that if there are synergistic or antagonistic effects, then
administration of both compounds will lead to important deviations from what
combined monotherapy data would suggest according to the null model.

Routines within the `fitSurface` function do essentially the following.

1. Find occupancy for each combination of doses by solving the additivity
   equation of the classical Loewe model. This step does not require knowledge
   of the baseline or maximal response for either of the compounds. Occupancy
   solution is also reported in the HSA model case although occupancy plays no
   role in such a model.
2. Compute the predicted response based on the above described response
   equations and the previously computed occupancy rate for each dose
   combination.
3. If desired, the function will then calculate the selected statistic to
   evaluate the deviation of the predictions from the desired null model.

## Synergy evaluation assuming equal variances for on- and off-axis points

Synergy is evaluated for all off-axis dose combinations, i.e. dose combinations
that are not used in the monotherapy curve estimation. Synergy evaluation
depends on the underlying null model and any of the above models, i.e.
generalized or classical Loewe or Highest Single Agent, can be used for this
purpose. We provide here a brief summary of both statistical tests. Technical
derivations and further details are available in the article cited at the
beginning of the document.

To define test statistics, the following notations are used.

* Let $y_{ij}$ be the observed effect for replicate $j$ of dose combination
  $i$, so that $y_{11}, y_{12}, y_{13}, y_{21}, ..., y_{kn_{k}}$ is a set of
  observed effects. We assume $k$ different dose combinations and $n_{k}$
  replicates for each combination. The number of different off-axis dose
  combinations is denoted as $n_{1}$.
* $p_{1}, ..., p_{k}$ are the predicted responses for the $k$ off-axis dose
  combinations.
* $\sigma^{2}$ is the variance of the replicate observations, assumed to be constant over all dose combinations, and estimated by taking MSE of the null model. * $\operatorname{df}_{0}$ is the number of degrees of freedom from the marginal model estimation. We construct a vector $R = (r_{1}, ..., r_{k})$ which represents mean deviation from the predicted effect. In particular, $$ r_{k} = \frac{1}{n_{k}} \sum_{i = 1}^{n_{k}} y_{ki} - p_{k} $$ With the help of bootstrapping, the covariance matrix of $R$ can be estimated under the null hypothesis of no synergy so that $\operatorname{Var}\left(R\right) = \sigma^{2}\left(D + C_{p}\right)$ where $D$ is a diagonal matrix with $1 / n_{i}$ in the $i$-th position and $C_{p}$ is the covariance matrix obtained from bootstrap. ### `meanR` The `meanR` test will evaluate whether the null model globally fits well the observed data. It is derived using a lack-of-fit sum of squares technique. In particular, the test statistic is given by $$ \operatorname{TestStat} = \frac{R^{T}\left(D + C_{p}\right)^{-1}R}{n_{1}\sigma^{2}} $$ Assuming that residuals from the generalized Loewe model are normally distributed, it can be shown that this statistic follows an $F_{n_{1}, \operatorname{df}_{0}}$ distribution under the null. If these assumptions are not satisfied, the null distribution can be approximated by bootstrapping. ### `maxR` The `maxR` test evaluates whether the null model locally fits the observed data. In particular, it provides a test score for each off-axis combination. Based on the sign of this score, it can be determined whether synergy or antagonism is more likely and a formal test can be constructed. Under the null hypothesis of no lack-of-fit and normally distributed effects, $$ \max \left| R^{T}\left(D + C_{p}\right)^{-1/2} \right| / \sigma \sim \max \left| Z_{1}, \dots, Z_{k} \right| $$ where $Z_{j} \sim N\left(0,1\right)$. More particularly, the test statistic for the $k$-th off-axis dose combination $(d_{1}, d_{2})$ is computed as $$ \operatorname{TestStat}\left(d_{1}, d_{2}\right) = \left[\left| R^{T}\left(D + C_{p}\right)^{-1/2} \right| / \sigma\right]_{k} $$ where $\left[\cdot\right]_{k}$ indicates the $k$-th coordinate. This test statistic is then compared either to the null distribution based on normal approximation or a bootstrapped approximation. ## Synergy evaluation in case of variance heterogeneity In the methodology described above one important assumption is made regarding the variance of the on- and off-axis dose combinations. It is considered to be equal across all points. This assumption is also mentioned in the original article and its technical supplement. In reality it is often seen that the variance of the monotherapies is not equal to the variance of the off-axis combinations. The assumption of equal variances is thus not always valid. That is why the `meanR` and `maxR` test-statistics can also estimate the variances for on-axis (monotherapies) and off-axis dose-combinations separately. Two extra methods are described below: the `unequal` method (Separated variance) and the `model` method (Modeled variance). For both methods replicates are required and no variance-stabilizing transformations are required. The latter is often necessary when assuming equal variances. ### Adapted `meanR` The adapted `meanR` test uses two separate variance estimates for (a) the monotherapies (= $\sigma^{2}_{0}$) and (b) the dose combinations (= $\Sigma_{1}$, a diagonal matrix). 
The notation for both the `unequal` and `model` methods will be the same, but
the estimation of $\Sigma_{1}$ will be different. The variance of the
monotherapies $\sigma^{2}_{0}$ is estimated as $\sigma^{2}$ above by taking the
MSE of the null model. The test statistic is given by:

$$ \operatorname{TestStat} = \frac{R^{T}\left(\Sigma_{1}D + \sigma^{2}_{0}C_{p}\right)^{-1}R}{n_{1}} $$

1. **`unequal` method**: The variance for the dose combinations is estimated by
   taking the variance in each dose combination and then taking the mean of all
   these variances, thus $\Sigma_{1} = \sigma^2_1 I_{n_1}$. The downside of this
   method is that the variance for all combinations is assumed to be equal. In
   reality the variance often depends on the mean effect. This is taken into
   account in the `model` method.
2. **`model` method**: In this method the diagonal elements of $\Sigma_{1}$ are
   no longer estimated as a single number but rather as a vector of variances.
   Each off-axis point now has its own variance. A linear model is fitted on the
   original dataset, modelling the variance of each off-axis point as a function
   of its mean effect. The estimated model parameters are then used to predict
   the variance for the corresponding mean effect measured for that dose
   combination. These predicted variances are placed in the diagonal of
   $\Sigma_{1}$. Modelling the variance with a linear model may require a
   transformation, to achieve a better fit and to avoid negative variances being
   modelled. A log-transformation often works well.

### Adapted `maxR`

The same approach is taken for the adapted `maxR` test statistic. Instead of
using one estimated variance for both on- and off-axis points, two separate
estimates are used. The estimates for $\Sigma_{1}$ are different depending on
the method used (`unequal` or `model`). The methodology of estimating the
variance is the same as was described in the "Adapted `meanR`" section above.
The `maxR` test becomes

$$ \max \left| R^{T}\left(\Sigma_{1} D + \sigma^{2}_{0} C_{p}\right)^{-1/2} \right| \sim \max \left| Z_{1}, \dots, Z_{k} \right| $$

where $Z_{j} \sim N\left(0,1\right)$. In particular, the test statistic for the
$k$-th off-axis dose combination $(d_{1}, d_{2})$ is computed as

$$ \operatorname{TestStat}\left(d_{1}, d_{2}\right) = \left[\left| R^{T}\left(\Sigma_{1} D + \sigma^{2}_{0} C_{p}\right)^{-1/2} \right| \right]_{k} $$

where $\left[\cdot\right]_{k}$ indicates the $k$-th coordinate.

### Bootstrapping under unequal variances

In the case of the `unequal` variance assumption, the bootstrap proceeds as
before, with the off-axis residuals being pooled and resampled. With the
`model` assumption, the resampling is more complicated, as the residuals are no
longer exchangeable. One option is to rescale the observed residuals according
to the mean-variance model (i.e. dividing them by their standard deviations),
resample from this pool of standardized residuals, and then scale back to the
true variance (by multiplying by the standard deviation). Yet this approach has
proven to be unstable as it leads to extreme observations. An alternative (the
default) is to generate zero-mean normal data with the modelled variances (see
the `rescaleResids` argument in `fitSurface()`).
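As a toy illustration of the difference between these two resampling schemes (made-up numbers; not package code, since `fitSurface()` handles this internally through its `rescaleResids` argument):

```r
## Toy illustration (not package code) of resampling off-axis residuals under
## the `model` variance assumption.
set.seed(1)
meanEffect   <- c(10, 20, 35)                    # mean response per off-axis point
modelledVar  <- 0.5 + 0.1 * meanEffect           # variances predicted by a mean-variance fit
resid        <- rnorm(3, sd = sqrt(modelledVar)) # stand-in for observed residuals

## Option 1: standardize, resample with replacement, scale back (can be unstable)
standardized <- resid / sqrt(modelledVar)
resampled    <- sample(standardized, replace = TRUE) * sqrt(modelledVar)

## Option 2 (default): draw fresh zero-mean normal noise with the modelled variances
generated <- rnorm(length(modelledVar), mean = 0, sd = sqrt(modelledVar))

## Either set of perturbations is added to the observed means to form bootstrap data
meanEffect + resampled
meanEffect + generated
```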
### Advantages of `unequal` and `model` methods compared to assumption of equal variances

The assumption of equal variances between monotherapies and off-axis dose combinations fails to control the type I error rate around the pre-specified level when the variance of the off-axis points increases (natural variance or outliers). This results in false positive synergy calls when in reality there are none. Both the `unequal` and the `model` methods control the type I error rate far better, with slightly better results obtained by the `model` method. Furthermore, the sensitivity and specificity of the `maxR` test statistics are higher with the methods allowing for variance heterogeneity than with the methods assuming equal variances.

## Effect size for off-axis points

As with many statistical tests, the researcher may not only be interested in a measure of significance (e.g. a p-value), but also in a measure of effect size, and a measure of the imprecision of this estimated effect size. Here we develop confidence intervals for two types of effect sizes. The first is a pointwise effect size, which is defined at every off-axis point as the difference between the true mean response and the expected response under additivity. It is estimated as $E_{i} = \frac{1}{n_i}\sum_{j=1}^{n_i}(R_{ij}-\hat{R}_i)$ for every off-axis point $i=1, ..., n_1$, whereby we strive to achieve a simultaneous coverage of 95\% for all off-axis points.

### Confidence interval\label{subsec:ci}

Let $e_i$ denote the true effect size at off-axis point $i$, and call $E_{i}$ its estimate based on the data. Relying on the asymptotic normality of the estimator, an approximate (asymptotic) confidence interval would be formed as the set:
\begin{equation}
\left\{ e: \left\vert \frac{E_{i}-e}{\hat{s}_i} \right\vert < z_{\alpha/2} \right\}
\end{equation}
with $\hat{s}_i$ the estimated standard error of $E_i$, and $z_{\alpha/2}$ the $1-\alpha/2$ quantile of the standard normal distribution. We know, however, from the meanR and maxR tests that the asymptotic distributions provide poor approximations. Therefore, we use the bootstrap here too to build the confidence intervals.

For every bootstrap instance, bootstrap observations are sampled for on- as well as off-axis points. For the on-axis points, a parametric bootstrap based on the estimated monotherapy curves is used, as for the calculation of the meanR and maxR statistics. Based on these on-axis bootstrap samples, new monotherapy curves are fitted, yielding a bootstrap residual variance $MSE_0^b$, and the corresponding response surfaces with expected outcomes $\hat{R}_i^b$ are derived.

For the off-axis points, with $n_i$ replicates at a given point, $A_{ij} = R_{ij} - \bar{R}_i$ are the pointwise residuals, $j = 1, ..., n_i$. Here $R_{ij}$ is the observed outcome and $\bar{R}_i$, the average outcome at point $i$, serves as an unbiased estimator of the true response. Note that these residuals are different from the residuals $E_i = \bar{R}_i-\hat{R}_i$ used to construct the test statistics; for $A_{ij}$ the departure with respect to the mean outcome at that off-axis point is used. These residuals are resampled with replacement from the observed residuals, possibly using rescaling as explained below. The resampled residuals are then added to the observed mean outcomes to obtain bootstrapped observations $R_{ij}^b = \bar{R}_i + A_{ij}^b$, with $b = 1, ..., B$ denoting the bootstrap instance.
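
As a small illustration of this resampling step, the sketch below rebuilds bootstrap observations for a single off-axis dose combination. The replicate responses `y` and the prediction `pred` are hypothetical inputs; this is not the package code.

```{r, eval = FALSE}
## Illustrative sketch (assumed inputs): one off-axis dose combination
y    <- c(0.42, 0.47, 0.45)   # hypothetical replicate responses R_ij
pred <- 0.40                  # hypothetical predicted response under the null

E_i <- mean(y) - pred         # pointwise effect size estimate E_i
A   <- y - mean(y)            # residuals A_ij around the off-axis mean

## one bootstrap instance: resample the residuals and add them to the mean
A_b <- sample(A, replace = TRUE)
y_b <- mean(y) + A_b          # bootstrapped observations R_ij^b
```
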
An extra option of using the wild bootstrap is also available. In this method, a new response variable is calculated by multiplying the resampled residuals $A_{ij}^b$ by another random variable $\upsilon_{ij}$, so that the bootstrapped observations are given by
$$R_{ij}^b = \bar{R}_i + A_{ij}^b \upsilon_{ij}$$
where the $\upsilon_{ij}$ are independent random variables with mean zero and variance 1, which can be sampled from a normal, gamma, Rademacher or two-point distribution.

This leads to the bootstrap effect sizes $E^b_i = \bar{R}_i^b-\hat{R}_i^b$ and test statistics $\left\vert \frac{E^b_i -E_i}{\hat{s}^b_i} \right\vert$, using the standard deviations $\hat{s}_i^b$ from the bootstrap. Over all bootstrap instances, we then find the distribution of
\begin{equation}
T = \max_i \left\vert \frac{E^b_i -E_i}{\hat{s}^b_i} \right\vert
\end{equation}
Call $t_\alpha$ the threshold such that
\begin{equation}
\prob{T>t_\alpha} = \alpha
\end{equation}
Finally, we find for every off-axis point the confidence interval as the collection:
\begin{equation}
\left\{ e: \left\vert \frac{E_i - e}{\hat{s}_i} \right\vert < t_\alpha \right\}
\end{equation}

### Standard error\label{subsec:se}

As an estimate of the standard error we use
\begin{equation}
\hat{s}_i^b = \sqrt{\text{diag}(\mb{F}^b+\MSE_0^b \mb{C}_p)_i},
\label{eq:confIntSe}
\end{equation}
with $\mb{F}^b$ equal to $\MSE_0^b\mb{D}$, $\MSE_1^b\mb{D}$ or $\mb{S}^b$, depending on which variance model is used. This standard error resembles the variance estimators of the meanR and maxR statistics discussed above. By using only the diagonal elements of $\mb{F}^b$, we ignore the covariance between test statistics. We use the studentised-range concept (which is also central to Tukey's method for multiple testing) for controlling the family-wise error rate (FWER) by default. Other available options are the false coverage rate (FCR) and the directional false coverage rate (dFCR).

For the covariance matrix $\mb{C}_p$ we use the one estimated for the observed data. This matrix turns out to be quite stable over the bootstrap runs; moreover, recalculating it for each bootstrap sample would imply a time-consuming nested bootstrap procedure. On the other hand, $MSE_0^b$ is re-estimated with each bootstrap sample. Also $\mb{F}^b$ is re-estimated based on the bootstrapped data.

### Single effect measure

Researchers may also want a single measure of the strength of the synergy for a single experiment, e.g. in view of ranking compound combinations according to their synergistic effect. For this we calculate the average of the pointwise off-axis effect sizes. This is estimated as $\bar{E} = \frac{1}{\sum_{i=1}^{n_1}n_i}\sum_{i=1}^{n_1}\sum_{j=1}^{n_i}(R_{ij}-\hat{R}_i)$. It may be considered as a measure of the "volume" between the expected and observed response surfaces, similar to the "integrated synergy" of \textcite{DiVeroli2016}. Note that this effect size may cause synergistic and antagonistic points to cancel out against one another, but we believe this scenario is unlikely. As before, we would like a measure of imprecision for this effect size.
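
As a minimal numerical illustration of this overall effect size, consider a hypothetical data frame `offAxis` with one row per replicate, holding the observed response and the null-model prediction (both assumed inputs; not the package code):

```{r, eval = FALSE}
## Illustrative sketch (assumed inputs)
offAxis <- data.frame(
  d1     = rep(c(0.1, 0.1, 0.3), each = 3),
  d2     = rep(c(0.1, 0.3, 0.3), each = 3),
  effect = c(0.42, 0.47, 0.45, 0.55, 0.58, 0.51, 0.63, 0.66, 0.61),
  pred   = rep(c(0.40, 0.50, 0.60), each = 3))

## pointwise effect sizes E_i per dose combination
E_i  <- with(offAxis, tapply(effect - pred, interaction(d1, d2, drop = TRUE), mean))
## overall effect size: the mean of R_ij - R_i-hat over all replicates
Ebar <- with(offAxis, mean(effect - pred))
```
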
#### Confidence interval

The construction of the bootstrap confidence interval for the single effect size follows the general procedure for bootstrap-t confidence intervals \parencite{Efron1982, Hall1988}. Let $\mb{F}$ denote the estimated covariance matrix of the raw residuals as before. Using Equation \eqref{Eq_Var} in the main text and the fact that
\begin{equation}
\var{\bar{E}} = \var{\frac{1}{n_1}\sum_{i=1}^{n_1} E_i} = \frac{1}{n_1^2}\left(\sum_{i=1}^{n_1} \var{E_i} + \sum_{i=1}^{n_1}\sum_{j \neq i}\cov{E_i, E_j}\right),
\label{eq:varE}
\end{equation}
the standard error of $\bar{E}$ is $se(\bar{E}) = \frac{1}{n_1}\sqrt{\sum_{f \in \mb{F}} f}$. We obtain bootstrapped data for on- and off-axis points for bootstrap instances $b=1, ..., B$, and calculate the corresponding mean residual and standard error $\bar{E}_b$ and $se(\bar{E}_b)$. Then define the statistic
\begin{equation}
Q_b = \frac{\bar{E}_b-\bar{E}}{se(\bar{E}_b)}
\label{eq:confIntSingle}
\end{equation}
Note that $se(\bar{E}_b)$ relies on the bootstrap covariance matrix $\mb{F}^b$. Call ($q_{\alpha/2}$, $q_{1-\alpha/2}$) the quantiles of the bootstrap distribution such that
\begin{equation}
\prob{Q_b<q_{\alpha/2}} = \prob{Q_b>q_{1-\alpha/2}} = \alpha/2
\end{equation}
Finally, we find the confidence interval as
\begin{equation}
\left(\bar{E} - se(\bar{E})\, q_{1-\alpha/2},\ \bar{E} - se(\bar{E})\, q_{\alpha/2}\right)
\end{equation}
Of course, this procedure can easily be adapted to the sum of all raw residuals instead, but the mean is more comparable across experiments with different designs.

## On resampling residuals\label{sec:resampling}

Depending on the mean-variance structure in the data, residuals are resampled differently. The same strategies are used for resampling 1) the residuals of the observations $R_{ij}$ with respect to the expected response surface under the null hypothesis, for the meanR and maxR statistics, and 2) the residuals $A_{ij}$ with respect to the average at the off-axis point under the alternative hypothesis, for constructing the confidence intervals. In the description below, we use $U_{ij}$ as generic notation for either type of residual.

In case of constant variability within the off-axis points, the residuals $U_{ij}$ can simply be pooled and resampled with replacement. When a linear mean-variance structure is assumed, one option is to rescale the pointwise residuals first to $b_{ij} = \frac{U_{ij}}{\sqrt{v^{-1}(\beta_0+\beta_1\mu_i)}}$, then pool and resample them, and then scale them back to $b_{ij}\sqrt{v^{-1}(\beta_0+\beta_1\mu_i)}$ according to their new position $i$. Yet in practice this leads to extreme observations, which destabilizes the fitting procedure. A second option is to use random draws from a zero-mean normal distribution with the modelled variance $v^{-1}(\beta_0+\beta_1\mu_i)$ to generate new $U_{ij}$'s. This latter option was found to be more stable and is used as the default in the BIGL package.
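
The following sketch contrasts the two resampling strategies under simplifying assumptions (residuals `U`, corresponding mean effects `mu`, and mean-variance coefficients `beta0`, `beta1` on an identity variance link are all assumed inputs); it is illustrative only and not the code used inside the package.

```{r, eval = FALSE}
## Illustrative sketch (assumed inputs, identity variance link)
## U  : residuals, one per observation
## mu : mean effect at the corresponding dose combination
## beta0, beta1 : coefficients of the fitted mean-variance model
resampleResiduals <- function(U, mu, beta0, beta1, rescale = FALSE) {
  v <- pmax(beta0 + beta1 * mu, .Machine$double.eps)   # modelled variances
  if (rescale) {
    ## option 1: standardize, resample from the pooled residuals, scale back
    b <- U / sqrt(v)
    sample(b, replace = TRUE) * sqrt(v)
  } else {
    ## option 2 (the default): zero-mean normal draws with modelled variances
    rnorm(length(U), mean = 0, sd = sqrt(v))
  }
}
```
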
/scratch/gouwar.j/cran-all/cranData/BIGL/inst/doc/methodology.Rmd
library(BIGL) library(DT) set.seed(314159265) shinyServer(function(input, output, session) { doseGrid <- reactive({ if (input$logScale) { dose1 <- dose2 <- round(c(0, 3^(-6:0)), 4) } else { dose1 <- dose2 <- round(seq(0, 3, length.out = 7), 4) } doseGrid <- expand.grid(list("d1" = sort(dose1), "d2" = sort(dose2))) }) pars <- reactive({ c("h1" = input$h1, "h2" = input$h2, "b" = input$b, "m1" = input$m1, "m2" = input$m2, "e1" = log(input$e1), "e2" = log(input$e2)) }) ## Default parameters observeEvent(input$defaultpars, { updateNumericInput(session, "h1", value = 1) updateNumericInput(session, "h2", value = 1) updateNumericInput(session, "b", value = 0) updateNumericInput(session, "m1", value = 1) updateNumericInput(session, "m2", value = 1) updateNumericInput(session, "e1", value = 0.1) updateNumericInput(session, "e2", value = 0.1) updateNumericInput(session, "noise", value = 0) }) ## Agonist and partial agonist observeEvent(input$pagonist, { updateNumericInput(session, "h1", value = 1) updateNumericInput(session, "h2", value = 1) updateNumericInput(session, "b", value = 0) updateNumericInput(session, "m1", value = 1) updateNumericInput(session, "m2", value = 0.5) updateNumericInput(session, "e1", value = 0.1) updateNumericInput(session, "e2", value = 0.1) updateNumericInput(session, "noise", value = 0) }) ## Agonist and antagonist observeEvent(input$antagonist, { updateNumericInput(session, "h1", value = 1) updateNumericInput(session, "h2", value = 1) updateNumericInput(session, "b", value = 1) updateNumericInput(session, "m1", value = 2) updateNumericInput(session, "m2", value = 0) updateNumericInput(session, "e1", value = 0.1) updateNumericInput(session, "e2", value = 0.1) updateNumericInput(session, "noise", value = 0.0001) }) comp <- reactive({ a <- BIGL:::generalizedLoewe(doseGrid(), pars()) trueo <- with(a$occupancy, (d1/exp(pars()["e1"]) + d2/exp(pars()["e2"]))^pars()["h1"] / (1 + (d1/exp(pars()["e1"]) + d2/exp(pars()["e2"]))^pars()["h1"])) a$occupancy$occupancy <- trueo a }) dr <- reactive({ data <- cbind(comp()$occupancy[, c("d1", "d2")], "effect" = comp()$response[-1]) data <- rbind(c(0, 0, pars()["b"]), data) data <- data[rep(row.names(data), input$replicates), ] data$effect <- data$effect + input$noise * rnorm(nrow(data), 0, 1) data }) fit <- reactive({ if (input$null == "stdloewe") { constraints <- list("matrix" = c(0, 0, 0, 1, -1, 0, 0), "vector" = 0) } else { constraints <- NULL } fitMarginals(dr(), method = "nlslm", control = list(maxiter = 200), constraints = constraints) }) compE <- reactive({ BIGL:::generalizedLoewe(doseGrid(), fit()$coef) }) ## Coefficient table output$coefs <- renderTable({ coefs <- t(rbind(pars()[c("h1", "h2", "e1", "e2", "b", "m1", "m2")], fit()$coef[c("h1", "h2", "e1", "e2", "b", "m1", "m2")])) rownames(coefs) <- c("h1", "h2", "e1", "e2", "b", "m1", "m2") colnames(coefs) <- c("True", "Est.") coefs[3:4,] <- exp(coefs[3:4,]) coefs }, include.rownames = TRUE, digits = 3) ## Monotherapy plots output$marginals <- renderPlot({ plot(fit(), logScale = input$logScale) }) ## Table with occupancy values and constructed response output$occuptable <- DT::renderDataTable({ d1e1 <- (compE()$occupancy$d1 / exp(pars()["e1"])) d2e2 <- (compE()$occupancy$d2 / exp(pars()["e2"])) occp1 <- (1 / compE()$occupancy$occupancy - 1)^(1/pars()["h1"]) occp2 <- (1 / compE()$occupancy$occupancy - 1)^(1/pars()["h2"]) weight1 <- d1e1 * occp1 weight2 <- d2e2 * occp2 contrib1 <- compE()$occupancy$occupancy * weight1 * (pars()["m1"] - pars()["b"]) contrib2 <- 
compE()$occupancy$occupancy * weight2 * (pars()["m2"] - pars()["b"]) baseline <- pars()["b"] ## Only valid if Hill coefficients are equal ## trueOcc <- with(comp()$occupancy, (d1/exp(pars()["e1"]) + d2/exp(pars()["e2"]))^pars()["h1"] / ## (1 + (d1/exp(pars()["e1"]) + d2/exp(pars()["e2"]))^pars()["h1"])) printTable <- data.frame(comp()$occupancy[, c("d1", "d2")], "Occupancy" = compE()$occupancy$occupancy, "Weight1" = weight1, "Weight2" = weight2, "Baseline" = rep(baseline, nrow(compE()$occupancy)), "Contrib1" = contrib1, "Contrib2" = contrib2, "Response" = comp()$response[-1]) printTable <- printTable[order(abs(printTable$d1), decreasing = TRUE),] dat <- datatable(printTable, options = list(pageLength = nrow(printTable), searching = FALSE), rownames = FALSE, selection = "single") %>% formatStyle(c("d1", "Weight1", "Contrib1"), color = "blue") %>% formatStyle(c("d2", "Weight2", "Contrib2"), color = "green") %>% formatRound(c("Occupancy", "Response", "Weight1", "Weight2", "Contrib1", "Contrib2", "Baseline"), digits = 4) %>% formatRound(c("d1", "d2"), digits = 5) }) output$isobologram <- renderPlot({ surfaceFit <- list("data" = dr(), fitResult = fit(), "null_model" = if (input$null == "stdloewe") "loewe" else input$null) class(surfaceFit) <- "ResponseSurface" isobologram(surfaceFit, logScale = input$logScale) }) ## Response surface plot output$surface <- plotly::renderPlotly({ plotResponseSurface( data = dr(), fitResult = fit(), logScale = input$logScale, null_model = if (input$null == "stdloewe") "loewe" else input$null, legend = FALSE, colorBy = compE()$occupancy, breaks = c(0, 0.25, 0.5, 0.75, 1), plotfun = median, colorPalette = c("#EFF3FF", "#BDD7E7", "#6BAED6", "#2171B5") ) }) })
/scratch/gouwar.j/cran-all/cranData/BIGL/inst/ui/server.R
library(shiny) shinyUI(fluidPage( titlePanel("BIGL"), br(), sidebarPanel( ## Hill slope inputs fluidRow( column(4, numericInput("h1", label = "Hill_1", 1, step = 0.1)), column(4, numericInput("h2", label = "Hill_2", 1, step = 0.1)) ), ## EC50 inputs fluidRow( column(4, numericInput("e1", label = "EC50_1", 0.1, step = 0.05)), column(4, numericInput("e2", label = "EC50_2", 0.1, step = 0.05)) ), ## Choice of baseline fluidRow( column(4, numericInput("b", label = "Baseline", 0, step = 0.05)) ), ## Choice of asymptotes fluidRow( column(4, numericInput("m1", label = "Asymptote_1", 1, step = 0.05)), column(4, numericInput("m2", label = "Asymptote_2", 1, step = 0.05)) ), ## Choice of parameters for null data simulation fluidRow( column(4, numericInput("noise", label = "Noise level", 0, step = 0.05)), column(4, numericInput("replicates", label = "Replicates", 1, step = 1)) ), ## Whether doses are evenly spaced in log-scale checkboxInput("logScale", "Doses are evenly spaced in log-scale", TRUE), ## Null model radioButtons("null", label = "Null model", choices = c("Generalized Loewe" = "loewe", "Classical Loewe" = "stdloewe", "Highest Single Agent" = "hsa"), selected = "loewe"), ## Pre-selected parameters strong("Pre-defined examples"), br(), actionButton("defaultpars", "Agonist / Agonist"), br(), actionButton("pagonist", "Agonist / Partial agonist"), br(), actionButton("antagonist", "Agonist / Antagonist") ), mainPanel( tabsetPanel( tabPanel("Plots", br(), strong("Monotherapy coefficients and estimated dose-response curves"), br(), br(), fluidRow( column(3, tableOutput("coefs")), column(9, plotOutput("marginals"))), br(), strong("Isobologram of the null model"), br(), br(), plotOutput("isobologram"), br(), br(), strong("Expected response surface"), plotly::plotlyOutput("surface", width = "1024px", height = "800px") ), tabPanel("Table", br(), DT::dataTableOutput("occuptable")), tabPanel("About", br(), p(strong("BIGL"), "shiny application takes as input parameter values for two ", "4-parameter logistic dose-response curves which share the same baseline."), p("In order to test estimation stability, the application allows to add noise to the data ", "generated according to the null model as well as a number of replicates for ", "each dose combination. ", strong("Plots"), " and ", strong("Table"), "tabs use ", "coefficients fitted to this dataset."), p("Choice of null model determines which model will be used to construct the ", "monotherapy and response surface plots. This choice has no real impact for ", "the ", strong("Table"), " tab which is always constructed based on Loewe ", "additivity model."), p("Doses for both compounds are assumed to be the same and are generated using ", "either ", code("round(seq(0, 3, length.out = 7), 4)"), " or ", code("round(c(0, 3^(-6:0)), 4)"), "commands for both compounds depending on ", "whether evenly spaced logarithmic scale is chosen."), br(), h4("Plots"), p("This tab includes a plot of the monotherapy curves with the estimated monotherapy ", "coefficients. Setting noise level to zero will lead estimated coefficients to be ", "numerically very close, if not identical, to the true coefficients. A table of true ", "and estimated coefficients is provided as well."), p("Additionally, 3-dimensional response surface is plotted using the estimated ", "coefficients. Its gradient color represents occupancy values at a given dose ", "combination according to the generalized Loewe model. 
Transparent color indicates ", "occupancy close to zero, whereas dark blue indicates it being close to one."), p("Choice of the null model will be reflected in all components of this tab. If ", "classical Loewe is selected, asymptote estimates are constrained to be the same ", "for both compounds. For generalized Loewe and Highest Single Agent models this ", "restriction does not apply. In the case of 3-dimensional plot, generalized Loewe ", "and classical Loewe models imply that response surface is constructed according ", "to the procedure depicted ", "in the ", strong("Table"), " tab. Expected response for the Highest Single Agent ", "model, on the other hand, is constructed simply by taking either the minimum ", "(if dose-response curves are decreasing) or the maximum (if dose-response curves are ", "increasing) of dose-response values at a particular dose combination."), br(), h4("Table"), p(strong("Table"), " tab contains a detailed summary of how expected response is ", "constructed under the null of either generalized or classical Loewe models given the ", "estimated parameters. HSA model is not represented in this table."), p("By definition, expected response is equal to the sum of the baseline and contributions ", "from each compound. Contribution of a compound is obtained by multiplying its monotherapy ", "response at a given dose combination by its weight computed from the occupancy equation.") ) ) ) ))
/scratch/gouwar.j/cran-all/cranData/BIGL/inst/ui/ui.R
--- title: "Synergy analysis" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true fig_caption: yes vignette: > %\VignetteIndexEntry{Synergy analysis} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- We first load the necessary packages and set some pre-defined values needed to replicate the analysis. ```{r init, message = FALSE} library(BIGL) library(knitr) library(ggplot2) set.seed(12345) if (!requireNamespace("rmarkdown", quietly = TRUE) || !rmarkdown::pandoc_available("1.14")) { warning(call. = FALSE, "These vignettes assume rmarkdown and pandoc version 1.14. These were not found. Older versions will not work.") knitr::knit_exit() } ``` ```{r settings} nExp <- 4 # Dataset has 11 experiments, we consider only 4 cutoff <- 0.95 # Cutoff for p-values to use in plot.maxR() function ``` ## Process and clean the data The data for the analysis must come in a data-frame with required columns `d1`, `d2` and `effect` for doses of two compounds and observed cell counts respectively. The `effect` column may represent also a type of normalized data and subsequent transformation functions should be adjusted. We will use sample data included in the package - `directAntivirals`. ```{r data} data("directAntivirals", package = "BIGL") head(directAntivirals) ``` This data consists of 11 experiments that can be processed separately. For initial illustration purposes we choose just one experiment and retain only the columns of interest. We define a simple function to do just that. ```{r} subsetData <- function(data, i) { ## Subset data to a single experiment and, optionally, select the necessary ## columns only subset(data, experiment == i)[, c("effect", "d1", "d2")] } ``` Now let us only pick `Experiment 4` to illustrate the functionality of the package. ```{r subset, out.width="100%"} i <- 4 data <- subsetData(directAntivirals, i) ``` Dose-response data for `Experiment 4` will be used for a large share of the analysis presented here, therefore this subset is stored in a dataframe called `data`. Later, we will run the analysis for some other experiments as well. ## Data transformation If raw data is measured in cell counts, data transformation might be of interest to improve accuracy and interpretation of the model. Of course, this will depend on the model specification. For example, if a generalized Loewe model is assumed on the growth rate of the cell count, the appropriate conversion should be made from the observed cell counts. The formula used would be $$y = N_0\exp\left(kt\right)$$ where $k$ is a growth rate, $t$ is time (fixed) and $y$ is the observed cell count. If such a transformation is specified, it is referred to as the biological transformation. In certain cases, variance-stabilizing transformations (Box-Cox) can also be useful. We refer to these transformations as power transformations. In many cases, a simple logarithmic transformation can be sufficient but, if desired, a helper function `optim.boxcox` is available to automate the selection of Box-Cox transformation parameters. In addition to specifying biological and power transformations, users are also asked to specify their inverses. These are later used in the bootstrapping procedure and plotting methods. As an example, we might define a `transforms` list that will be passed to the fitting functions. It contains both biological growth rate and power transformations along with their inverses. 
```{r transformations} ## Define forward and reverse transform functions transforms <- list( "BiolT" = function(y, args) with(args, N0*exp(y*time.hours)), "InvBiolT" = function(T, args) with(args, 1/time.hours*log(T/N0)), "PowerT" = function(y, args) with(args, log(y)), "InvPowerT" = function(T, args) with(args, exp(T)), "compositeArgs" = list(N0 = 1, time.hours = 72) ) ``` `compositeArgs` contains the initial cell counts (`N0`) and incubation time (`time.hours`). In certain cases, the `getTransformations` wrapper function can be employed to automatically obtain a prepared list with biological growth rate and power transformations based on results from `optim.boxcox`. Its output will also contain the inverses of these transforms. ```{r autotransform, eval=FALSE} transforms_auto <- getTransformations(data) fitMarginals(data, transforms = transforms_auto) ## In the case of 1-parameter Box-Cox transformation, it is easy ## to retrieve the power parameter by evaluating the function at 0. ## If parameter is 0, then it is a log-transformation. with(transforms_auto, -1 / PowerT(0, compositeArgs)) ``` # Analysis Once dose-response dataframe is correctly set up, we may proceed onto synergy analysis. We will use `transforms` as defined above with a logarithmic transformation. If not desired, `transforms` can be set to `NULL` and would be ignored. Synergy analysis is quite modular and is divided into 3 parts: 1. Determine marginal curves for each of the compounds. These curves are computed based on monotherapy data, i.e. those observations where one of the compounds is dosed at 0. 2. Compute expected effects for a chosen null model given the previously determined marginal curves at various dose combinations. 3. Compare the expected response with the observed effect using statistical testing procedures. ## Fitting marginal (on-axis) data The first step of the fitting procedure will consist in treating marginal data only, i.e. those observations within the experiment where one of the compounds is dosed at zero. For each compound the corresponding marginal doses are modelled using a 4-parameter logistic model. The marginal models will be estimated together using non-linear least squares estimation procedure. Estimation of both marginal models needs to be simultaneous since it is assumed they share a common baseline that also needs to be estimated. The `fitMarginals` function and other marginal estimation routines will automatically extract marginal data from the dose-response data frame. Before proceeding onto the estimation, we get a rough guess of the parameters to use as starting values in optimization and then we fit the model. `marginalFit`, returned by the `fitMarginals` routine, is an object of class `MarginalFit` which is essentially a list containing the main information about the marginal models, in particular the estimated coefficients. The optional `names` argument allows to specify the names of the compounds to be shown on the plots and in the summary. If not defined, the defaults ("Compound 1" and "Compound 2") are used. ```{r marginalFit} ## Fitting marginal models marginalFit <- fitMarginals(data, transforms = transforms, method = "nls", names = c("Drug A", "Drug B")) summary(marginalFit) ``` `marginalFit` object retains the data that was supplied and the transformation functions used in the fitting procedure. It also has a `plot` method which allows for a quick visualization of the fitting results. 
```{r marginalPlot, fig.align="center", fig.height = 4, fig.width = 6} ## Plotting marginal models plot(marginalFit) + ggtitle(paste("Direct-acting antivirals - Experiment" , i)) ``` Note as well that the `fitMarginals` function allows specifying linear constraints on parameters. This provides an easy way for the user to impose asymptote equality, specific baseline value and other linear constraints that might be useful. See `help(constructFormula)` for more details. ```{r marginalFitC, eval = FALSE} ## Parameter ordering: h1, h2, b, m1, m2, e1, e2 ## Constraint 1: m1 = m2. Constraint 2: b = 0.1 constraints <- list("matrix" = rbind(c(0, 0, 0, -1, 1, 0, 0), c(0, 0, 1, 0, 0, 0, 0)), "vector" = c(0, 0.1)) ## Parameter estimates will now satisfy equality: ## constraints$matrix %*% pars == constraints$vector fitMarginals(data, transforms = transforms, constraints = constraints) ``` The `fitMarginals` function allows an alternative user-friendly way to specify one or more fixed-value constraints using a named vector passed to the function via `fixed` argument. ```{r marginalFitFixed, eval = FALSE} ## Set baseline at 0.1 and maximal responses at 0. fitMarginals(data, transforms = transforms, fixed = c("m1" = 0, "m2" = 0, "b" = 0.1)) ``` By default, no constraints are set, thus asymptotes are not shared and so a generalized Loewe model will be estimated. ### Optimization algorithms We advise the user to employ the `method = "nlslm"` argument which is set as the default in monotherapy curve estimation. It is based on `minpack.lm::nlsLM` function with an underlying Levenberg-Marquardt algorithm for non-linear least squares estimation. This algorithm is known to be more robust than `method = "nls"` and its Gauss-Newton algorithm. In cases with nice sigmoid-shaped data, both methods should however lead to similar results. `method = "optim"` is a simple sum-of-squared-residuals minimization driven by a default Nelder-Mead algorithm from `optim` minimizer. It is typically slower than non-linear least squares based estimation and can lead to a significant increase in computational time for larger datasets and bootstrapped statistics. In nice cases, Nelder-Mead algorithm and non-linear least squares can lead to rather similar estimates but this is not always the case as these algorithms are based on different techniques. In general, we advise that in automated batch processing whenever `method = "nlslm"` does not converge fast enough and/or emits a warning, user should implement a fallback to `method = "optim"` and re-do the estimation. If none of these suggestions work, it might be useful to fiddle around and slightly perturb starting values for the algorithms as well. By default, these are obtained from the `initialMarginal` function. ```{r fallback, eval = FALSE} nlslmFit <- tryCatch({ fitMarginals(data, transforms = transforms, method = "nlslm") }, warning = function(w) w, error = function(e) e) if (inherits(nlslmFit, c("warning", "error"))) optimFit <- tryCatch({ fitMarginals(data, transforms = transforms, method = "optim") }) ``` Note as well that additional arguments to `fitMarginals` passed via `...` ellipsis argument will be passed on to the respective solver function, i.e. `minpack.lm::nlsLM`, `nls` or `optim`. ### Custom marginal fit While `BIGL` package provides several routines to fit 4-parameter log-logistic dose-response models, some users may prefer to use their own optimizers to estimate the relevant parameters. 
It is rather easy to integrate this into the workflow by constructing a custom `MarginalFit` object. It is in practice a simple list with * `coef`: named vector with coefficient estimates * `sigma`: standard deviation of residuals * `df`: degrees of freedom from monotherapy curve estimates * `model`: model of the marginal estimation which allows imposing linear constraints on parameters. If no constraints are necessary, it can be left out or assigned the output of `constructFormula` function with no inputs. * `shared_asymptote`: whether estimation is constrained to share the asymptote. During the estimation, this is deduced from `model` object. * `method`: method used in dose-response curve estimation which will be re-used in bootstrapping * `transforms`: power and biological transformation functions (and their inverses) used in monotherapy curve estimation. This should be a list in a format described above. If `transforms` is unspecified or `NULL`, no transformations will be used in statistical bootstrapping unless the user asks for it explicitly via one of the arguments to `fitSurface`. Other elements in the `MarginalFit` are currently unused for evaluating synergy and can be disregarded. These elements, however, might be necessary to ensure proper working of available methods for the `MarginalFit` object. As an example, the following code generates a custom `MarginalFit` object that can be passed further to estimate a response surface under the null hypothesis. ```{r eval=FALSE} customMarginalFit <- list("coef" = c("h1" = 1, "h2" = 2, "b" = 0, "m1" = 1.2, "m2" = 1, "e1" = 0.5, "e2" = 0.5), "sigma" = 0.1, "df" = 123, "model" = constructFormula(), "shared_asymptote" = FALSE, "method" = "nlslm", "transforms" = transforms) class(customMarginalFit) <- append(class(customMarginalFit), "MarginalFit") ``` Note that during bootstrapping this would use `minpack.lm::nlsLM` function to re-estimate parameters from data following the null. A custom optimizer for bootstrapping is currently not implemented. ## Compute expected response for off-axis data Five types of null models are available for calculating expected response surfaces. * Generalized Loewe model is used if maximal responses are not constrained to be equal, i.e. `shared_asymptote = FALSE`, in the marginal fitting procedure and `null_model = "loewe"` in response calculation. * Classical Loewe model is used if constraints are such that `shared_asymptote = TRUE` in the marginal fitting procedure and `null_model = "loewe"` in response calculation. * Highest Single Agent is used if `null_model = "hsa"` irrespective of the value of `shared_asymptote`. * Bliss independence model is used when `null_model = "bliss"`. In the situations when maximal responses are constrained to be equal, the classical Bliss independence approach is used, when they are not equal, the Bliss independence calculation is performed on responses rescaled to the maximum range (i.e. absolute difference between baseline and maximal response). * Alternative Loewe Generalization is used when `null_model = "loewe2"`. If the asymptotes are constrained to be equal, this reduces to the classical Loewe. Note that if `shared_asymptote = TRUE` constraints are used, this also reduces to classical Loewe model. 
Three methods are available to control for errors * Family wise error rate is used if `control = "FWER"` * False coverage rate is used if `control = "FCR"` * Directional false coverage rate is used if `control = "dFCR"` ### (Generalized) Loewe model If transformation functions were estimated using `fitMarginals`, these will be automatically recycled from the `marginalFit` object when doing calculations for the response surface fit. Alternatively, transformation functions can be passed by a separate argument. Since the `marginalFit` object was estimated without the shared asymptote constraint, the following will compute the response surface based on the generalized Loewe model. ```{r analysis, message=FALSE, comment = NA} rs <- fitSurface(data, marginalFit, null_model = "loewe", B.CP = 50, statistic = "none", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rs) ``` The occupancy matrix used in the expected response calculation for the Loewe models can be accessed with `rs$occupancy`. For off-axis data and a fixed dose combination, the Z-score for that dose combination is defined to be the standardized difference between the observed effect and the effect predicted by a generalized Loewe model. If the observed effect differs significantly from the prediction, it might be due to the presence of synergy or antagonism. If multiple observations refer to the same combination of doses, then a mean is taken over these multiple standardized differences. The following plot illustrates the isobologram of the chosen null model. Coloring and contour lines within the plot should help the user distinguish areas and dose combinations that generate similar response according to the null model. Note that the isobologram is plotted by default on a logarithmically scaled grid of doses. ```{r image, warning=FALSE, comment = NA, fig.width = 6, fig.height = 4, fig.align = "center"} isobologram(rs) ``` The plot below illustrates the above considerations in a 3-dimensional setting. In this plot, points refer to the observed effects whereas the surface is the model-predicted response. The surface is colored according to the median Z-scores where blue coloring indicates possible synergistic effects (red coloring would indicate possible antagonism). ```{r plot3d, warning=FALSE, fig.align="center", fig.height=7, fig.width=7} plot(rs, legend = FALSE, main = "") ``` ### Highest Single Agent For the Highest Single Agent null model to work properly, it is expected that both marginal curves are either decreasing or increasing. Equivalent `summary` and `plot` methods are also available for this type of null model. ```{r analysis_hsa, message=FALSE, comment = NA} rsh <- fitSurface(data, marginalFit, null_model = "hsa", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsh) ``` <!-- Occupancy estimates provided with HSA response surface still rely on the (generalized) Loewe model. --> ### Bliss Independence Also for the Bliss independence null model to work properly, it is expected that both marginal curves are either decreasing or increasing. Equivalent `summary` and `plot` methods are also available for this type of null model. 
```{r analysis_bliss, message=FALSE, comment = NA} rsb <- fitSurface(data, marginalFit, null_model = "bliss", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsb) ``` <!-- Occupancy estimates provided with Bliss response surface still rely on the (generalized) Loewe model. --> ### Alternative Loewe Generalization Also for the Alternative Loewe Generalization null model to work properly, it is expected that both marginal curves are either decreasing or increasing. Equivalent `summary` and `plot` methods are also available for this type of null model. ```{r analysis_loewe2, message=FALSE, comment = NA} rsl2 <- fitSurface(data, marginalFit, null_model = "loewe2", B.CP = 50, statistic = "both", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") summary(rsl2) ``` <!-- Occupancy estimates provided still rely on the (generalized) Loewe model. --> ## Plot 2D cross section of response surface Α 2-dimensional predicted response surface plot can be generated for a group of null models. In the plot, the points refer to the observed effects whereas the colored lines are the model-predicted responses for the different null models. The panels correspond to concentration levels of one compound and the x-axis shows the concentration levels of the second compound. The user has the option to define which compound will be shown in the panels and in the x-axis. ```{r plot_2d_cross_section, message=FALSE, comment = NA, fig.width = 8, fig.height = 6} nullModels <- c("loewe", "loewe2", "bliss", "hsa") rs_list <- Map(fitSurface, null_model = nullModels, MoreArgs = list( data = data, fitResult = marginalFit, B.CP = 50, statistic = "none", parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") ) synergy_plot_bycomp(rs_list, ylab = "Response", plotBy = "Drug A", color = TRUE) ``` ## Statistical testing Presence of synergistic or antagonistic effects can be formalized by means of statistical tests. Two types of tests are considered here and are discussed in more details in the [methodology vignette](methodology.html) as well as the [accompanying paper](https://dx.doi.org/10.1038/s41598-017-18068-5). * `meanR` test evaluates how the predicted response surface based on a specified null model differs from the observed one. If the null hypothesis is rejected, this test suggests that at least some dose combinations may exhibit synergistic or antagonistic behaviour. The `meanR` test is not designed to pinpoint which combinations produce these effects nor what type of deviating effect is present. * `maxR` test allows to evaluate presence of synergistic/antagonistic effects for each dose combination and as such provides a point-by-point classification. Both of the above test statistics have a well specified null distribution under a set of assumptions, namely normality of Z-scores. If this assumption is not satisfied, distribution of these statistics can be estimated using bootstrap. Normal approximation is significantly faster whereas bootstrapped distribution of critical values is likely to be more accurate in many practical cases. ### meanR Here we will use the previously computed `CP` covariance matrix to speed up the process. * normal errors ```{r meanrnorm, message = FALSE} meanR_N <- fitSurface(data, marginalFit, statistic = "meanR", CP = rs$CP, B.B = NULL, parallel = FALSE) ``` * non-normal errors The previous piece of code assumes normal errors. 
If we drop this assumption, we can use bootstrap methods to resample from the observed errors. Other parameters for bootstrapping, such as additional distribution for errors, wild bootstrapping to account for heteroskedasticity, are also available. See `help(fitSurface)`. ```{r meanrnonnorm, message = FALSE} meanR_B <- fitSurface(data, marginalFit, statistic = "meanR", CP = rs$CP, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") ``` Both tests use the same calculated F-statistic but compare it to different null distributions. In this particular case, both tests lead to identical results. ```{r meanresults, echo=FALSE} MeanR_both <- rbind("Normal errors" = c(meanR_N$meanR$FStat, meanR_N$meanR$p.value), "Bootstrapped errors" = c(meanR_B$meanR$FStat, meanR_B$meanR$p.value)) colnames(MeanR_both) <- c("F-statistic", "p-value") kable(MeanR_both) ``` ### maxR The `meanR` statistic can be complemented by the `maxR` statistic for each of available dose combinations. We will do this once again by assuming both normal and non-normal errors similar to the computation of the `meanR` statistic. ```{r maxboth, message = FALSE} maxR_N <- fitSurface(data, marginalFit, statistic = "maxR", CP = rs$CP, B.B = NULL, parallel = FALSE) maxR_B <- fitSurface(data, marginalFit, statistic = "maxR", CP = rs$CP, B.B = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR") maxR_both <- rbind(summary(maxR_N$maxR)$totals, summary(maxR_B$maxR)$totals) ``` Here is the summary of `maxR` statistics. It lists the total number of dose combinations listed as synergistic or antagonistic for Experiment `r i` given the above calculations. ```{r printmax, echo = FALSE} rownames(maxR_both) <- c("Normal errors", "Bootstrapped errors") kable(maxR_both) ``` By using the `outsidePoints` function, we can obtain a quick summary indicating which dose combinations in Experiment `r i` appear to deviate significantly from the null model according to the `maxR` statistic. ```{r maxoutside, results="asis"} outPts <- outsidePoints(maxR_B$maxR$Ymean) kable(outPts, caption = paste0("Non-additive points for Experiment ", i)) ``` Synergistic effects of drug combinations can be depicted in a bi-dimensional contour plot where the `x-axis` and `y-axis` represent doses of `Compound 1` and `Compound 2` respectively and each point is colored based on the *p*-value and sign of the respective `maxR` statistic. ```{r maxcontour, fig.align="center", fig.width=6, fig.height=5} contour(maxR_B, colorPalette = c("blue", "white", "red"), main = paste0(" Experiment ", i, " contour plot for maxR"), scientific = TRUE, digits = 3, cutoff = cutoff ) ``` Previously, we had colored the 3-dimensional predicted response surface plot based on its Z-score, i.e. deviation of the predicted versus the observed effect. We can also easily color it based on the computed `maxR` statistic to account for additional statistical variation. ```{r plot3dmax, warning=FALSE, fig.height=7, fig.width=7} plot(maxR_B, color = "maxR", legend = FALSE, main = "") ``` ### Effect sizes and confidence interval The BIGL package also yields effect sizes and corresponding confidence intervals with respect to any response surface. The overall effect size and confidence interval is output in the summary of the `ResponseSurface`, but can also be called directly: ```{r summarySingleConfInt} summary(maxR_B$confInt) ``` In addition, a contour plot can be made with pointwise confidence intervals. 
Contour plot colouring can be defined according to the effect sizes or according to the maxR results.

```{r plotSingleConfInt, fig.height=5, fig.width=8}
plotConfInt(maxR_B, color = "effect-size")
```

You can also customize the coloring of the contour plot and the 3-dimensional predicted response surface plot based on effect sizes:

```{r contour_effectsize, warning=FALSE, fig.align="center", fig.width=6, fig.height=5, message=FALSE, comment = NA}
contour(
  maxR_B,
  colorPalette = c("Syn" = "blue", "None" = "white", "Ant" = "red"),
  main = paste0(" Experiment ", i, " contour plot for effect size"),
  colorBy = "effect-size",
  scientific = TRUE, digits = 3, cutoff = cutoff
)
```

```{r plot3d_effectsize, warning=FALSE, fig.height=7, fig.width=7, message=FALSE, comment = NA}
plot(maxR_B, color = "effect-size", legend = FALSE, main = "",
     gradient = FALSE,
     colorPalette = c("Ant" = "red", "None" = "white", "Syn" = "blue"),
     colorPaletteNA = "white")
```

# Analysis in case of variance heterogeneity

Starting from package version `1.2.0` the variance can be estimated separately for on-axis (monotherapy) and off-axis points using the `method` argument to `fitSurface`. The possible values for `method` are:

* `"equal"`, equal variances assumed (as above, default),
* `"unequal"`, variance is estimated separately for on-axis and off-axis points,
* `"model"`, the variance is modelled as a function of the mean.

Please see the [methodology vignette](methodology.html) for details. Below we show an example analysis in such a case. Note that transformations are not possible if variances are not assumed equal.

```{r heterogenanalysis, fig.width=6, fig.height=5}
marginalFit <- fitMarginals(data, transforms = NULL)
summary(marginalFit)
resU <- fitSurface(data, marginalFit, method = "unequal", statistic = "both",
                   B.CP = 20, B.B = 20, parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
summary(resU)
```

For the variance model, an exploratory plotting function is available to explore the relationship between the mean and the variance.

```{r modelVariancePlot, fig.width=6, fig.height=5}
plotMeanVarFit(data)
plotMeanVarFit(data, log = "xy")     # Clearer on the log-scale
plotMeanVarFit(data, trans = "log")  # Thresholded at maximum observed variance
```

The linear fit seems fine in this case.

```{r modelVarianceSum, fig.width=6, fig.height=5}
resM <- fitSurface(data, marginalFit, method = "model", statistic = "both",
                   B.CP = 20, B.B = 20, parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
```

If the log transformation yielded a better fit, it could be used via the following option.

```{r modelVarianceSumLogTransform, fig.width=6, fig.height=5, eval = FALSE}
resL <- fitSurface(data, marginalFit, method = "model", trans = "log",
                   statistic = "both", B.CP = 20, B.B = 20, parallel = FALSE,
                   wild_bootstrap = TRUE, wild_bootType = "normal",
                   control = "dFCR")
```

Negative variances were modelled, but the variance model uses the smallest observed variance as a minimum, so we can proceed.

```{r resM}
summary(resM)
```

# Analysis of multiple experiments

In order to proceed with multiple experiments, we repeat the same procedure as before. We collect all the necessary objects for which estimations do not have to be repeated to generate `meanR` and `maxR` statistics in a simple list.
```{r fullanalysis, message=FALSE} marginalFits <- list() datasets <- list() respSurfaces <- list() maxR.summary <- list() for (i in seq_len(nExp)) { ## Select experiment data <- subsetData(directAntivirals, i) ## Fit joint marginal model marginalFit <- fitMarginals(data, transforms = transforms, method = "nlslm") ## Predict response surface based on generalized Loewe model respSurface <- fitSurface(data, marginalFit, statistic = "maxR", B.CP = 20, parallel = FALSE, wild_bootstrap = TRUE, wild_bootType = "normal", control = "dFCR" ) datasets[[i]] <- data marginalFits[[i]] <- marginalFit respSurfaces[[i]] <- respSurface maxR.summary[[i]] <- summary(respSurface$maxR)$totals } ``` We use the `maxR` procedure with a chosen p-value cutoff of `r cutoff`. If `maxR` statistic falls outside the `r cutoff*100`th percentile of its distribution (either bootstrapped or not), the respective off-axis dose combination is said to deviate significantly from the generalized Loewe model and the algorithm determines whether it deviates in a synergistic or antagonistic way. Below is the summary of overall calls and number of deviating points for each experiment. ```{r maxrfull, echo=FALSE} allMaxR <- do.call(rbind, maxR.summary) rownames(allMaxR) <- paste("Experiment", 1:nrow(allMaxR)) kable(allMaxR, row.names = TRUE) ``` Previous summarizing and visual analysis can be repeated on each of the newly defined experiments. For example, `Experiment 4` indicates a total of 16 combinations that were called synergistic according to the `maxR` test. ```{r tabs, echo = FALSE, results = "asis"} i <- 4 genCaption <- function(k) paste("Non-additive points for Experiment", k) outPts <- outsidePoints(respSurfaces[[i]]$maxR$Ymean) print(kable(outPts, caption = genCaption(i))) ``` Consequently, above table for `Experiment 4` can be illustrated in a contour plot. ```{r fullcontour, echo=FALSE, fig.align = "center", fig.width = 6, fig.height = 5} i <- 4 contour(respSurfaces[[i]], main = paste("Experiment", i), scientific = TRUE, digits = 3, cutoff = cutoff) ```
/scratch/gouwar.j/cran-all/cranData/BIGL/vignettes/analysis.Rmd
--- title: "Methodology" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{Methodology} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- \newcommand{\MSE} {\text{MSE}} \newcommand{\prob}[1] {\text{P}\left\{#1\right\}} \newcommand{\diag}[1] {\text{diag} \left( #1 \right)} \newcommand{\mb}[1] {\boldsymbol{#1}} \newcommand{\cov}[1] {\mbox{Cov}\left\{#1\right\}} \newcommand{\covf}[2] {\mbox{Cov}_{#1}\left\{#2\right\}} \newcommand{\var}[1] {\mbox{Var}\left\{#1\right\}} \newcommand{\varf}[2] {\mbox{Var}_{#1}\left\{#2\right\}} Methodology described in this vignette is adapted from the article *"BIGL: Biochemically Intuitive Generalized Loewe null model for prediction of the expected combined effect compatible with partial agonism and antagonism"* (2017) by K. Van der Borght, A. Tourny, R. Bagdziunas, O. Thas, M. Nazarov, H. Turner, B. Verbist and H. Ceulemans ([doi:10.1038/s41598-017-18068-5](https://dx.doi.org/10.1038/s41598-017-18068-5)) as well as its technical supplement. We advise the reader to consult it for a deeper understanding of the procedure described next. Further chapters were added as extensions on top of the original article regarding variance heterogeneity, Bliss independence and alternative Loewe generalization. ## Marginal monotherapy curves First, a monotherapy model is described by the following equation. $$ y\left(d\right) = b + \dfrac{m - b}{1 + \left(\frac{\operatorname{EC50}}{d}\right)^{|h|}} $$ where $y$ is the response (or effect), $d$ is the dose (or concentration) of the compound, $h$ is the Hill's coefficient and $b$ and $m$ are respectively baseline and maximum response for that compound. Lastly, $\textrm{EC50}$ stands for the dose level of the compound needed to attain the midpoint effect, i.e. $$y\left(\textrm{EC50}\right) = b + \frac{m - b}{2}$$ Note that $m > b$ if and only if the response is increasing with the dose of the compound. If the response is decreasing, then $m < b$. This monotherapy equation is estimated for both compounds with the constraint that $b$, the baseline level, is shared across compounds. This baseline level is denoted by `b` in the parameter vector. Additionally, `m1` and `m2` in the parameter vector stand for estimates of maximal responses $m_{1}$ and $m_{2}$, respectively, whereas `h1` and `h2` are Hill's coefficients (slope) of the monotherapy curve for each compound. Lastly, `e1` and `e2` are log-transformed inflection points, i.e. `e1` $= \log\left(\textrm{EC50}_{1}\right)$ and `e2` $= \log\left(\textrm{EC50}_{2}\right)$. ## Null models of no synergy ### Occupancy Define the occupancy level $\textrm{occup}$, i.e. the fractional (enzymatic) effect or observed effect relative to maximal effect, for both compounds at given dose levels as $$ \textrm{occup}_{1}\left(d_{1}\right) = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{1}}{d_{1}}\right)^{h_{1}}} $$ $$ \textrm{occup}_{2}\left(d_{2}\right) = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{2}}{d_{2}}\right)^{h_{2}}} $$ Alternatively, the above equations can be rearranged to express dose in terms of occupancy so that $$ d_{1} = \operatorname{EC50}_{1} \left(\frac{1}{\operatorname{occup_{1}}} - 1 \right)^{-1/h_{1}} $$ $$ d_{2} = \operatorname{EC50}_{2} \left(\frac{1}{\operatorname{occup_{2}}} - 1 \right)^{-1/h_{2}} $$ Although the occupancy was considered here in the marginal case, it is equally well-defined when compounds are combined and is understood as the fraction of enzyme bound to any compound. 
It can thus be used to re-express the classical Loewe additivity equations.

### Classical Loewe model

In the classical Loewe model where both marginal models share upper $(m)$ and lower $(b)$ asymptotes, occupancy is defined as the solution to this additivity equation for each dose combination $(d_{1}, d_{2})$, namely

$$\frac{d_1\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{d_2\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}} = 1$$

Once occupancy is computed, in the classical Loewe model the predicted response at dose combination $(d_{1}, d_{2})$ can be calculated to be

\begin{equation}
\begin{split}
y & = b + \left(m - b\right) \times \textrm{occup} \\
  & = b + \left(m - b\right) \times \textrm{occup} \times \left[\frac{d_{1}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{d_{2}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}}\right] \\
  & = b + \textrm{occup} \times \left[\frac{\left(m - b\right) d_{1}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{\left(m - b\right) d_{2}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}}\right]
\end{split}
\end{equation}

### Generalized Loewe model

The generalized Loewe model extends the classical Loewe model by allowing the compounds to have different upper asymptotes, so that the above predicted response is instead written as

$$ y = b + \textrm{occup} \times \left[\frac{\left(m_{1} - b\right) d_{1}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{1}}}{\textrm{EC50}_{1}} + \frac{\left(m_{2} - b\right) d_{2}\left(\textrm{occup}^{-1} - 1\right)^{1/h_{2}}}{\textrm{EC50}_{2}}\right]$$

In particular, if $m_{1} = m_{2}$, then the generalized Loewe model is equivalent to the classical Loewe model.

### Highest Single Agent

A null model based on the Highest Single Agent (HSA) model does not attempt to model interaction effects at all, and the predicted effect of a combination is either the minimum (if marginal curves are decreasing) or the maximum (if marginal curves are increasing) of both monotherapy curves.

### Bliss independence model

Bliss independence implies that two agents do not cooperate, i.e. act independently of each other. <!-- In the classical Bliss independence model where marginal models share baseline and maximum response, the predicted response at dose combination $(d_{1}, d_{2})$ is based on the fractional effects of the marginal responses: --> Additionally, the assumption is that decreasing monotherapy curves express the fractions of _unaffected_ control populations, while increasing curves express the fractions of _affected_ control populations. The Bliss independence model is formulated for the fractional responses $f$ ("fraction affected"), where the predicted response $f_{12}$ at dose combination $(d_{1}, d_{2})$ is defined as:

$$ f_{12}(d_1, d_2) = f_1(d_1) + f_2(d_2) - f_1(d_1)f_2(d_2), $$

with

$$f_1(d_1) = \frac{y\left(d_1\right) - b}{m_1 - b} = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{1}}{d_{1}}\right)^{|h_{1}|}}$$

$$f_2(d_2) = \frac{y\left(d_2\right) - b}{m_2 - b} = \frac{1}{1 + \left(\frac{\operatorname{EC50}_{2}}{d_{2}}\right)^{|h_{2}|}}$$

In the classical Bliss independence model, marginal models share baseline and maximum response. To allow the compounds to have different maximal responses, the fractional responses are rescaled to the maximum range (i.e. the absolute difference between baseline and maximal response).
Then the predicted response is defined as:

$$ y = b + (m_{max}-b) \left[ \tilde{f_1}(d_1) + \tilde{f_2}(d_2) - \tilde{f_1}(d_1)\tilde{f_2}(d_2) \right], $$

where $m_{max}$ is whichever of $m_1$ or $m_2$ has the larger value of $|m_i - b|$, and

$$ \tilde{f_i} = f_i\frac{m_i-b}{m_{max}-b}~~\text{for}~~i = 1, 2.$$

This implementation of Bliss independence supports compounds with decreasing as well as increasing monotherapy profiles. However, combining one compound with a decreasing profile and another with an increasing profile is not supported.

### Alternative Loewe Generalization

An alternative generalization of Loewe additivity for the case of different asymptotes can be defined as a combination of the Loewe and HSA approaches as follows. In a classical Loewe equation, the predicted response $y$ at a given dose combination $(d_1, d_2)$ can be found by solving the equation:

$$ \frac{d_1}{D_1(y)} + \frac{d_2}{D_2(y)} = 1, $$

where $D_i(y) = \operatorname{EC50}_{i}\left(\frac{y-b}{m_i-y}\right)^{\frac{1}{|h_i|}}$, for $i = 1, 2$, is the dose of the $i$-th compound that gives response $y$. Note that here $D_i$ is properly defined only if $y$ is between $b$ and $m_i$. For the case of different asymptotes, say when $y > m_1$ (increasing curve) or $y < m_1$ (decreasing curve), we set $D_1(y) = +\infty,$ so that $y$ is determined from the equation $d_2 = D_2(y)$, replicating what is done in the HSA approach.

### Calculation procedure

In order to evaluate any of the null models described above, the `fitSurface` function will use the monotherapy parameter estimates from the previous step. The idea is that if there are synergistic or antagonistic effects, administration of both compounds will lead to important deviations from what the combined monotherapy data would suggest according to the null model. Routines within the `fitSurface` function essentially do the following (a minimal sketch of the first two steps is given after the list).

1. Find the occupancy for each combination of doses by solving the additivity equation of the classical Loewe model. This step does not require knowledge of the baseline or maximal response for either of the compounds. The occupancy solution is also reported in the HSA model case, although occupancy plays no role in such a model.

2. Compute the predicted response based on the response equations described above and the previously computed occupancy rate for each dose combination.

3. If desired, the function will then calculate the selected statistic to evaluate the deviation of the observed data from the null model predictions.
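
To make the first two steps concrete, here is a minimal, self-contained sketch that solves the additivity equation numerically and evaluates the generalized Loewe response for a single dose pair. It assumes increasing monotherapy curves with positive Hill coefficients and parameters on the natural scale (untransformed $\operatorname{EC50}$ values); the function `loeweResponse` is hypothetical and not the implementation used by `fitSurface()`.

```{r, eval = FALSE}
## Illustrative sketch, not the BIGL implementation: generalized Loewe
## response for one dose pair (at least one dose > 0), assuming increasing
## monotherapy curves with positive Hill coefficients.
loeweResponse <- function(d1, d2, b, m1, m2, h1, h2, EC50_1, EC50_2) {
  ## step 1: solve the additivity equation for the occupancy in (0, 1)
  additivity <- function(occ) {
    d1 * (1 / occ - 1)^(1 / h1) / EC50_1 +
      d2 * (1 / occ - 1)^(1 / h2) / EC50_2 - 1
  }
  occ <- uniroot(additivity, interval = c(1e-12, 1 - 1e-12))$root
  ## step 2: plug the occupancy into the generalized Loewe response
  b + occ * ((m1 - b) * d1 * (1 / occ - 1)^(1 / h1) / EC50_1 +
             (m2 - b) * d2 * (1 / occ - 1)^(1 / h2) / EC50_2)
}

## example with different maximal responses (m1 != m2)
loeweResponse(d1 = 0.5, d2 = 0.5, b = 0, m1 = 1, m2 = 0.6,
              h1 = 1, h2 = 1.5, EC50_1 = 0.3, EC50_2 = 0.4)
```

With $m_{1} = m_{2}$ the same calculation should reproduce the classical Loewe response, and with one dose set to zero it should reduce to the corresponding monotherapy curve.
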
* $\sigma^{2}$ is the variance of the replicate observations, assumed to be constant over all dose combinations, and estimated by taking the MSE of the null model.
* $\operatorname{df}_{0}$ is the number of degrees of freedom from the marginal model estimation.

We construct a vector $R = (r_{1}, ..., r_{k})$ which represents the mean deviation from the predicted effect. In particular,

$$ r_{k} = \left(\frac{1}{n_{k}} \sum_{i = 1}^{n_{k}} y_{ki}\right) - p_{k} $$

With the help of bootstrapping, the covariance matrix of $R$ can be estimated under the null hypothesis of no synergy, so that $\operatorname{Var}\left(R\right) = \sigma^{2}\left(D + C_{p}\right)$, where $D$ is a diagonal matrix with $1 / n_{i}$ in the $i$-th position and $C_{p}$ is the covariance matrix obtained from the bootstrap.

### `meanR`

The `meanR` test evaluates whether the null model globally fits the observed data well. It is derived using a lack-of-fit sum of squares technique. In particular, the test statistic is given by

$$ \operatorname{TestStat} = \frac{R^{T}\left(D + C_{p}\right)^{-1}R}{n_{1}\sigma^{2}} $$

Assuming that residuals from the generalized Loewe model are normally distributed, it can be shown that this statistic follows an $F_{n_{1}, \operatorname{df}_{0}}$ distribution under the null. If these assumptions are not satisfied, the null distribution can be approximated by bootstrapping.

### `maxR`

The `maxR` test evaluates whether the null model locally fits the observed data. In particular, it provides a test score for each off-axis combination. Based on the sign of this score, it can be determined whether synergy or antagonism is more likely, and a formal test can be constructed. Under the null hypothesis of no lack-of-fit and normally distributed effects,

$$ \max \left| R^{T}\left(D + C_{p}\right)^{-1/2} \right| / \sigma \sim \max \left| Z_{1}, \dots, Z_{k} \right| $$

where $Z_{j} \sim N\left(0,1\right)$. More particularly, the test statistic for the $k$-th off-axis dose combination $(d_{1}, d_{2})$ is computed as

$$ \operatorname{TestStat}\left(d_{1}, d_{2}\right) = \left[\left| R^{T}\left(D + C_{p}\right)^{-1/2} \right| / \sigma\right]_{k} $$

where $\left[\cdot\right]_{k}$ indicates the $k$-th coordinate. This test statistic is then compared either to the null distribution based on the normal approximation or to a bootstrapped approximation.

## Synergy evaluation in case of variance heterogeneity

In the methodology described above, one important assumption is made regarding the variance of the on- and off-axis dose combinations: it is considered to be equal across all points. This assumption is also mentioned in the original article and its technical supplement. In reality, the variance of the monotherapies is often not equal to the variance of the off-axis combinations, so the assumption of equal variances is not always valid. That is why the `meanR` and `maxR` test statistics can also be computed with separate variance estimates for on-axis (monotherapy) and off-axis dose combinations. Two extra methods are described below: the `unequal` method (Separated variance) and the `model` method (Modeled variance). For both methods, replicates are required, but no variance-stabilizing transformation is needed; the latter is often necessary when assuming equal variances.

### Adapted `meanR`

The adapted `meanR` test uses two separate variance estimates for (a) the monotherapies (= $\sigma^{2}_{0}$) and (b) the dose combinations (= $\Sigma_{1}$, a diagonal matrix).
The notation for both `unequal` and `model` is the same, but the estimation of $\Sigma_{1}$ differs. The variance of the monotherapies $\sigma^{2}_{0}$ is estimated as $\sigma^{2}$ above, by taking the MSE of the null model. The test statistic is given by:

$$ \operatorname{TestStat} = \frac{R^{T}\left(\Sigma_{1}D + \sigma^{2}_{0}C_{p}\right)^{-1}R}{n_{1}} $$

1. **`unequal` method**: The variance for the dose combinations is estimated by taking the variance in each dose combination and then taking the mean of all these variances, thus $\Sigma_{1} = \sigma^2_1 I_{n_1}$. The downside of this method is that the variance for all combinations is assumed to be equal. In reality, the variance often depends on the mean effect. This is taken into account in the `model` method.
2. **`model` method**: In this method, the diagonal elements of $\Sigma_{1}$ are no longer estimated as a single number but rather as a vector of variances: each off-axis point now has its own variance. A linear model is fitted on the original dataset, modeling the variance of each off-axis point as a function of its mean effect. The estimated model parameters are then used to predict the variance for the corresponding mean effect measured for that dose combination. These predicted variances are placed on the diagonal of $\Sigma_{1}$. Modelling the variance with a linear model may require a transformation to achieve a better fit and to avoid negative fitted variances. A log-transformation often works well.

### Adapted `maxR`

The same approach is taken for the adapted `maxR` test statistic. Instead of using one estimated variance for both on- and off-axis points, two separate estimates are used. The estimates for $\Sigma_{1}$ differ depending on the method used (`unequal` or `model`). The methodology for estimating the variance is the same as described in the "Adapted `meanR`" section above. The `maxR` test becomes

$$ \max \left| R^{T}\left(\Sigma_{1} D + \sigma^{2}_{0} C_{p}\right)^{-1/2} \right| \sim \max \left| Z_{1}, \dots, Z_{k} \right| $$

where $Z_{j} \sim N\left(0,1\right)$. In particular, the test statistic for the $k$-th off-axis dose combination $(d_{1}, d_{2})$ is computed as

$$ \operatorname{TestStat}\left(d_{1}, d_{2}\right) = \left[\left| R^{T}\left(\Sigma_{1} D + \sigma^{2}_{0} C_{p}\right)^{-1/2} \right| \right]_{k} $$

where $\left[\cdot\right]_{k}$ indicates the $k$-th coordinate.

### Bootstrapping under unequal variances

In the case of the `unequal` variance assumption, the bootstrap proceeds as before, with the off-axis residuals being pooled and resampled. With the `model` assumption, the resampling is more complicated, as the residuals are no longer exchangeable. One option is to rescale the observed residuals according to the mean-variance model (i.e. dividing them by their standard deviations), resample from this pool of standardized residuals, and then scale back to the true variance (by multiplying by the standard deviation). Yet this approach has proven to be unstable, as it leads to extreme observations. An alternative (the default) is to generate zero-mean normal data with the modelled variances (see the `rescaleResids` argument in `fitSurface()`).

### Advantages of `unequal` and `model` methods compared to assumption of equal variances

The assumption of equal variances between monotherapies and off-axis dose combinations fails to control the type I error rate at the pre-specified level when the variance of the off-axis points increases (natural variance or outliers).
This results in false positive synergy calls when in reality there are none. Both the `unequal` and the `model` methods control the type I error rate far better, with slightly better results obtained by the `model` method. Furthermore, the sensitivity and specificity of the `maxR` test statistics are higher with the methods assuming variance heterogeneity than with the methods assuming equal variances.

## Effect size for off-axis points

As with many statistical tests, the researcher may not only be interested in a measure of significance (e.g. a p-value), but also in a measure of effect size, and in a measure of the imprecision of this estimated effect size. Here we develop confidence intervals for two types of effect sizes. The first is a pointwise effect size, which is defined at every off-axis point as the difference between the true mean response and the expected response under additivity. It is estimated as $E_{i} = \frac{1}{n_i}\sum_{j=1}^{n_i}(R_{ij}-\hat{R}_i)$ for every off-axis point $i=1, ..., n_1$, whereby we strive to achieve a simultaneous coverage of 95\% for all off-axis points.

### Confidence interval\label{subsec:ci}

Let $e_i$ denote the true effect size at off-axis point $i$, and call $E_{i}$ its estimate based on the data. Relying on the asymptotic normality of the estimator, an approximate (asymptotic) confidence interval would be formed as the set:

\begin{equation}
\left\{ e_i: \left\vert \frac{E_{i}-e_i}{\hat{s}_i} \right\vert < z_{\alpha/2} \right\}
\end{equation}

with $\hat{s}_i$ the estimated standard error of $E_i$, and $z_{\alpha/2}$ the $1-\alpha/2$ quantile of the standard normal distribution. We know, however, from the meanR and maxR tests that the asymptotic distributions provide poor approximations. Therefore, we use the bootstrap here too to build the confidence intervals.

For every bootstrap instance, bootstrap observations are sampled for on- as well as off-axis points. For the on-axis points, a parametric bootstrap based on the estimated monotherapy curves is used, as for the calculation of the meanR and maxR statistics. Based on these on-axis bootstrap samples, new monotherapy curves are fitted with resulting bootstrap residual variance $MSE_0^b$, and corresponding response surfaces with expected outcomes $\hat{R}_i^b$ are derived.

For the off-axis points, with $n_i$ replicates at a given point, $A_{ij} = R_{ij} - \bar{R}_i$, $j = 1, ..., n_i$, are the pointwise residuals. Here $R_{ij}$ is the observed outcome and $\bar{R}_i$, the average outcome at point $i$, serves as an unbiased estimator of the true response. Note that these residuals are different from the residuals $E_i = \bar{R}_i-\hat{R}_i$ used to construct the test statistics; for $A_{ij}$ the departure with respect to the mean outcome at that off-axis point is used. These residuals are resampled with replacement from the observed residuals, possibly using rescaling as explained below. The resampled residuals are then added to the pointwise means to obtain bootstrapped observations $R_{ij}^b = \bar{R}_i + A_{ij}^b$, with $b = 1, ...,B$ denoting the bootstrap instance. An extra option of using the wild bootstrap is also available.
In this method, a new response variable is calculated by multiplying the sampled residuals $A_{ij}^b$ by yet another random variable $\upsilon_{ij}$, so that the bootstrapped observations are given by

$$R_{ij}^b = \bar{R}_i + A_{ij}^b \upsilon_{ij}$$

where the $\upsilon_{ij}$ are independent random variables with mean zero and variance 1, which can be sampled from a normal, gamma, Rademacher or two-point distribution. This leads to the bootstrap effect sizes $E^b_i = \bar{R}_i^b-\hat{R}_i^b$ and test statistics $\left\vert \frac{E^b_i -E_i}{\hat{s}^b_i} \right\vert$, using the standard deviations $\hat{s}_i^b$ from the bootstrap. Over all bootstrap instances, we then find the distribution of

\begin{equation}
T = \max_i \left\vert \frac{E^b_i -E_i}{\hat{s}^b_i} \right\vert
\end{equation}

Call $t_\alpha$ the threshold such that

\begin{equation}
\prob{T>t_\alpha} = \alpha
\end{equation}

Finally, we find for every off-axis point the confidence interval as the collection:

\begin{equation}
\left\{ e_i: \left\vert \frac{E_{i}-e_i}{\hat{s}_i} \right\vert < t_\alpha \right\}
\end{equation}

### Standard error\label{subsec:se}

As an estimate of the standard error we use

\begin{equation}
\hat{s}_i^b = \sqrt{\text{diag}(\mb{F}^b+\MSE_0^b \mb{C}_p)_i},
\label{eq:confIntSe}
\end{equation}

with $\mb{F}^b$ equal to $\MSE_0^b\mb{D}$, $\MSE_1^b\mb{D}$ or $\mb{S}^b$, depending on which variance model is used. This standard error resembles the variance estimators of the meanR and maxR statistics discussed above. By using only the diagonal elements of $\mb{F}^b$, we ignore the covariance between test statistics. We use the studentised-range concept (which is also central to Tukey's method for multiple testing) for controlling the family-wise error rate (FWER) by default. Other available options are the false coverage rate (FCR) and the directional false coverage rate (dFCR). For the covariance matrix $\mb{C}_p$ we use the one estimated for the observed data. This matrix turns out to be quite stable over the bootstrap runs; moreover, calculating the matrix for each bootstrap sample would imply a time-consuming nested bootstrap procedure. On the other hand, $MSE_0^b$ is re-estimated with each bootstrap sample, and $\mb{F}^b$ is also re-estimated based on the bootstrapped data.

### Single effect measure

Researchers may also want a single measure of the strength of the synergy in a single experiment, e.g. in view of ranking compound combinations according to their synergistic effect. For this we calculate the average of the pointwise off-axis effect sizes, estimated as $\bar{E} = \frac{1}{\sum_{i= 1}^{n_1}n_i }\sum_{i=1}^{n_1}\sum_{j= 1}^{n_i}(R_{ij}-\hat{R}_i)$. It may be considered a measure of the "volume" between the expected and observed response surfaces, similar to the "integrated synergy" of \textcite{DiVeroli2016}. Note that this effect size may cause synergistic and antagonistic points to cancel out against one another, but we believe this scenario is unlikely. As before, we would like a measure of imprecision for this effect size.

#### Confidence interval

The construction of the bootstrap confidence interval for the single effect size follows the general procedure for bootstrap-t confidence intervals \parencite{Efron1982, Hall1988}. Let $\mb{F}$ denote the estimated covariance matrix of the raw residuals as before.
Using Equation \eqref{Eq_Var} in the main text and the fact that

\begin{equation}
\var{\bar{E}} = \var{\frac{1}{n_1}\sum_{i=1}^{n_1} E_i} = \frac{1}{n_1^2}\left(\sum_{i=1}^{n_1} \var{E_i} + \sum_{i=1}^{n_1}\sum_{j \neq i}\cov{E_i, E_j}\right),
\label{eq:varE}
\end{equation}

the standard error of $\bar{E}$ is $se(\bar{E}) = \frac{1}{n_1}\sqrt{\sum_{f \in \mb{F}} f}$. We obtain bootstrapped data for on- and off-axis points for bootstrap instances $b=1, ..., B$, and calculate the corresponding mean residual and standard error $\bar{E}_b$ and $se(\bar{E}_b)$. Then define the statistic

\begin{equation}
Q_b = \frac{\bar{E}_b-\bar{E}}{se(\bar{E}_b)}
\label{eq:confIntSingle}
\end{equation}

Note that $se(\bar{E}_b)$ relies on the bootstrap covariance matrix $\mb{F}^b$. Call ($q_{\alpha/2}$, $q_{1-\alpha/2}$) the quantiles of the bootstrap distribution such that

\begin{equation}
\prob{Q_b<q_{\alpha/2}} = \prob{Q_b>q_{1-\alpha/2} } = \alpha/2
\end{equation}

Finally, we find the confidence interval as

\begin{equation}
\left[\bar{E} - se(\bar{E}_b)q_{\alpha/2},\ \bar{E} + se(\bar{E}_b)q_{1-\alpha/2}\right]
\end{equation}

Of course, this procedure can easily be adapted to find the sum of all raw residuals, but the mean may be more comparable across experiments with different designs.

## On resampling residuals\label{sec:resampling}

Depending on the mean-variance structure in the data, residuals are resampled differently. The same strategies are used for resampling (1) the residuals with respect to the expected response surface under the null hypothesis, used for the meanR and maxR statistics, and (2) the residuals $A_{ij}$ with respect to the average at the off-axis point under the alternative hypothesis, used for constructing the confidence intervals. In the description below, we use $U_{ij}$ as generic notation for either type of residual.

In the case of constant variability within the off-axis points, the residuals $U_{ij}$ can simply be pooled and resampled with replacement. When a linear mean-variance structure is assumed, one option is to first rescale the pointwise residuals to $b_{ij} = \frac{U_{ij}}{\sqrt{v^{-1}(\beta_0+\beta_1\mu_i)}}$, then pool and resample them, and then scale them back to $b_{ij}\sqrt{v^{-1}(\beta_0+\beta_1\mu_i)}$ according to their new position $i$. Yet in practice this leads to extreme observations, which destabilizes the fitting procedure. A second option is to use random draws from a zero-mean normal distribution with the modelled variance $v^{-1}(\beta_0+\beta_1\mu_i)$ to generate new $U_{ij}$'s. This latter option was found to be more stable and is used as the default in the BIGL package.
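To make the two resampling schemes concrete, the sketch below illustrates them on toy numbers in plain R. It is not the internal BIGL implementation and all object names are hypothetical; for simplicity, an identity variance link is assumed so that the modelled variance is simply $\beta_0 + \beta_1 \mu_i$.

```r
## Illustrative sketch of the two resampling schemes (hypothetical names,
## toy data; not the internal BIGL implementation).
set.seed(1)
mu     <- c(40, 55, 70)          # mean response per off-axis point
n_rep  <- c(3, 3, 3)             # replicates per off-axis point
beta0  <- 2                      # assumed linear mean-variance model
beta1  <- 0.1                    # (identity link for simplicity)
sigma2 <- beta0 + beta1 * mu     # modelled variance per point

## pointwise residuals U_ij around the means (toy data)
U <- lapply(seq_along(mu), function(i) rnorm(n_rep[i], 0, sqrt(sigma2[i])))

## Option 1: standardize, pool, resample, scale back (can be unstable)
b_std   <- unlist(Map(function(u, s2) u / sqrt(s2), U, sigma2))
U_boot1 <- lapply(seq_along(mu), function(i)
  sample(b_std, n_rep[i], replace = TRUE) * sqrt(sigma2[i]))

## Option 2 (the default described above): draw fresh zero-mean normal
## residuals with the modelled variances
U_boot2 <- lapply(seq_along(mu), function(i) rnorm(n_rep[i], 0, sqrt(sigma2[i])))
```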
/scratch/gouwar.j/cran-all/cranData/BIGL/vignettes/methodology.Rmd
###################################################################### #:: bin_cor function - R package BINCOR # #:: Programmed by Josué M. Polanco-Martinez a.k.a jomopo # #:: Email: [email protected] # ###################################################################### # Copyright (C) 2017 by Josué M. Polanco-Martínez # # This file/code is part of the R package BINCOR # ###################################################################### # # BINCOR is free software: you can redistribute it and/or modify it # it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # BINCOR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BINCOR If not, see <http://www.gnu.org/licenses/>. # ##################################################################### bin_cor <- function(ts1, ts2, FLAGTAU=3, ofilename) { #:: inputs: #:: ts1 and ts2 are the time series under analysis (the first column #:: are the times/ages (in ascending order) & the second column are the #:: elements of the variable under stiudy). "ofilename" is the output's #:: filename, which will contains the binned data. #:: FLAGTAU (the persistence method). #:: Options (by default it is 3): #:: 1 tau_x + tau_y [Eq. 7.44, Mudelsee (2010, 2014)] #:: 2 max(tau_x, tau_y) [Eq. 7.45, Mudelsee (2010, 2014)] #:: 3 dist_x_y/ln(a_x_y_est) [Eq. 7.48, Mudelsee (2010, 2014)] #:: tau_x, tau_y are the persistence (memory) time for ts1 and ts2, #:: respectively. #:: Checking the input data if(dim(ts1)[2] !=2 | dim(ts2)[2] != 2) stop ("There is a problem with the dimension in your input data. The input data should be a couple of vectors of dimension N x 2 (rows x columns). Thank you for using the BINCOR package. \n") if(length(which(diff(ts1[,1]) <= 0)) | length(which(diff(ts2[,1]) <= 0))) stop ("There are some times/ages that are not strictly monotonic ascending. Please, check your data. Thank you for using the BINCOR package. \n") if(all.equal(ts1[,1], ts2[,1]) == "TRUE") cat("The time series have the same timescales, it's not necessary to perform the binned procedure, but it will be computed. \n") #:: Getting the names of the variables names.ts1 <- names(ts1) names.ts2 <- names(ts2) if(is.null(colnames(ts1))) colnames(ts1) <- c("Time-ts1", "Variable-ts1") if(is.null(colnames(ts2))) colnames(ts2) <- c("Time-ts2", "Variable-ts2") #:: Getting Nx & Ny (number of elements for each time series) Nx <- length(ts1[,1]) Ny <- length(ts2[,1]) #:: Computing Eqs. 7.46 (Mudelsee 2010, 2014) # or smarter in R ;-) dist_X <- (ts1[Nx,1] - ts1[1,1])/ (Nx - 1) # mean(diff(ts"x"[,1])) dist_Y <- (ts2[Ny,1] - ts2[1,1])/ (Ny - 1) Tmax_m <- max(ts1[Nx,1], ts2[Ny,1]) Tmin_m <- min(ts1[1,1], ts2[1,1]) dist_XY <- (Tmax_m - Tmin_m) / (Nx + Ny - 1) #:: Getting Tau (persistence time or memory for each t.s.) #:: There are several ways to get 'tau', but we use Mudelsee (2002). #:: This tau estimation can be obtained in R from the REDFIT (Schulz #:: & Mudelsee 2002) function included in the 'dplR' R package #:: (Bunn et al 2015. https://cran.r-project.org/package=dplR). 
#:: We compute the raw spectrum (dof=2) for each time series (n50=1 & iwin=0) #:: in order to get tau (subroutine redfitTauest), we have modified slightly #:: the version of the redfit subroutine redfitTauest. This piece of #:: code ("tauest_dplR.R") is provided in our BINCOR package. tau1 <- unlist(redfitTauest(ts1[,1], ts1[,2])) #If you use tauest_dplR.R tau2 <- unlist(redfitTauest(ts2[,1], ts2[,2])) #If you use subroutine redfitTauest from 'dplR' R package #redfit.ts1 <- redfit(ts1[,2], ts1[,1], ofac=1, n50=1, iwin=0) #redfit.ts2 <- redfit(ts2[,2], ts2[,1], ofac=1, n50=1, iwin=0) #tau1 <- redfit.ts1$tau #tau2 <- redfit.ts2$tau a_X_est <- tau1[2] a_Y_est <- tau2[2] tau_X_est <- tau1[3] tau_Y_est <- tau2[3] #:: FLAGTAU (persistence for both time series) options! if (FLAGTAU == 1) { #:: Eq. 7.44 (Mudelsee 2010 & 2014). taub <- tau_X_est + tau_Y_est cat("Hi!, option 1: taub <- tau_X_est + tau_Y_est [Eq. 7.44 (Mudelsee 2010 & 2014)] \n") } if (FLAGTAU == 2) { #:: Eq. 7.45 (Mudelsee 2010 & 2014) taub <- max(tau_X_est, tau_Y_est) cat("Hi!, option 2: taub <- max(tau_X_est, tau_Y_est) [Eq. 7.45 (Mudelsee 2010 & 2014)] \n") } if (FLAGTAU == 3) { #:: Eq. 7.47 (Mudelsee 2010 & 2014) a_XY_est <- sqrt(a_X_est*a_Y_est) #:: Eq. 7.48 (Mudelsee 2010 & 2014) taub <- -dist_XY / log( a_XY_est ) cat("Hi!, option 3: taub <- -dist_XY / log(a_XY_est) [Eq. 7.47 & 7.48 (Mudelsee 2010 & 2014)] \n") } #:: Inspired from the M. Mudelsee's code "mc-brxy.f90") taub <- min(taub, (Tmax_m - Tmin_m)*0.5) taub <- max(taub, (Tmax_m - Tmin_m)/(Nx - 1)) #:: Computing the number of "bins" remi <- (Tmax_m - Tmin_m) / taub Nb <- round(remi) cat("Testing the number of bins: taub=", taub," Nb=", Nb,"\n") id1 <- rep(9999, Nb) id2 <- rep(9999, Nb) mean.ts1 <- rep(9999, Nb) mean.ts2 <- rep(9999, Nb) tau.t.mean <- rep(9999, Nb) limI <- Tmin_m #:: Here, the binned time series are created! for (N in 1:Nb) { limS <- Tmin_m + N*taub if (N == 1) { id1t <- which(ts1[,1] >= limI & ts1[,1] <= limS) id2t <- which(ts2[,1] >= limI & ts2[,1] <= limS) } if (N > 1) { id1t <- which(ts1[,1] > limI & ts1[,1] <= limS) id2t <- which(ts2[,1] > limI & ts2[,1] <= limS) } id1[N] <- length(id1t) id2[N] <- length(id2t) #:: Evaluating IF a "bin" contains BOTH more than zero #:: X (ts1) & Y (ts2) points (pp. 312, Mudelsee 2010) if (id1[N] & id2[N] > 0) { mean.ts1[N] <- mean(ts1[id1t,2]) mean.ts2[N] <- mean(ts2[id2t,2]) tau.t.mean[N] <- mean(c(limI, limS)) #or (limI + limS)/2 } else { #:: This's a simple way to face this "problem", but you need to remove #:: the NA's to estimate the correlation btw "bin ts1" and "bin ts2". mean.ts1[N] <- NA mean.ts2[N] <- NA tau.t.mean[N] <- mean(c(limI, limS)) #(limI + limS)/2 } limI <- limS } Datin <- cbind(tau.t.mean, mean.ts1, mean.ts2) # mean.ts1 and mean.ts2 are the binned time series #:: Computing some basic statistics id.noNA <- which(tau.t.mean != "NA") avg.bin <- round(mean(diff(tau.t.mean[id.noNA])), 2) #avg.bin <- mean(diff(na.omit(tau.t.mean))) VAR.ts1 <- round(cbind(var(ts1[,2]), var(na.omit(mean.ts1))), 2) VAR.ts2 <- round(cbind(var(ts2[,2]), var(na.omit(mean.ts2))), 2) chg.VARts1 <- round(VAR.ts1[1] - VAR.ts1[2], 2) chg.VARts2 <- round(VAR.ts2[1] - VAR.ts2[2], 2) per_chg.VARts1 <- round((chg.VARts1 / VAR.ts1[1])*100, 2) per_chg.VARts2 <- round((chg.VARts2 / VAR.ts2[1])*100, 2) write.table(Datin, file=ofilename, col.names=F, row.names=F) names.ls <- c("Binned_time_series", "Auto._cor._coef._ts1", "Persistence_ts1", "Auto._cor._coef._ts2", "Persistence_ts2", "bin width", "Number_of_bins", "Average spacing", "VAR. 
ts1", "VAR. bin ts1", "VAR. ts2", "VAR. bin ts2", "VAR. ts1 - VAR bints1", "VAR. ts2 - VAR bints2", "% of VAR. lost ts1", "% of VAR. lost ts2") LIST <- list(Datin, a_X_est, tau_X_est, a_Y_est, tau_Y_est, taub, Nb, avg.bin, VAR.ts1[1], VAR.ts1[2], VAR.ts2[1], VAR.ts2[2], chg.VARts1, chg.VARts2, per_chg.VARts1, per_chg.VARts2) names(LIST) <- names.ls return(LIST) }
/scratch/gouwar.j/cran-all/cranData/BINCOR/R/bin_cor_function.R
###################################################################### #:: ccf_ts function - R package BINCOR # #:: Programmed by Josué M. Polanco-Martinez a.k.a jomopo # #:: Email: [email protected] # ###################################################################### # Copyright (C) 2017 by Josué M. Polanco-Martínez # # This file/code is part of the R package BINCOR # ###################################################################### # # BINCOR is free software: you can redistribute it and/or modify it # it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # BINCOR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BINCOR If not, see <http://www.gnu.org/licenses/>. # ##################################################################### ccf_ts <- function(bints1, bints2, lagmax=NULL, ylima=-1, ylimb=1, rmltrd="N", RedL=T, device="screen", Hfig, Wfig, Hpdf, Wpdf, resfig, ofilename) { #:: Checking the input data if( dim(bints1)[2] !=2 | dim(bints2)[2] != 2) stop ("There is a problem with the input data. The input data should be a couple of vectors of dimension N x 2 (rows x columns). Please, use in the R's command line: dim(ts'x')[2] to verify the number of columns. Thank you for using our BINCOR package.") if( dim(bints1)[1] != dim(bints2)[1] ) stop ("The binned time series under analysis do not have the same number of elements. Thank you for using our BINCOR package.") #:: Devices options: png, jpg & pdf! if (device=="png") { fileout <- paste("ccf_", ofilename, ".png", sep="") png(fileout, height=Hfig, width=Wfig, res=resfig) } if (device=="jpeg" || device=="jpg") { fileout <- paste("ccf_", ofilename, ".jpg", sep="") jpeg(fileout, height=Hfig, width=Wfig, res=resfig) } if (device=="pdf") { fileout <- paste("ccf_", ofilename, ".pdf", sep="") pdf(fileout, height=Hpdf, width=Wpdf) } if (rmltrd == "N" || rmltrd == "n") ccf.12 <- ccf(bints1[,2], bints2[,2], main="", ylab="Corr. coef.", lag.max=lagmax, ylim=c(ylima,ylimb), las=1) if (rmltrd == "Y" || rmltrd == "y") #:: The linear trend is removed -the R pack. "pracma" is required! ccf.12 <- ccf(c(detrend(bints1[,2])), c(detrend(bints2[,2])), main="", ylab="Corr. coef.", lag.max=lagmax, ylim=c(ylima,ylimb), las=1) if (RedL == TRUE | RedL == T) { idmidp <- which(ccf.12$lag == 0) abline(h=ccf.12$acf[idmidp], col="red") } if (device != "screen") dev.off() return(ccf.12) }
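#:: Illustrative usage sketch (comment only; object names are hypothetical).
#:: 'bin.ex' is assumed to be the output of a previous bin_cor() call; the
#:: matrix in its first element has columns: time, binned ts1, binned ts2.
#:: Bins with NA values are removed before estimating the cross-correlation:
#:: binmat <- na.omit(bin.ex$Binned_time_series)
#:: bints1 <- binmat[, c(1, 2)]
#:: bints2 <- binmat[, c(1, 3)]
#:: ccf_ts(bints1, bints2, lagmax=10, rmltrd="N", RedL=TRUE,
#::        device="screen", ofilename="example")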
/scratch/gouwar.j/cran-all/cranData/BINCOR/R/ccf_ts.R
###################################################################### #:: cor_ts function - R package BINCOR # #:: Programmed by Josué M. Polanco-Martinez a.k.a jomopo # #:: Email: [email protected] # ###################################################################### # Copyright (C) 2017 by Josué M. Polanco-Martínez # # This file/code is part of the R package BINCOR # ###################################################################### # # BINCOR is free software: you can redistribute it and/or modify it # it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # BINCOR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BINCOR If not, see <http://www.gnu.org/licenses/>. # ##################################################################### cor_ts <- function(bints1, bints2, varnamets1="NULL", varnamets2="NULL", KoCM, rmltrd="N", device="screen", Hfig, Wfig, Hpdf, Wpdf, resfig, ofilename) { #:: Checking the input data if( dim(bints1)[1] != dim(bints2)[1] ) stop ("The binned time series under analysis do not have the same number of elements. Thank you for using our BINCOR package.") if( dim(bints1)[2] !=2 | dim(bints2)[2] != 2) stop ("There is a problem with the input data. The input data should be a couple of vectors of dimension N x 2 (rows x columns). Please, use in the R's command line: dim(ts'x')[2] to verify the number of columns. Thank you for using our BINCOR package.") if (rmltrd == "N" || rmltrd == "n") cor.ts <- cor.test(bints1[,2], bints2[,2], method=KoCM) if (rmltrd == "Y" || rmltrd == "y") #:: The linear trend is removed -the R pack. "pracma" is required! cor.ts <- cor.test(c(detrend(bints1[,2])), c(detrend(bints2[,2])), method=KoCM) TOPRINT <- paste("The binned ", KoCM, "'s correlation coefficient is ", sep="") if (KoCM=="pearson") cat(paste(TOPRINT, round(cor.ts$estimate,4), " [", round(cor.ts$conf.int[1], 4), "; ", round(cor.ts$conf.int[2],4), "]", sep=""), "\n") if (KoCM=="spearman" || KoCM=="kendall") cat(paste(TOPRINT, round(cor.ts$estimate,4), "," ," p-value = ", round(cor.ts$p.value, 15), sep=""), "\n") if (device=="png") { fileout <- paste("scatterplot_", ofilename, ".png", sep="") png(fileout, height=Hfig, width=Wfig, res=resfig) } if (device=="jpeg" || device=="jpg") { fileout <- paste("scaterplot_", ofilename, ".jpg", sep="") jpeg(fileout, height=Hfig, width=Wfig, res=resfig) } if (device=="pdf") { fileout <- paste("scaterplot_", ofilename, ".pdf", sep="") pdf(fileout, height=Hpdf, width=Wpdf) } plot(bints1[,2], bints2[,2], t="p", xlab=varnamets1, ylab=varnamets2, las=1, pch=16) if (device != "screen") dev.off() return(cor.ts) }
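#:: Illustrative usage sketch (comment only; 'bints1' and 'bints2' are the
#:: hypothetical NA-free binned series built as in the ccf_ts example above):
#:: cor_ts(bints1, bints2, varnamets1="Series 1", varnamets2="Series 2",
#::        KoCM="pearson", rmltrd="N", device="screen", ofilename="example")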
/scratch/gouwar.j/cran-all/cranData/BINCOR/R/cor_ts.R
###################################################################### #:: plot_ts - R package BINCOR # #:: Programmed by Josué M. Polanco-Martinez a.k.a jomopo # #:: Email: [email protected] # ###################################################################### # Copyright (C) 2017 by Josué M. Polanco-Martínez # # This file/code is part of the R package BINCOR # ###################################################################### # # BINCOR is free software: you can redistribute it and/or modify it # it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, # or (at your option) any later version. # # BINCOR is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with BINCOR If not, see <http://www.gnu.org/licenses/>. # ##################################################################### plot_ts <- function(ts1, ts2, bints1, bints2, varnamets1="", varnamets2="", colts1=1, colts2=1, colbints1=2, colbints2=2, ltyts1=1, ltyts2=1, ltybints1=2, ltybints2=2, device="screen", Hfig, Wfig, Hpdf, Wpdf, resfig, ofilename) { #:: Checking the input data if( dim(bints1)[1] != dim(bints2)[1] ) stop ("The binned time series under analysis do not have the same number of elements. Thank you for using our BINCOR package.") if( dim(ts1)[2] !=2 | dim(ts2)[2] != 2) stop ("There is a problem with the input data. The input data should be a couple of vectors of dimension N x 2 (rows x columns). Please, use in the R's command line: dim(ts'x')[2] to verify the number of columns. Thank you for using our BINCOR package.") #:: Getting the max./min. 
values for times range.timesORts1 <- range(ts1[,1]) range.timesORts2 <- range(ts2[,1]) #:: Devices options: png, jpg & pdf if (device=="png") { fileout <- paste("plot_", ofilename, "%02d.png", sep="") png(fileout, height=Hfig, width=Wfig, res=resfig) } if (device=="jpeg" || device=="jpg") { fileout <- paste("plot_", ofilename, "%02d.jpg", sep="") jpeg(fileout, height=Hfig, width=Wfig, res=resfig) } if (device=="pdf") { fileout <- paste("plot_", ofilename, ".pdf", sep="") pdf(fileout, height=Hpdf, width=Wpdf) } par(mar=c(5, 5, 3, 5) + 0.1) #if (device=="screen") dev.new() ###### plot(ts1, t="b", col=colts1, xlab="Time", ylab="", lty=ltyts1, main="", xlim=range.timesORts1, las=1, cex=0.75) #par(new=T) points(bints1, t="b", col=colbints1, xaxt="n", yaxt="n", xlab="", ylab="", lty=ltybints1, cex=0.75) mtext(2, text=varnamets1, line=3.5, cex=0.95) legend("topleft", bty="n", legend=c(paste(varnamets1, " (primary).", " N = ", length(ts1[,1]), " elements", sep=""), paste(varnamets1, " (binned).", " N = ", length(bints1[,1]), " elements", sep="")), lty=c(ltyts1, ltybints1), col=c(colts1,colbints1)) axis(3, ts1[,1], labels=F, col=colts1) axis(3, bints1[,1], labels=F, line=1, col=colbints1) if (device=="screen") dev.new() par(mar=c(5, 5, 3, 5) + 0.1) ##### plot(ts2, t="b", col=colts2, xlab="Time", ylab="", lty=ltyts2, main="", xlim=range.timesORts2, las=1, cex=0.75) #par(new=T) points(bints2, t="b", col=colbints2, xaxt="n", yaxt="n", xlab="", ylab="", lty=ltybints2, cex=0.75) mtext(2, text=varnamets2, line=3.5, cex=0.95) legend("topleft", bty="n", legend=c(paste(varnamets2, " (primary).", " N = ", length(ts2[,1]), " elements", sep=""), paste(varnamets2, " (binned).", " N = ", length(bints2[,1]), " elements", sep="")), lty=c(ltyts2, ltybints2), col=c(colts2,colbints2)) axis(3, ts2[,1], labels=F, col=colts2) axis(3, bints2[,1], labels=F, line=1, col=colbints2) if (device=="screen") dev.new() par(mar=c(5, 5, 3, 5) + 0.1) ##### plot(ts1, t="b", col=colts1, xlab="Time", ylab="", yaxt="n", lty=ltyts1, main="", xlim=range.timesORts1, cex=0.75) axis(2, pretty(ts1[,2]), lwd=2, las=2, col=colts1) mtext(2, text=varnamets1, line=3.5, cex=0.95) par(new=T) plot(ts2, t="b", col=colts2, xaxt="n", yaxt="n", xlab="", ylab="", lty=ltyts2, main="", xlim=range.timesORts1, cex=0.75) #lty=ltyts2, main="", xlim=range.timesORts2, cex=0.75) axis(4, pretty(ts2[,2]), lwd=2, las=2, col=colts2) mtext(4, text=varnamets2, line=3.5, cex=0.95) legend("topleft", bty="n", legend=c(paste(varnamets1, " (primary). ", " N = ", length(ts1[,1]), " elements", sep=""), paste(varnamets2, " (primary). ", " N = ", length(ts2[,1]), " elements", sep="")), lty=c(ltyts1, ltyts2), col=c(colts1, colts2)) axis(3, ts1[,1], labels=F, col=colts1) axis(3, ts2[,1], labels=F, line=1, col=colts2) if (device=="screen") dev.new() par(mar=c(5, 5, 3, 5) + 0.1) ##### plot(bints1, t="b", col=colbints1, xlab="Time", ylab="", yaxt="n", lty=ltybints2, cex=0.75) axis(2, pretty(bints1[,2]), lwd=2, las=2, col=colbints1) mtext(2, text=varnamets1, line=3.5, cex=0.95) par(new=T) plot(bints2, t="b", col=colbints2, xaxt="n", yaxt="n", xlab="", ylab="", lty=ltybints2, xlim=range(bints1[,1]), cex=0.75) axis(4, pretty(bints2[,2]), lwd=2, las=2, col=colbints2) mtext(4, text=varnamets2, line=3.5, cex=0.95) legend("topleft", bty="n", legend=c(paste(varnamets1, " (binned). ", " N = ", length(bints1[,1]), " elements", sep=""), paste(varnamets2, " (binned). 
", " N = ", length(bints2[,1]), " elements", sep="")), lty=c(ltybints1, ltybints2), col=c(colbints1, colbints2)) axis(3, bints1[,1], labels=F, col=colbints1) axis(3, bints2[,1], labels=F, line=1, col=colbints2) if (device != "screen") dev.off() return() }
/scratch/gouwar.j/cran-all/cranData/BINCOR/R/plot_ts.R
##################################################################### #:: This piece of code comes from the R package dplR (redfit function #:: based on Schulz & Mudelsee 2002) programmed by Mikko Korpela. #:: Very minor modifications was done by J. M. Polanco-Martínez #:: ([email protected]) in order to used the dplR redfit #:: function from the R package BINCOR. #:: 10/2016, Bordeaux, FR ##################################################################### ##################################################################### ### This part of dplR was (arguably non-trivially) translated and ### adapted from public domain Fortran program REDFIT, version 3.8e ### (Michael Schulz and Manfred Mudelsee). The possibly non-free parts ### of REDFIT derived from Numerical Recipes were not used. ### http://www.geo.uni-bremen.de/geomod/staff/mschulz/ ### Author of the dplR version is Mikko Korpela. ### ### Copyright (C) 2013-2015 Aalto University ### ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ##################################################################### ## dplR: R version based on Mudelsee's code. ## dplR: Introduction copied from REDFIT (some variables removed). ## ## Manfred Mudelsee's code for tau estimation ## ---------------------------------------------------------------------- ## TAUEST: Routine for persistence estimation for unevenly spaced time series ## ---------------------------------------------------------------------- ## Main variables ## ## t : time ## x : time series value ## np : number of points ## dt : average spacing ## scalt : scaling factor (time) ## rho : in the case of equidistance, rho = autocorr. coeff. ## mult : flag (multiple solution) ## amin : estimated value of a = exp(-scalt/tau) redfitTauest <- function(t, x) { np <- length(t) ## Correct time direction; assume that ages are input ## dplR: Correction of time direction is done by modifying this ## function and redfitMinls, not by explicitly reversing (and ## multiplying by one) ## tscal <- -rev(t)tauest_dplR.R ## xscal <- rev(x) ## Scaling of x xscal <- x / sd(x) ## Scaling of t (=> start value of a = 1/e) dt <- (t[np] - t[1]) / (np - 1) ## dplR: rhoest() of REDFIT is now an "inline function" of two ## lines + comment line: ## Autocorrelation coefficient estimation (equidistant data) xscalMNP <- xscal[-np] rho <- sum(xscalMNP * xscal[-1]) / sum(xscalMNP * xscalMNP) if (rho <= 0) { rho <- 0.05 warning("rho estimation: <= 0") # comentar tauest_dplR.R } else if (rho > 1) { rho <- 0.95 warning("rho estimation: > 1") # comentar } scalt <- -log(rho) / dt tscal <- t * scalt ## Estimation minRes <- redfitMinls(tscal, xscal) amin <- minRes[["amin"]] mult <- minRes[["nmu"]] #warnings <- FALSE ################################################################## #:: Piece of code*** added by Josué M. Polanco-Martínez: START ################################################################## #:: We use a piece of code*** (translated from Fortran to R) from #:: mc-brxy.f90 ((C) M. Mudelsee) in order to avoid some problems #:: in the estimation of the persistence. 
Please look at the #:: subroutine tauest in mc-brxy.f90. ################################################################## rhoavgmax=0.99 rhoavgmin=0.01 zero= 0.0 # determines also rho = rhoavg if (amin >= rhoavgmax) { rhoavg=rhoavgmax tau = -dt / log(rhoavgmax) } else if (amin >= rhoavgmin & amin < rhoavgmax) { tau = -1.0 /(scalt*log(amin)) rhoavg = exp(-dt / tau) # Bias correction (unknown mean) rhoavg = (rhoavg * (np - 1.0) + 1.0) / (np - 4.0) rhoavg = min(rhoavgmax,rhoavg) rhoavg = max(rhoavgmin,rhoavg) tau=-dt/log(rhoavg) } else if (amin < rhoavgmin) { rhoavg=rhoavgmin tau = zero } rhoout=rhoavg salida <- c(rho, rhoout, tau) return(list(salida)) ################################################################## #:: Piece of code added by Josué M. Polanco-Martínez: END ################################################################## } ################################################################## if(0){ # original code by Mikko Korpela, please note that we are # not using. Josué M. Polanco-Martínez warnings <- FALSE if (mult) { warning("estimation problem: LS function has > 1 minima") warnings <- TRUE } if (amin <= 0) { warning("estimation problem: a_min =< 0") warnings <- TRUE } else if (amin >= 1) { warning("estimation problem: a_min >= 1") warnings <- TRUE } if (!warnings) { ## determine tau tau <- -1 / (scalt * log(amin)) ## By jomopo, 10/2016 cat("tau", tau, "\n") ## By jomopo, 10/2016 ## determine rho, corresponding to tau exp(-dt / tau) } else { ## dplR: fail early stop("error in tau estimation") } } ################################################################## ## dplR: Minimization of the built-in least-squares function lsfun redfitMinls <- function(t, x) { ## Least-squares function lsfun <- function(a, difft, xM1, xMNP) { if (a > 0) { tmp <- xMNP - xM1 * a^difft } else if (a < 0) { tmp <- xMNP + xM1 * (-a)^difft } else { tmp <- xMNP } sum(tmp * tmp) } a_ar1 <- exp(-1) # 1 / e tol <- 3e-8 # Brent's search, precision tol2 <- 1e-6 # multiple solutions, precision difft <- diff(t) np <- length(x) xM1 <- x[-1] xMNP <- x[-np] opt1 <- optimize(lsfun, c(-2, 2), tol = tol, difft = difft, xM1 = xM1, xMNP = xMNP) opt2 <- optimize(lsfun, c(a_ar1, 2), tol = tol, difft = difft, xM1 = xM1, xMNP = xMNP) opt3 <- optimize(lsfun, c(-2, a_ar1), tol = tol, difft = difft, xM1 = xM1, xMNP = xMNP) a_ar11 <- opt1[["minimum"]] a_ar12 <- opt2[["minimum"]] a_ar13 <- opt3[["minimum"]] dum1 <- opt1[["objective"]] dum2 <- opt2[["objective"]] dum3 <- opt3[["objective"]] list(amin = c(a_ar11, a_ar12, a_ar13)[which.min(c(dum1, dum2, dum3))], nmu = ((abs(a_ar12 - a_ar11) > tol2 && abs(a_ar12 - a_ar1) > tol2) || (abs(a_ar13 - a_ar11) > tol2 && abs(a_ar13 - a_ar1) > tol2))) }
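#:: Illustrative usage sketch (comment only; toy data). bin_cor() calls this
#:: routine internally; the unlisted return value holds the raw lag-1
#:: autocorrelation estimate, the bias-corrected average autocorrelation and
#:: the persistence time tau, in that order:
#:: tt <- sort(runif(200, 0, 500))
#:: xx <- as.numeric(arima.sim(list(ar = 0.6), 200))
#:: unlist(redfitTauest(tt, xx))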
/scratch/gouwar.j/cran-all/cranData/BINCOR/R/tauest_dplR.R
#' The 'BINtools' package.
#'
#' @description Tools for estimating and analyzing BIN (Bias, Information,
#'   Noise) models of probability forecasts; see \code{\link{simulate_data}},
#'   \code{\link{estimate_BIN}} and \code{\link{complete_summary}}.
#'
#' @docType package
#' @name BINtools-package
#' @aliases BINtools
#' @useDynLib BINtools, .registration = TRUE
#' @import methods
#' @import Rcpp
#' @import rstantools
#' @importFrom rstan sampling
#' @importFrom stats pnorm
#' @importFrom stats qnorm
#' @importFrom stats rnorm
#' @importFrom dplyr bind_rows
#' @importFrom dplyr %>%
#'
#' @references
#' Stan Development Team (2020). RStan: the R interface to Stan. R package version 2.21.2. https://mc-stan.org
#'
NULL
/scratch/gouwar.j/cran-all/cranData/BINtools/R/BINtools-package.R
compute_contributions <- function(mu_0, par_control, par_treatment) { N_SIM = 10^6 z_0 = rnorm(N_SIM, mu_0, 1) eps = rnorm(N_SIM) compute_mbs_analytically <- function(mu_0, parameters) { mu_1 = parameters$bias + mu_0 gamma = parameters$information delta = parameters$information + parameters$noise var_o = pnorm(mu_0) msg = mu_1/sqrt(1-gamma) dg = delta/(1 - gamma) gg = gamma/sqrt(1 - gamma) Sigma_f = matrix(c(dg + 1, dg, dg, dg + 1), ncol = 2) var_f = mvtnorm::pmvnorm(upper=c(msg, msg), mean = c(0,0), sigma = Sigma_f) Sigma_of = matrix(c(1, gg, gg, dg + 1), ncol = 2) cov_of = mvtnorm::pmvnorm(upper=c(mu_0, msg), mean = c(0,0), sigma = Sigma_of) as.numeric(var_o + var_f - 2*cov_of) } compute_mbs_numerically <- function(mu_0, parameters) { mu_1 = parameters$bias + mu_0 gamma = parameters$information delta = parameters$information + parameters$noise outcomes = as.integer(z_0 > 0) z_1 = mu_1 + gamma * (z_0 - mu_0) + sqrt(delta - gamma^2) * eps probs = pnorm(z_1/sqrt(1 - gamma)) mean((probs - outcomes)^2) } compute_mbs = compute_mbs_analytically process_sequence <- function(par_sequence) { prev = par_control succ = par_control result = list() for (par_name in par_sequence) { succ[[par_name]] = par_treatment[[par_name]] contribution = compute_mbs(mu_0, prev) - compute_mbs(mu_0, succ) result[[par_name]] = contribution prev = succ } result } mbs_treatment = compute_mbs(mu_0, par_treatment) mbs_control = compute_mbs(mu_0, par_control) total = mbs_control - mbs_treatment contribution_perms = lapply(combinat::permn(c("bias", "information", "noise")), process_sequence) %>% bind_rows() contributions = contribution_perms %>% dplyr::summarise_all(mean) result = list( mean_brier_score_1 = mbs_treatment, mean_brier_score_0 = mbs_control, contribution_bias = contributions$bias, contribution_noise = contributions$noise, contribution_information = contributions$information ) } #' Summary #' #' This function uses the return value of a call to the function \code{estimate_BIN} and produces a full BIN analysis based on that object. #' #' @param full_bayesian_fit The return value of a call to function \code{estimate_BIN}. #' #' @return List containing the parameter estimates of the model, the posterior inferences, and the analysis #' of predictive performance. #' #' The elements of the list are as follows. #' \itemize{ #' \item Parameter Estimates: Posterior means, standard deviations, and different quantiles of the model parameters and their differences. #' The parameter values represent the following quantities. #' \itemize{ #' \item mu_star: Base rate of the event outcome in the probit scale. E.g., if mu_star = 0, then, in expectation, Phi(0)*100% = 50% of the events happen, where Phi(.) is the CDF of a standard Gaussian random variable. #' \item mu_0: The level of bias in the control group. This can be any number in the real-line. E.g., if mu_0 = 0.1, then the control group believes the base rate to be Phi(mu_star + 0.1). #' \item mu_1: The level of bias in the treatment group; otherwise the interpretation is the same as above for mu_0. #' \item gamma_0: The level of information in the control group. This is a number between 0 and 1, where 0 represents no information and 1 represents full information. #' \item gamma_1: The level of information in the treatment group; otherwise the interpretation is the same as above for gamma_0. #' \item delta_0: The level of noise in the control group. This is a positive value, with higher values indicating higher levels of noise. Noise is in the same scale as information. 
E.g., delta_0 = 1.0 says that the control group uses as many irrelevant signals as there are relevant signals in the universe. In this sense it represents a very high level of noise. #' \item delta_1: The level of noise in the treatment group; otherwise the interpretation is the same as above for delta_0. #' \item rho_0: The level of within-group dependence between forecasts of the control group. This is a positive value, with higher values indicating higher levels of dependence. The dependence can be interpreted to stem from shared irrelevant (noise) and/or relevant (information) signals. #' \item rho_1: The level of within-group dependence between forecasts of the treatment group; otherwise the interpretation is the same as above for rho_0. #' \item rho_01: The level of inter-group dependence between forecasts of the control and treatment groups; otherwise the interpretation is the same as above for rho_0. #' } #' \item Posterior Inferences: Posterior probabilities of events. #' Compared to the control group, does the treatment group have: (i) less bias, (ii) more information, and (iii) less noise? #' Intuitively, one can think of these probabilities as the Bayesian analogs of the p-values in classical hypothesis testing – the closer the probability is to 1, the stronger the evidence for the hypothesis. #' #' \item Control,Treatment: This compares the control group against the treatment group. #' The value of the contribution gives #' \itemize{ #' \item the mean Brier score of the control group; #' \item the mean Brier score of the treatment group; #' \item how the difference can be explained in terms of bias, noise, or information; and #' \item in percentage terms, how does the change in bias, noise, or information (from control to treatment group) changes the Brier score. #' } #' \item Control,Perfect Accuracy: This compares the control group against a treatment group with perfect accuracy; #' otherwise the interpretation is the same as above for 'Control,Treatment.' 
#' } #' #'@examples #' \donttest{ #' ## An example with one group #' # a) Simulate synthetic data: #' synthetic_data = simulate_data(list(mu_star = -0.8,mu_0 = -0.5,mu_1 = 0.2,gamma_0 = 0.1, #' gamma_1 = 0.3,rho_0 = 0.05,delta_0 = 0.1,rho_1 = 0.2, delta_1 = 0.3,rho_01 = 0.05),300,100,0) #' # b) Estimate the BIN-model on the synthetic data: #' full_bayesian_fit = estimate_BIN(synthetic_data$Outcomes,synthetic_data$Control, warmup = 500, #' iter = 1000) #' # c) Analyze the results: #' complete_summary(full_bayesian_fit) #'} #' \donttest{ #' ## An example with two groups #' # a) Simulate synthetic data: #' synthetic_data = simulate_data(list(mu_star = -0.8,mu_0 = -0.5,mu_1 = 0.2,gamma_0 = 0.1, #' gamma_1 = 0.3, rho_0 = 0.05,delta_0 = 0.1, rho_1 = 0.2, delta_1 = 0.3,rho_01 = 0.05), 300,100,100) #' # b) Estimate the BIN-model on the synthetic data: #' full_bayesian_fit = estimate_BIN(synthetic_data$Outcomes,synthetic_data$Control, #' synthetic_data$Treatment, warmup = 500, iter = 1000) #' # c) Analyze the results: #' complete_summary(full_bayesian_fit) #'} #' #' @seealso \code{\link{simulate_data}}, \code{\link{estimate_BIN}} #' #' @export complete_summary<-function(full_bayesian_fit){ #pacman::p_load("dplyr") parameter_name<-NULL #List containing the summary of model k current_summary=list() if(dim(full_bayesian_fit)[3]<20){ #Result Summary result_summary = rstan::summary(full_bayesian_fit)$summary %>% as.data.frame() %>% tibble::rownames_to_column("parameter_name") %>% dplyr::filter(parameter_name %in% c("mu_star", "mu_0", "gamma_0", "delta_0","rho_0")) %>% dplyr::mutate_if(is.numeric,function(x) round(x, 2)) %>% dplyr::select(-"n_eff",-"Rhat", -"se_mean") current_summary[["Parameter Estimates"]]<-result_summary posterior_samples <- rstan::extract(full_bayesian_fit) sample_values = lapply(posterior_samples, mean) par_control <- list(bias = sample_values$mu_0, information = sample_values$gamma_0, noise = sample_values$delta_0) par_perfect <- list(bias = 0, information = 1 - 1e-04, noise = 0) perfect_mbs_decomp <- compute_contributions(sample_values$mu_star, par_control, par_perfect) p_a_percentage_contributions <- list(perfect_accuracy_percentage_contribution_bias = perfect_mbs_decomp$contribution_bias * 100/perfect_mbs_decomp$mean_brier_score_0, perfect_accuracy_percentage_contribution_noise = perfect_mbs_decomp$contribution_noise * 100/perfect_mbs_decomp$mean_brier_score_0, perfect_accuracy_percentage_contribution_information = perfect_mbs_decomp$contribution_information * 100/perfect_mbs_decomp$mean_brier_score_0) contribution_perfect_accuracy = list() contribution_perfect_accuracy[["Value of the contribution"]] <- perfect_mbs_decomp contribution_perfect_accuracy[["Percentage of control group Brier score"]] <- p_a_percentage_contributions current_summary[["Control, Perfect Accuracy"]] <- contribution_perfect_accuracy } else{ #Result Summary result_summary = rstan::summary(full_bayesian_fit)$summary %>% as.data.frame() %>% tibble::rownames_to_column("parameter_name") %>% dplyr::filter(parameter_name %in% c("mu_star", "mu_0", "mu_1", "diff_bias", "gamma_0", "gamma_1", "diff_info", "delta_0", "delta_1", "diff_noise", "rho_0", "rho_1", "rho_01")) %>% dplyr::mutate_if(is.numeric, function(x) round(x, 2)) %>% dplyr::select(-"n_eff",-"Rhat", -"se_mean") current_summary[["Parameter Estimates"]]<-result_summary posterior_samples <- rstan::extract(full_bayesian_fit) ### Posterior inferences: Posterior_inferences<-c("More information in treatment group","Less noise in treatment group","Less bias in 
treatment group") Posterior_Probability<-c(mean(posterior_samples$gamma_0 < posterior_samples$gamma_1),mean(posterior_samples$delta_1 < posterior_samples$delta_0),mean(abs(posterior_samples$bias_1) < abs(posterior_samples$bias_0))) Posterior_Inferences<-data.frame(Posterior_inferences,Posterior_Probability) current_summary[["Posterior Inferences"]]<-Posterior_Inferences ### Predictive Performance & Contributions sample_values = lapply(posterior_samples, mean) par_control <- list( bias = sample_values$mu_0, information = sample_values$gamma_0, noise = sample_values$delta_0 ) par_treatment <- list( bias = sample_values$mu_1, information = sample_values$gamma_1, noise = sample_values$delta_1 ) par_perfect <- list( bias = 0, information = 1 - 1e-4, noise = 0 ) # Decomposition that compares two groups exp_mbs_decomp <- compute_contributions(sample_values$mu_star, par_control, par_treatment) # contribution_bias: contribution of bias to expected Brier score # contribution_information, contribution_noise: same for information and noise # mean_brier_score_0, mean_brier_score_1: expected Brier score for each of the two groups, given corresponding mu, gamma, delta #Percentages t_percentage_contributions<-list( treatment_percentage_contribution_bias=exp_mbs_decomp$contribution_bias*100/exp_mbs_decomp$mean_brier_score_0, treatment_percentage_contribution_noise=exp_mbs_decomp$contribution_noise*100/exp_mbs_decomp$mean_brier_score_0, treatment_percentage_contribution_information=exp_mbs_decomp$contribution_information*100/exp_mbs_decomp$mean_brier_score_0 ) contribution_treatment=list() contribution_treatment[["Value of the contribution"]]<-exp_mbs_decomp contribution_treatment[["Percentage of control group Brier score"]]<-t_percentage_contributions current_summary[["Control,Treatment"]]<-contribution_treatment # Decomposition of the maximum achievable improvement perfect_mbs_decomp <- compute_contributions(sample_values$mu_star, par_control, par_perfect) #Percentages p_a_percentage_contributions<-list( perfect_accuracy_percentage_contribution_bias=perfect_mbs_decomp$contribution_bias*100/perfect_mbs_decomp$mean_brier_score_0, perfect_accuracy_percentage_contribution_noise=perfect_mbs_decomp$contribution_noise*100/perfect_mbs_decomp$mean_brier_score_0, perfect_accuracy_percentage_contribution_information=perfect_mbs_decomp$contribution_information*100/perfect_mbs_decomp$mean_brier_score_0 ) contribution_perfect_accuracy=list() contribution_perfect_accuracy[["Value of the contribution"]]<-perfect_mbs_decomp contribution_perfect_accuracy[["Percentage of control group Brier score"]]<-p_a_percentage_contributions current_summary[["Control, Perfect Accuracy"]]<-contribution_perfect_accuracy } return(current_summary) }
/scratch/gouwar.j/cran-all/cranData/BINtools/R/complete_summary.R
compute_sufficient_statistics <- function(event) { outcome <- event$outcome x <- event$control_probits y <- event$treatment_probits N_0 <- length(x) N_1 <- length(y) M_0 <- sum(x) M_1 <- sum(y) V_0 <- sum(x^2) V_1 <- sum(y^2) C_0 <- sum(outer(x, x, '*')) - V_0 C_1 <- sum(outer(y, y, '*')) - V_1 C_01 <- sum(outer(x, y, '*')) list(outcome = outcome, N_0 = N_0, N_1 = N_1, M_0 = M_0, M_1 = M_1, V_0 = V_0, V_1 = V_1, C_0 = C_0, C_1 = C_1, C_01 = C_01) } summariser <- function(dataset) { suffstat_data <- dplyr::bind_rows(lapply(dataset, compute_sufficient_statistics)) stan_data <- as.list(suffstat_data) stan_data$N <- nrow(suffstat_data) list( data = list( raw = dataset, stan_data = stan_data, detailed = paste("$\\rho_o =", as.character(attr(dataset, "rho_o")), "$"), prefix_reg = "", prefix_super = "" ) ) } data_preparation<-function(Outcomes, Control, Treatment=NULL){ predictions<-c(unlist(Control),unlist(Treatment)) if( any(predictions<= 0 | predictions>= 1) ){stop('All predictions should be strictly between 0 and 1.')} if(!all(Outcomes %in% 0:1)){stop('All outcomes should be binary.')} N=length(Outcomes) dd<-list() for(n in 1:N){ dd[[n]] <- list(outcome = Outcomes[[n]], control_probits = qnorm(Control[[n]]), treatment_probits = qnorm(as.numeric(Treatment[[n]])) ) } data = lapply(list(dd), summariser) return(data) }
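# Illustrative sketch (comment only; toy values). These helpers are internal:
# data_preparation() converts raw outcomes and probability forecasts into the
# sufficient statistics consumed by the Stan models, e.g.
# toy <- data_preparation(Outcomes = c(1, 0),
#                         Control = list(c(0.6, 0.7), c(0.3, 0.4)),
#                         Treatment = list(c(0.8, 0.75), c(0.2, 0.35)))
# str(toy[[1]]$data$stan_data)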
/scratch/gouwar.j/cran-all/cranData/BINtools/R/data_preparation.R
#' Estimate a BIN (Bias, Information, Noise) model #' #' This function allows the user to compare two groups (treatment and control) of forecasters in terms of their bias, information, and noise levels. #' Model estimation is performed with a Markov Chain Monte Carlo (MCMC) approach called Hamiltonian Monte Carlo. #' #' @param Outcomes Vector of binary values indicating the outcome of each event. The j-th entry is equal to 1 if the j-th event occurs and equal to 0 otherwise. #' @param Control List of vectors containing the predictions made for each event by forecasters in the control group. The j-th vector contains predictions for the j-th event. #' @param Treatment (Default:\code{NULL}) List of vectors containing the predictions made for each event by forecasters in the treatment group. The j-th vector contains predictions for the j-th event. #' @param initial A list containing the initial values for the parameters mu_star,mu_0,mu_1,gamma_0,gamma_1,delta_0,rho_0,delta_1,rho_1,and rho_01. #' (Default: \code{list(mu_star = 0,mu_0 = 0,mu_1 = 0,gamma_0 = 0.4,gamma_1 = 0.4, #' delta_0 = 0.5,rho_0 = 0.27, delta_1 = 0.5,rho_1 = 0.27,rho_01 = 0.1)}) #' @param warmup The number of initial iterations used for ``burnin.'' #' These values are not included in the analysis of the model. (Default:\code{2000}) #' @param iter Total number of iterations. #' Must be larger than warmup. (Default:\code{4000}) #' @param seed (Default: \code{1}) #' #' @return Model estimation is performed with the statistical programming language called \href{https://mc-stan.org/}{Stan}. #' The return object is a Stan model. #' This way the user can apply available diagnostics tools in other packages, such as [rstan](https://mc-stan.org/rstan/), to analyze the final results. #' #'@examples #' \donttest{ #' ## An example with one group #' # a) Simulate synthetic data: #' synthetic_data = simulate_data(list(mu_star = -0.8,mu_0 = -0.5,mu_1 = 0.2,gamma_0 = 0.1, #' gamma_1 = 0.3,rho_0 = 0.05,delta_0 = 0.1,rho_1 = 0.2, delta_1 = 0.3,rho_01 = 0.05),300,100,0) #' # b) Estimate the BIN-model on the synthetic data: #' full_bayesian_fit = estimate_BIN(synthetic_data$Outcomes,synthetic_data$Control, warmup = 500, #' iter = 1000) #' # c) Analyze the results: #' complete_summary(full_bayesian_fit) #'} #' \donttest{ #' ## An example with two groups #' # a) Simulate synthetic data: #' synthetic_data = simulate_data(list(mu_star = -0.8,mu_0 = -0.5,mu_1 = 0.2,gamma_0 = 0.1, #' gamma_1 = 0.3, rho_0 = 0.05,delta_0 = 0.1, rho_1 = 0.2, delta_1 = 0.3,rho_01 = 0.05), 300,100,100) #' # b) Estimate the BIN-model on the synthetic data: #' full_bayesian_fit = estimate_BIN(synthetic_data$Outcomes,synthetic_data$Control, #' synthetic_data$Treatment, warmup = 500, iter = 1000) #' # c) Analyze the results: #' complete_summary(full_bayesian_fit) #'} #' #'@seealso \code{\link{simulate_data}}, \code{\link{complete_summary}} #' #' @export estimate_BIN<-function(Outcomes, Control, Treatment=NULL,initial=list( mu_star = 0, mu_0 = 0, mu_1 = 0, gamma_0 = 0.4, gamma_1 = 0.4, delta_0 = 0.5, rho_0 = 0.27, delta_1 = 0.5, rho_1 = 0.27, rho_01 = 0.1 ),warmup = 2000,iter = 4000,seed=1){ full_bayesian_fit=NULL Data=data_preparation(Outcomes, Control, Treatment) stan_data=Data[[1]]$data$stan_data if (stan_data$N_0[1]>1 && stan_data$N_1[1]>1){ full_bayesian_fit<- sampling(stanmodels$case_1_MM, stan_data, init = list(initial), chains = 1, iter = iter, warmup=warmup,seed = seed) } else if(stan_data$N_0[1]>1 && stan_data$N_1[1]==1){ full_bayesian_fit <- 
sampling(stanmodels$case_2_M1, stan_data, init = list(initial), chains = 1, iter = iter, warmup=warmup,seed = seed) } else if(stan_data$N_0[1]==1 && stan_data$N_1[1]>1){ full_bayesian_fit <- sampling(stanmodels$case_2_1M, stan_data, init = list(initial), chains = 1, iter = iter, warmup=warmup,seed = seed) } else if(stan_data$N_0[1]==1 && stan_data$N_1[1]==1){ full_bayesian_fit <- sampling(stanmodels$case_3_11, stan_data, init = list(initial), chains = 1, iter = iter, warmup=warmup,seed = seed) } else if(stan_data$N_0[1]>1 && stan_data$N_1[1]==0){ full_bayesian_fit<- sampling(stanmodels$case_4_M0, stan_data, init = list(initial), chains = 1, iter = iter, warmup=warmup,seed = seed) } else if(stan_data$N_0[1]==1 && stan_data$N_1[1]==0){ full_bayesian_fit <- sampling(stanmodels$case_5_10, stan_data, init = list(initial), chains = 1, iter = iter, warmup=warmup,seed = seed) } else{ stop('The data does not correspond to any of the cases available for analysis.')} return(full_bayesian_fit) }
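# Example (not run): inspecting the fit with rstan.
# The object returned by estimate_BIN() is a stanfit, so standard rstan tools can be
# applied to it. A minimal sketch, assuming the 'rstan' package is installed and that
# 'fit' was produced by estimate_BIN() as in the examples above; the parameter names
# are assumed to match those reported by complete_summary().
#
#   fit <- estimate_BIN(synthetic_data$Outcomes, synthetic_data$Control,
#                       warmup = 500, iter = 1000)
#   rstan::check_hmc_diagnostics(fit)                 # divergences, tree depth, E-BFMI
#   print(fit, pars = c("mu_star", "mu_0", "gamma_0", "delta_0", "rho_0"))
#   rstan::traceplot(fit, pars = "mu_star")           # visual check of mixing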
/scratch/gouwar.j/cran-all/cranData/BINtools/R/estimate_BIN.R
transform_parameters <- function(parameters) { #transform the parameters: we observe only Z/sqrt(1-gamma), not Z #list2env(parameters, environment()) gamma_1_p <- parameters$gamma_1 / sqrt(1 - parameters$gamma_1) gamma_0_p <- parameters$gamma_0 / sqrt(1 - parameters$gamma_0) v_1_p <- (parameters$gamma_1 + parameters$delta_1) / (1 - parameters$gamma_1) v_0_p <- (parameters$gamma_0 + parameters$delta_0) / (1 - parameters$gamma_0) rho_1_p <- parameters$rho_1 / (1 - parameters$gamma_1) rho_0_p <- parameters$rho_0 / (1 - parameters$gamma_0) rho_01_p <- parameters$rho_01 / sqrt((1 - parameters$gamma_0) * (1 - parameters$gamma_1)) mu_1_p <- (parameters$mu_star + parameters$mu_1) / sqrt(1 - parameters$gamma_1) mu_0_p <- (parameters$mu_star + parameters$mu_0) / sqrt(1 - parameters$gamma_0) list( mu_star = parameters$mu_star, mu_0 = mu_0_p, mu_1 = mu_1_p, gamma_0 = gamma_0_p, gamma_1 = gamma_1_p, v_0 = v_0_p, v_1 = v_1_p, rho_0 = rho_0_p, rho_1 = rho_1_p, rho_01 = rho_01_p ) } generate_covariance_matrix <- function(parameters, N_0, N_1){ # generates the block covariance matrix #list2env(parameters, environment()) O <- matrix(1, 1, 1) G_0 <- matrix(parameters$gamma_0, 1, N_0) G_1 <- matrix(parameters$gamma_1, 1, N_1) V_01 <- matrix(parameters$rho_01, N_0, N_1) V_0 <- diag(parameters$v_0 - parameters$rho_0, N_0, N_0) + matrix(parameters$rho_0, N_0, N_0) V_1 <- diag(parameters$v_1 - parameters$rho_1, N_1, N_1) + matrix(parameters$rho_1, N_1, N_1) rbind(cbind(O, G_0, G_1), cbind(t(G_0), V_0, V_01), cbind(t(G_1), t(V_01), V_1)) } generate_mean_vector <- function(parameters, N_0, N_1){ #list2env(parameters, environment()) c(c(parameters$mu_star), rep(parameters$mu_0, N_0), rep(parameters$mu_1, N_1)) } simulate_event <- function(z_0, z_sig, mu, sig, N_0, N_1) { # could be made faster by pre-computing Cholesky # currently doing this way to avoid errors M = N_0 + N_1 + 1 information_vec = sig[1, 2:M] mu_star = mu[1] mu_update = information_vec * (z_0 - mu_star) sig_update = -1*outer(information_vec, information_vec, '*') # conditioning on the outcome forecast_mu = mu[2:M] + mu_update forecast_sig = sig[2:M, 2:M] + sig_update Z = forecast_mu + chol(forecast_sig) %*% z_sig list( outcome = as.integer(z_0 > 0), ifp_id = stringi::stri_rand_strings(1, 6), control_probits = Z[1:N_0], treatment_probits = Z[(N_0+1):(N_0+N_1)] ) } simulate_events <- function(parameters, N, N_0, N_1, rho_o = 0.0) { p <- transform_parameters(parameters) # transform the parameters: we observe only Z/sqrt(1-gamma), not Z mean_vec <- generate_mean_vector(p, N_0, N_1) sigma_mat <- generate_covariance_matrix(p, N_0, N_1) Z_o = rnorm(N) Z_s = replicate(N, rnorm(N_0 + N_1), simplify=FALSE) if (!all(eigen(sigma_mat)$values > 0)) { stop("Covariance matrix is not positive semidefinite.") } RR = length(rho_o) result = vector(mode = "list", length = RR) for (j in 1:RR) { mu_o = mean_vec[1] V_o <- diag(1 - rho_o[[j]], N, N) + matrix(rho_o[[j]], N, N) Z_os <- mu_o + chol(V_o) %*% Z_o events <- vector(mode = "list", length = N) for (i in 1:N) { events[[i]] = simulate_event(Z_os[[i]], Z_s[[i]], mean_vec, sigma_mat, N_0, N_1) } result[[j]] = events attr(result[[j]], "rho_o") = rho_o[[j]] } result } #' Simulate Data #' #' This function allows the user to generate synthetic data of two groups (control and treatment) of forecasters making probability predictions of binary events. #' The function is mostly useful for testing and illustration purposes. 
#' #' @param parameters A list containing the true values of the parameters: mu_star,mu_0,mu_1,gamma_0,gamma_1,rho_0,delta_0,rho_1,delta_1 and rho_01 #' @param N Number of events #' @param N_0 Number of forecasters in the control group #' @param N_1 Number of forecasters in the treatment group #' @param rho_o The level of dependence between event outcomes. (Default: the events are independent conditional on the model parameter values. This sets `rho_o = 0.0`) #' #' @details #' See \code{\link{complete_summary}} for a description of the model parameters. #' Not all combinations of parameters are possible. #' In particular, the covariance parameters gamma and rho are dependent on each other and must result in a positive semi-definite covariance matrix for the outcomes and predictions. #' To find a feasible set of parameters, we recommend users to experiment: begin with the desired levels of mu, gamma, and delta, and values of rho close to zero, and then increase rho until data can be generated without errors. #' #' @return List containing the simulated data. #' The elements of the list are as follows. #' \itemize{ #' \item Outcomes: Vector containing binary values that indicate the outcome of each event. The j-th entry is equal to 1 if the j-th event occurs and equal to 0 otherwise. #' \item Control: List of vectors (one for each event) containing probability predictions made by the forecasters in the control group. #' \item Treatment: List of vectors (one for each event) containing probability predictions made by the forecasters in the treatment group. #' } #' #' @examples #' \donttest{ #' simulate_data(list(mu_star = -0.8,mu_0 = -0.5,mu_1 = 0.2,gamma_0 = 0.1,gamma_1 = 0.3, #' rho_0 = 0.05,delta_0 = 0.1,rho_1 = 0.2, delta_1 = 0.3,rho_01 = 0.05), 300,100,100) #' } #' #' #' @seealso \code{\link{estimate_BIN}}, \code{\link{complete_summary}} #' #' @export simulate_data<-function(parameters,N, N_0, N_1, rho_o = 0.0){ datasets <- simulate_events(parameters,N, N_0, N_1, rho_o)[[1]] control_probab=lapply(datasets, function(ifp) { control_prob = pnorm(ifp$control_probits) control_prob }) treatment_probab=lapply(datasets, function(ifp) { treatment_prob = pnorm(ifp$treatment_probits) treatment_prob }) outcomes=unlist(lapply(datasets, function(ifp) { outcome= ifp$outcome outcome })) data=list(outcomes,control_probab,treatment_probab) names(data)=c("Outcomes", "Control", "Treatment") return(data) }
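# Example (not run): a quick structural sanity check of simulate_data() output.
# A minimal sketch, assuming the same parameter values as in the roxygen example above.
#
#   dat <- simulate_data(list(mu_star = -0.8, mu_0 = -0.5, mu_1 = 0.2, gamma_0 = 0.1,
#                             gamma_1 = 0.3, rho_0 = 0.05, delta_0 = 0.1, rho_1 = 0.2,
#                             delta_1 = 0.3, rho_01 = 0.05), 300, 100, 100)
#   length(dat$Outcomes)         # 300 events
#   length(dat$Control[[1]])     # 100 control-group predictions for the first event
#   mean(dat$Outcomes)           # should be roughly pnorm(-0.8), i.e. about 0.21
#   range(unlist(dat$Control))   # predictions are probabilities in (0, 1)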
/scratch/gouwar.j/cran-all/cranData/BINtools/R/simulate_data.R
# Generated by rstantools. Do not edit by hand. # names of stan models stanmodels <- c("case_1_MM", "case_2_1M", "case_2_M1", "case_3_11", "case_4_M0", "case_5_10", "model") # load each stan module Rcpp::loadModule("stan_fit4case_1_MM_mod", what = TRUE) Rcpp::loadModule("stan_fit4case_2_1M_mod", what = TRUE) Rcpp::loadModule("stan_fit4case_2_M1_mod", what = TRUE) Rcpp::loadModule("stan_fit4case_3_11_mod", what = TRUE) Rcpp::loadModule("stan_fit4case_4_M0_mod", what = TRUE) Rcpp::loadModule("stan_fit4case_5_10_mod", what = TRUE) Rcpp::loadModule("stan_fit4model_mod", what = TRUE) # instantiate each stanmodel object stanmodels <- sapply(stanmodels, function(model_name) { # create C++ code for stan model stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") stan_file <- file.path(stan_file, paste0(model_name, ".stan")) stanfit <- rstan::stanc_builder(stan_file, allow_undefined = TRUE, obfuscate_model_name = FALSE) stanfit$model_cpp <- list(model_cppname = stanfit$model_name, model_cppcode = stanfit$cppcode) # create stanmodel object methods::new(Class = "stanmodel", model_name = stanfit$model_name, model_code = stanfit$model_code, model_cpp = stanfit$model_cpp, mk_cppmodule = function(x) get(paste0("model_", model_name))) })
/scratch/gouwar.j/cran-all/cranData/BINtools/R/stanmodels.R
--- title: "BINtools" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{BINtools} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # Introduction to BINtools The BINtools package implements the Bayesian BIN model (Bias, Information, Noise) discussed in the paper: *Satopää, Ville A., Marat Salikhov, Philip E. Tetlock, and Barbara Mellers. "Bias, Information, Noise: The BIN Model of Forecasting." Management Science (2021).* The model aims to disentangle the underlying processes that enable forecasters and forecasting methods to improve, decomposing forecasting accuracy into three components: bias, partial information, and noise. Bias refers to systematic deviations between forecasters’ interpretation of signals and the true informational value of those signals – deviations that can take the form of either over- or under-estimation of probabilities. Partial information is the informational value of the subset of signals that forecasters use – relative to full information that would permit forecasters to achieve omniscience. Finally, noise is the residual variability that is independent of the outcome. By describing the differences between two groups of forecasters, which we denote as *control* and *treatment*, the model allows the user to carry out useful inference, such as calculating the posterior probabilities of the treatment reducing bias, diminishing noise, or increasing information. It also provides insight into how much tamping down bias and noise in judgment or enhancing the efficient extraction of valid information from the environment improves forecasting accuracy. We can load the BINtools package as follows: ```r library(BINtools) ``` ## Functions and cases The BINtools package features three main functions: * `simulate_data()` creates synthetic data for testing and illustration purposes, * `estimate_BIN()` estimates a BIN model for a given set of data, and * `complete_summary()` produces a full BIN analysis based on a given BIN model estimation. It also allows for the application of the model to six different cases, determined both by the number of groups that the user wants to analyze and by the number of forecasters in each group. The cases available for analysis are listed below. * `MM`: Both groups with many forecasters. * `M1`: Control group with many forecasters. Treatment group with one forecaster. * `1M`: Control group with one forecaster. Treatment group with many forecasters. * `11`: Both groups with one forecaster. * `M0`: One group with many forecasters. * `10`: One group with one forecaster. We will illustrate how each of the functions of the package can be implemented with a detailed example of the first case, i.e., the case where two groups, denoted as control and treatment, have several forecasters. We will be applying the package's functions on synthetic data, which can be generated using the function `simulate_data()`. The other cases are implemented in a similar manner and hence are only illustrated briefly. # MM: Both groups with many forecasters ## Setting up the simulation environment We define a list containing the values of the parameters, based on which our synthetic data sets will be generated. The list must include the following: * `mu_star`: Base rate of the event outcome in the probit scale. E.g., if `mu_star = 0`, then, in expectation, Phi(0)*100% = 50% of the events happen, where Phi(.) is the CDF of a standard Gaussian random variable. * `mu_0`: The level of bias in the control group. 
This can be any number in the real-line. E.g., if `mu_0 = 0.1`, then the control group believes the base rate to be Phi(`mu_star` + 0.1). * `mu_1`: The level of bias in the treatment group; otherwise the interpretation is the same as above for `mu_0`. * `gamma_0`: The level of information in the control group. This is a number between 0 and 1, where 0 represents no information and 1 represents full information. * `gamma_1`: The level of information in the treatment group; otherwise the interpretation is the same as above for `gamma_0`. * `delta_0`: The level of noise in the control group. This is a positive value, with higher values indicating higher levels of noise. Noise is in the same scale as information. E.g., `delta_0 = 1.0` says that the control group uses as many irrelevant signals as there are relevant signals in the universe. In this sense it represents a very high level of noise. * `delta_1`: The level of noise in the treatment group; otherwise the interpretation is the same as above for `delta_0`. * `rho_0`: The level of within-group dependence between forecasts of the control group. This is a positive value, with higher values indicating higher levels of dependence. The dependence can be interpreted to stem from shared irrelevant (noise) and/or relevant (information) signals. * `rho_1`: The level of within-group dependence between forecasts of the treatment group; otherwise the interpretation is the same as above for `rho_0`. * `rho_01`: The level of inter-group dependence between forecasts of the control and treatment groups; otherwise the interpretation is the same as above for `rho_0`. It is important to mention that not all combinations of parameters are possible. In particular, the covariance parameters gamma and rho are dependent on each other and must result in a positive semi-definite covariance matrix for the outcomes and predictions. To find a feasible set of parameters, we recommend users to experiment: begin with the desired levels of mu, gamma, and delta, and values of rho close to zero, and then increase rho until data can be generated without errors. ```r true_parameters <- list( mu_star = -0.8, mu_0 = -0.5, mu_1 = 0.2, gamma_0 = 0.1, gamma_1 = 0.3, rho_0 = 0.05, delta_0 = 0.1, rho_1 = 0.2, delta_1 = 0.3, rho_01 = 0.05 ) ``` We set the number of events we want to simulate, as well as the number of control and treatment group members making predictions over these events. In this case, we will simulate 300 events, for which predictions will also be simulated for 100 control group members and 100 treatment group members. ```r #Number of events N = 300 #Number of control group members N_0 = 100 #Number of treatment group members N_1 = 100 ``` ## Generating a synthetic data set We use the `simulate_data()` function to generate a synthetic data set based on the chosen parameters. The `simulate_data()` function returns a list containing the simulated data. The elements of the list are as follows: 1. `Outcomes`: Vector containing binary values that indicate the outcome of each event. The j-th entry is equal to 1 if the j-th event occurs and equal to 0 otherwise. In our example, `Data_mm$Outcomes` will consist of a 300-long vector of binary values. 1. `Control`: List of vectors (one for each event) containing probability predictions made by the forecasters in the control group. In our example, `Data_mm$Control` will consist of a 300-long list of 100-long vectors, where each vector contains the predictions made by control group members for one of the events. 1. 
`Treatment`: List of vectors (one for each event) containing probability predictions made by the forecasters in the treatment group. In our example, `Data_mm$Treatment` will consist of a 300-long list of 100-long vectors, where each vector contains the predictions made by treatment group members for one of the events. It is important to note that the function `simulate_data()` has an optional parameter, `rho_o`, which represents the level of dependence between event outcomes. The parameter ranges from 0.0 to 1.0, with higher values indicating higher levels of dependence, and is helpful for analyzing the behavior of the BIN model in contexts where the outcomes are not independent of each other. However, for the sake of this illustration, we will not be considering this possibility. Instead, we choose to continue with the default value `rho_o = 0.0`. ```r #Simulate the data DATA_mm = simulate_data(true_parameters, N, N_0, N_1, rho_o=0.0) # equivalently: DATA_mm = simulate_data(true_parameters, N, N_0, N_1) ``` ## Estimating the BIN model The `estimate_BIN()` function allows the user to compare two groups (treatment and control) of forecasters in terms of their bias, information, and noise levels. The `estimate_BIN()` function requires two inputs: * `Outcomes`: Vector of binary values indicating the outcome of each event. The j-th entry is equal to 1 if the j-th event occurs and equal to 0 otherwise. In our example, we will input the outcomes of our synthetic data set, i.e., `Data_mm$Outcomes`. The user can inspect `Data_mm` for the correct formatting of the input data. * `Control`: List of vectors containing the predictions made for each event by forecasters in the control group. The j-th vector contains predictions for the j-th event. In our example, we will input the simulated predictions for control group members, i.e., `Data_mm$Control`. The function `estimate_BIN()` also has the following optional inputs: * `Treatment`: List of vectors containing the predictions made for each event by forecasters in the treatment group. The j-th vector contains predictions for the j-th event. In our example, we have data for the predictions of two groups that we wish to compare, so we will also input predictions for treatment group members, i.e., `Data_mm$Treatment`. When left unspecified, the `Treatment` parameter is set to `NULL`. In this case, the `estimate_BIN()` function estimates a model tailored for the evaluation of a single forecasting group using the information provided for the control group only. * `initial` A list containing the initial values for the parameters `mu_star`,`mu_0`,`mu_1`,`gamma_0`,`gamma_1`,`delta_0`,`rho_0`,`delta_1`,`rho_1`,and `rho_01`. (Default: `list(mu_star = 0,mu_0 = 0,mu_1 = 0,gamma_0 = 0.4,gamma_1 = 0.4, delta_0 = 0.5,rho_0 = 0.27, delta_1 = 0.5,rho_1 = 0.27,rho_01 = 0.1)`. ) * `warmup` The number of initial iterations used for *burnin*. The *burnin* values are included to remove the influence of a poor starting point. In other words, we allow *burnin* number of iterations for the sampler to converge. (Default:`2000`) * `iter` Total number of iterations. Must be larger than `warmup`. (Default:`4000`) The total number of samples will be the difference between `iter` and `warmup`. For example, if we set `iter = 4000` and `warmup = 2000`, then we will have 4000-2000 = 2000 samples in the final posterior sample. * `seed` This seed is used for random number generation and is an input for the model estimation process.
(Default: `1`) Model estimation is performed with the statistical programming language called [Stan](https://mc-stan.org/). This estimates the posterior distribution using a state-of-the-art sampling technique called Hamiltonian Monte Carlo. The return object is a Stan model. This way the user can apply available diagnostic tools in other packages, such as [rstan](https://mc-stan.org/rstan/), to analyze the final results. ```r # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_mm$Outcomes,DATA_mm$Control,DATA_mm$Treatment, warmup = 2000, iter = 4000, seed=1) #> #> SAMPLING FOR MODEL 'case_1_MM' NOW (CHAIN 1). #> Chain 1: #> Chain 1: Gradient evaluation took 0.000737 seconds #> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 7.37 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: #> Chain 1: Iteration: 1 / 4000 [ 0%] (Warmup) #> Chain 1: Iteration: 400 / 4000 [ 10%] (Warmup) #> Chain 1: Iteration: 800 / 4000 [ 20%] (Warmup) #> Chain 1: Iteration: 1200 / 4000 [ 30%] (Warmup) #> Chain 1: Iteration: 1600 / 4000 [ 40%] (Warmup) #> Chain 1: Iteration: 2000 / 4000 [ 50%] (Warmup) #> Chain 1: Iteration: 2001 / 4000 [ 50%] (Sampling) #> Chain 1: Iteration: 2400 / 4000 [ 60%] (Sampling) #> Chain 1: Iteration: 2800 / 4000 [ 70%] (Sampling) #> Chain 1: Iteration: 3200 / 4000 [ 80%] (Sampling) #> Chain 1: Iteration: 3600 / 4000 [ 90%] (Sampling) #> Chain 1: Iteration: 4000 / 4000 [100%] (Sampling) #> Chain 1: #> Chain 1: Elapsed Time: 64.545 seconds (Warm-up) #> Chain 1: 79.8534 seconds (Sampling) #> Chain 1: 144.398 seconds (Total) #> Chain 1: ``` ## Analyzing the resulting BIN model First, we provide the posterior means of the bias, noise, and information parameters. Second, by comparing components within each draw of the posterior sample, we can give posterior probabilities of the treatment group outperforming the control group with respect to each BIN component. Third, we calculate how much the treatment improves accuracy via changes in the expected bias, noise, and information. We provide a detailed description of each of the components of the analysis below. ```r # Create a Summary summary_results=complete_summary(full_bayesian_fit) ``` ### Parameter estimates We show the posterior means of the parameters of interest and their differences. Beside each posterior mean are the standard deviation and the 2.5th, 25th, 50th, 75th, and 97.5th percentiles of the posterior distribution of the parameter. The values corresponding to the 2.5th and 97.5th percentiles correspond to the 95% (central) credible interval, which represents the range in which the true parameter value falls with 95% posterior probability. The credible interval differs from the classical 95% confidence interval in that it can be interpreted directly: the true parameter value lies inside it with 95% posterior probability.
```r summary_results$`Parameter Estimates` #> parameter_name mean sd 2.5% 25% 50% 75% 97.5% #> 1 mu_star -0.82 0.08 -0.97 -0.87 -0.82 -0.76 -0.65 #> 2 mu_0 -0.50 0.08 -0.66 -0.55 -0.50 -0.45 -0.35 #> 3 mu_1 0.20 0.07 0.07 0.16 0.20 0.25 0.35 #> 4 gamma_0 0.10 0.01 0.07 0.09 0.10 0.10 0.12 #> 5 gamma_1 0.30 0.02 0.26 0.29 0.30 0.31 0.34 #> 6 rho_0 0.03 0.00 0.03 0.03 0.03 0.04 0.04 #> 7 delta_0 0.12 0.01 0.09 0.11 0.12 0.13 0.15 #> 8 rho_1 0.15 0.01 0.13 0.14 0.15 0.16 0.17 #> 9 delta_1 0.31 0.03 0.26 0.29 0.31 0.33 0.38 #> 10 rho_01 0.05 0.00 0.04 0.05 0.05 0.05 0.06 #> 11 diff_bias 0.30 0.15 0.00 0.20 0.29 0.40 0.59 #> 12 diff_info -0.20 0.02 -0.23 -0.21 -0.20 -0.19 -0.17 #> 13 diff_noise -0.19 0.02 -0.25 -0.21 -0.19 -0.18 -0.15 ``` In the results above, for example, the posterior mean of the control group bias, `mu_0`, is -0.5, and the parameter lies between -0.66 and -0.35 with 95% probability. The posterior mean of the treatment group bias, `mu_1`, is 0.2, and the parameter lies between 0.07 and 0.35 with 95% probability. The difference in bias between the treatment and control group is then |-0.5|-|0.2|=0.3 and lies between 0 and 0.59 with 95% probability. It is also worth noting that the values of the posterior means are reasonably close to the true values of the simulation environment. This corroborates the expectation that, after a sufficient number of iterations, the parameters of the model are accurately estimated. ### Posterior inferences This section provides the posterior probabilities of the following events. Compared to the control group, does the treatment group have: (i) less bias, (ii) more information, and (iii) less noise? Intuitively, one can think of these probabilities as the Bayesian analogs of the p-values in classical hypothesis testing. The closer the probability is to 1, the stronger the evidence for the hypothesis. ```r summary_results$`Posterior Inferences` #> Posterior_inferences Posterior_Probability #> 1 More information in treatment group 1.000 #> 2 Less noise in treatment group 0.000 #> 3 Less bias in treatment group 0.975 ``` In our example, the treatment group has more information than the control group with probability 1, less noise with probability 0, and less bias with probability 0.975. ### Control vs. Treatment comparative analysis A comparative analysis of the predictive performance of the control and treatment groups is summarized under ``$`Control,Treatment` ``. This part of the summary contains the components listed below. * Predictive performance and value of the contributions: ```r summary_results$`Control,Treatment`$`Value of the contribution` #> $mean_brier_score_1 #> [1] 0.1786799 #> #> $mean_brier_score_0 #> [1] 0.1720887 #> #> $contribution_bias #> [1] -0.00913257 #> #> $contribution_noise #> [1] -0.01307045 #> #> $contribution_information #> [1] 0.01561188 ``` Above you can see the predictive performance of the control and treatment groups, measured in terms of their Brier scores. The Brier score corresponds to the mean squared error between the probability predictions and the outcome indicators. Therefore, it ranges from 0 to 1, with 0 indicating perfect accuracy. A constant prediction of 0.5 receives a Brier score of 0.25. The mean Brier score of the control group for our example was 0.1720887, while the mean Brier score of the treatment group was 0.1786799. The individual contributions of the treatment, attributed to bias, noise, and information, are also provided.
The sum of individual contributions attributed to bias, information, and noise should roughly add up to the total contribution of the treatment, i.e., the difference between the treatment and the control mean Brier scores. * Percentage of control group Brier score: Individual contributions divided by the expected Brier score of the control group. These values show, in percentage terms, how the change in the Brier score can be attributed to each component. ```r summary_results$`Control,Treatment`$`Percentage of control group Brier score` #> $treatment_percentage_contribution_bias #> [1] -5.306896 #> #> $treatment_percentage_contribution_noise #> [1] -7.595183 #> #> $treatment_percentage_contribution_information #> [1] 9.071996 ``` In our example, the contributions to predictive accuracy attributed to bias, noise, and information were -0.009133, -0.01307, and 0.015612, respectively. These contributions corresponded to -5.306896%, -7.595183%, and 9.071996% of the mean Brier score of the control group, respectively. Therefore, e.g., noise changes the Brier score by -7.595183% of the control group's score; the negative sign indicates that the treatment group is noisier than the control group, which worsens accuracy. ### Maximum achievable contribution Finally, under ``$`Control, Perfect Accuracy` ``, an analysis of the maximum achievable contribution is given. Transformed contributions for a hypothetical treatment that induces perfect accuracy (no bias, no noise, full information) are given with respect to the control group. These values can be seen as theoretical limits on improvement for a given component (bias, information, or noise). As in the case of the Control vs. Treatment analysis, the summary includes the mean Brier scores of the control and perfect accuracy groups, the individual contributions of bias, noise, and information under a perfect accuracy scenario, and the percentage of the control group Brier score that each of these contributions represents. ```r summary_results$`Control, Perfect Accuracy` #> $`Value of the contribution` #> $`Value of the contribution`$mean_brier_score_1 #> [1] 0.001614471 #> #> $`Value of the contribution`$mean_brier_score_0 #> [1] 0.1720887 #> #> $`Value of the contribution`$contribution_bias #> [1] 0.04627696 #> #> $`Value of the contribution`$contribution_noise #> [1] 0.02613925 #> #> $`Value of the contribution`$contribution_information #> [1] 0.09805805 #> #> #> $`Percentage of control group Brier score` #> $`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_bias #> [1] 26.89134 #> #> $`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_noise #> [1] 15.1894 #> #> $`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_information #> [1] 56.9811 ``` This shows the potential percentage improvements in accuracy to be gained from each BIN component. For instance, it shows that the control group can reduce their Brier score by 15.1894% by removing all noise from their predictions. # M1: Control group with many forecasters. Treatment group with one forecaster. This section shows how the model can be applied to cases where the control group has many forecasters and the treatment group has one.
```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 100 #Number of treatment group members N_1 = 1 #Simulate the data DATA_m1 = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_m1$Outcomes,DATA_m1$Control,DATA_m1$Treatment, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ``` # 1M: Control group with one forecaster. Treatment group with many forecasters. This section shows how the model can be applied to cases where the treatment group has many forecasters and the control group has one. ```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 1 #Number of treatment group members N_1 = 100 #Simulate the data DATA_1m = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_1m$Outcomes, DATA_1m$Control, DATA_1m$Treatment, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ``` # 11: Both groups with one forecaster This section shows how the model can be applied to cases where both forecasting groups have only one forecaster (one prediction per event). ```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 1 #Number of treatment group members N_1 = 1 #Simulate the data DATA_11 = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_11$Outcomes,DATA_11$Control,DATA_11$Treatment, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ``` # M0: One group with many forecasters Aside from comparing two groups with a single or multiple forecasters, the model can also be applied to conduct analysis on a single group of forecasters. This section illustrates how this can be done. Again, we will simulate 300 events and 100 predictions per event. This time, however, we set the size of the treatment group to 0, so that there is only one group, namely the control group, that makes 100 predictions per event. ```r #Number of events N = 300 #Number of control group members N_0 = 100 #Number of treatment group members N_1 = 0 #Simulate the data DATA_m = simulate_data(true_parameters, N, N_0, N_1) ``` In this case, there are data for only one group. The `Treatment` input parameter of the `estimate_BIN()` function **must be left blank** (the default is NULL). Any other input for the Treatment parameter is likely to result in an error. ```r # Fit the BIN model # equivalently: full_bayesian_fit = estimate_BIN(DATA_m$Outcomes,DATA_m$Control, Treatment=NULL, warmup = 2000, iter = 4000,seed=1) full_bayesian_fit = estimate_BIN(DATA_m$Outcomes,DATA_m$Control, warmup = 2000, iter = 4000, seed=1) #> #> SAMPLING FOR MODEL 'case_4_M0' NOW (CHAIN 1). #> Chain 1: #> Chain 1: Gradient evaluation took 0.000284 seconds #> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.84 seconds. #> Chain 1: Adjust your expectations accordingly!
#> Chain 1: #> Chain 1: #> Chain 1: Iteration: 1 / 4000 [ 0%] (Warmup) #> Chain 1: Iteration: 400 / 4000 [ 10%] (Warmup) #> Chain 1: Iteration: 800 / 4000 [ 20%] (Warmup) #> Chain 1: Iteration: 1200 / 4000 [ 30%] (Warmup) #> Chain 1: Iteration: 1600 / 4000 [ 40%] (Warmup) #> Chain 1: Iteration: 2000 / 4000 [ 50%] (Warmup) #> Chain 1: Iteration: 2001 / 4000 [ 50%] (Sampling) #> Chain 1: Iteration: 2400 / 4000 [ 60%] (Sampling) #> Chain 1: Iteration: 2800 / 4000 [ 70%] (Sampling) #> Chain 1: Iteration: 3200 / 4000 [ 80%] (Sampling) #> Chain 1: Iteration: 3600 / 4000 [ 90%] (Sampling) #> Chain 1: Iteration: 4000 / 4000 [100%] (Sampling) #> Chain 1: #> Chain 1: Elapsed Time: 16.132 seconds (Warm-up) #> Chain 1: 19.2905 seconds (Sampling) #> Chain 1: 35.4226 seconds (Total) #> Chain 1: ``` In this case, the `complete_summary()` function provides the posterior means of the bias, noise, and information parameters only for the control group. A comparative analysis is also conducted with respect to a hypothetical treatment that induces perfect accuracy (no bias, no noise, full information). ```r # Create Summary summary_results=complete_summary(full_bayesian_fit) summary_results #> $`Parameter Estimates` #> parameter_name mean sd 2.5% 25% 50% 75% 97.5% #> 1 mu_star -0.76 0.08 -0.93 -0.82 -0.75 -0.70 -0.60 #> 2 mu_0 -0.55 0.08 -0.70 -0.61 -0.55 -0.50 -0.39 #> 3 gamma_0 0.10 0.01 0.08 0.10 0.10 0.11 0.12 #> 4 rho_0 0.03 0.00 0.02 0.03 0.03 0.03 0.03 #> 5 delta_0 0.10 0.01 0.08 0.09 0.10 0.11 0.12 #> #> $`Control, Perfect Accuracy` #> $`Control, Perfect Accuracy`$`Value of the contribution` #> $`Control, Perfect Accuracy`$`Value of the contribution`$mean_brier_score_1 #> [1] 0.001688616 #> #> $`Control, Perfect Accuracy`$`Value of the contribution`$mean_brier_score_0 #> [1] 0.1837328 #> #> $`Control, Perfect Accuracy`$`Value of the contribution`$contribution_bias #> [1] 0.05672206 #> #> $`Control, Perfect Accuracy`$`Value of the contribution`$contribution_noise #> [1] 0.02401952 #> #> $`Control, Perfect Accuracy`$`Value of the contribution`$contribution_information #> [1] 0.1013026 #> #> #> $`Control, Perfect Accuracy`$`Percentage of control group Brier score` #> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_bias #> [1] 30.87204 #> #> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_noise #> [1] 13.07307 #> #> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_information #> [1] 55.13582 ``` This output can be analyzed as before: * Parameter estimates: We show the posterior means of the parameters of interest. Beside each posterior mean are the standard deviation and the 2.5th, 25th, 50th, 75th, and 97.5th percentiles of the posterior distribution of the parameter. The values corresponding to the 2.5th and 97.5th percentiles correspond to the 95% (central) credible interval, which represents the range in which the true parameter value falls with 95% posterior probability. The credible interval differs from the classical 95% confidence interval in that it can be interpreted directly: the true parameter value lies inside it with 95% posterior probability. In the results above, for example, the posterior mean of the control group bias, `mu_0`, is -0.55, and the parameter lies between -0.7 and -0.39 with 95% probability.
It is also worth noting that the values of the posterior means are comparable to the true values of the simulation environment, indicating that the parameters of the model are estimated accurately. * Under ``$`Control, Perfect Accuracy` ``, an analysis of the maximum achievable contribution is given. Transformed contributions for a hypothetical treatment that induces perfect accuracy (no bias, no noise, full information) are given with respect to the control group. These values can be seen as theoretical limits on improvement for a given component (bias, information, or noise). As in the case of the Control vs. Treatment analysis, the summary includes the mean Brier scores of the control and perfect accuracy groups, the individual contributions of bias, noise, and information under a perfect accuracy scenario, and the percentage of the control group Brier score that each of these contributions represents. This shows the potential percentage improvements in accuracy to be gained from each BIN component. For instance, it shows that the control group can reduce their Brier score by 30.872% by removing all bias from their predictions. # 10: One group with one forecaster This section shows how the model can be applied to cases where there is a single forecaster. ```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 1 #Number of treatment group members N_1 = 0 #Simulate the data DATA_1 = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_1$Outcomes,DATA_1$Control, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ```
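# Checking the estimation with rstan

As noted earlier, the object returned by `estimate_BIN()` is a Stan fit, so the diagnostic tools in [rstan](https://mc-stan.org/rstan/) can be applied to any of the fits estimated in this vignette. The sketch below is illustrative: it assumes the `rstan` package is installed, that `full_bayesian_fit` is one of the fits from the examples above, and that the parameter names match those shown in the summaries (adjust `pars` if they do not).

```r
# Not run: basic convergence and mixing checks on a fitted BIN model
library(rstan)
check_hmc_diagnostics(full_bayesian_fit)   # divergences, tree depth, E-BFMI
print(full_bayesian_fit, pars = c("mu_star", "mu_0", "gamma_0", "delta_0", "rho_0"))
traceplot(full_bayesian_fit, pars = c("mu_star", "gamma_0"))
posterior_draws <- extract(full_bayesian_fit)  # full posterior sample as a named list
#End(Not run)
```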
/scratch/gouwar.j/cran-all/cranData/BINtools/inst/doc/BINtools_vignette.Rmd
--- title: "BINtools" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{BINtools} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- # Introduction to BINtools The BINtools package implements the Bayesian BIN model (Bias, Information, Noise) discussed in the paper: *Satopää, Ville A., Marat Salikhov, Philip E. Tetlock, and Barbara Mellers. "Bias, Information, Noise: The BIN Model of Forecasting." Management Science (2021).* The model aims to disentangle the underlying processes that enable forecasters and forecasting methods to improve, decomposing forecasting accuracy into three components: bias, partial information, and noise. Bias refers to systematic deviations between forecasters’ interpretation of signals and the true informational value of those signals – deviations that can take the form of either over- or under-estimation of probabilities. Partial information is the informational value of the subset of signals that forecasters use – relative to full information that would permit forecasters to achieve omniscience. Finally, noise is the residual variability that is independent of the outcome. By describing the differences between two groups of forecasters, which we denote as *control* and *treatment*, the model allows the user to carry out useful inference, such as calculating the posterior probabilities of the treatment reducing bias, diminishing noise, or increasing information. It also provides insight into how much tamping down bias and noise in judgment or enhancing the efficient extraction of valid information from the environment improves forecasting accuracy. We can load the BINtools package as follows: ```r library(BINtools) ``` ## Functions and cases The BINtools package features three main functions: * `simulate_data()` creates synthetic data for testing and illustration purposes, * `estimate_BIN()` estimates a BIN model for a given set of data, and * `complete_summary()` produces a full BIN analysis based on a given BIN model estimation. It also allows for the application of the model to six different cases, determined both by the number of groups that the user wants to analyze and by the number of forecasters in each group. The cases available for analysis are listed below. * `MM`: Both groups with many forecasters. * `M1`: Control group with many forecasters. Treatment group with one forecaster. * `1M`: Control group with one forecaster. Treatment group with many forecasters. * `11`: Both groups with one forecaster. * `M0`: One group with many forecasters. * `10`: One group with one forecaster. We will illustrate how each of the functions of the package can be implemented with a detailed example of the first case, i.e., the case where two groups, denoted as control and treatment, have several forecasters. We will be applying the package's functions on synthetic data, which can be generated using the function `simulate_data()`. The other cases are implemented in a similar manner and hence are only illustrated briefly. # MM: Both groups with many forecasters ## Setting up the simulation environment We define a list containing the values of the parameters, based on which our synthetic data sets will be generated. The list must include the following: * `mu_star`: Base rate of the event outcome in the probit scale. E.g., if `mu_star = 0`, then, in expectation, Phi(0)*100% = 50% of the events happen, where Phi(.) is the CDF of a standard Gaussian random variable. * `mu_0`: The level of bias in the control group. 
This can be any number in the real-line. E.g., if `mu_0 = 0.1`, then the control group believes the base rate to be Phi(`mu_star` + 0.1). * `mu_1`: The level of bias in the treatment group; otherwise the interpretation is the same as above for `mu_0`. * `gamma_0`: The level of information in the control group. This is a number between 0 and 1, where 0 represents no information and 1 represents full information. * `gamma_1`: The level of information in the treatment group; otherwise the interpretation is the same as above for `gamma_0`. * `delta_0`: The level of noise in the control group. This is a positive value, with higher values indicating higher levels of noise. Noise is in the same scale as information. E.g., `delta_0 = 1.0` says that the control group uses as many irrelevant signals as there are relevant signals in the universe. In this sense it represents a very high level of noise. * `delta_1`: The level of noise in the treatment group; otherwise the interpretation is the same as above for `delta_0`. * `rho_0`: The level of within-group dependence between forecasts of the control group. This is a positive value, with higher values indicating higher levels of dependence. The dependence can be interpreted to stem from shared irrelevant (noise) and/or relevant (information) signals. * `rho_1`: The level of within-group dependence between forecasts of the treatment group; otherwise the interpretation is the same as above for `rho_0`. * `rho_01`: The level of inter-group dependence between forecasts of the control and treatment groups; otherwise the interpretation is the same as above for `rho_0`. It is important to mention that not all combinations of parameters are possible. In particular, the covariance parameters gamma and rho are dependent on each other and must result in a positive semi-definite covariance matrix for the outcomes and predictions. To find a feasible set of parameters, we recommend users to experiment: begin with the desired levels of mu, gamma, and delta, and values of rho close to zero, and then increase rho until data can be generated without errors. ```r true_parameters <- list( mu_star = -0.8, mu_0 = -0.5, mu_1 = 0.2, gamma_0 = 0.1, gamma_1 = 0.3, rho_0 = 0.05, delta_0 = 0.1, rho_1 = 0.2, delta_1 = 0.3, rho_01 = 0.05 ) ``` We set the number of events we want to simulate, as well as the number of control and treatment group members making predictions over these events. In this case, we will simulate 300 events, for which predictions will also be simulated for 100 control group members and 100 treatment group members. ```r #Number of events N = 300 #Number of control group members N_0 = 100 #Number of treatment group members N_1 = 100 ``` ## Generating a synthetic data set We use the `simulate_data()` function to generate a synthetic data set based on the chosen parameters. The `simulate_data()` function returns a list containing the simulated data. The elements of the list are as follows: 1. `Outcomes`: Vector containing binary values that indicate the outcome of each event. The j-th entry is equal to 1 if the j-th event occurs and equal to 0 otherwise. In our example, `Data_mm$Outcomes` will consist of a 300-long vector of binary values. 1. `Control`: List of vectors (one for each event) containing probability predictions made by the forecasters in the control group. In our example, `Data_mm$Control` will consist of a 300-long list of 100-long vectors, where each vector contains the predictions made by control group members for one of the events. 1. 
`Treatment`: List of vectors (one for each event) containing probability predictions made by the forecasters in the treatment group. In our example, `Data_mm$Treatment` will consist of a 300-long list of 100-long vectors, where each vector contains the predictions made by treatment group members for one of the events. It is important to note that the function `simulate_data()` has an optional parameter, `rho_o`, which represents the level of dependence between event outcomes. The parameter ranges from 0.0 to 1.0, with higher values indicating higher levels of dependence, and is helpful for analyzing the behavior of the BIN model in contexts where the outcomes are not independent from each other. However, for the sake of this illustration, we will not be considering this possibility. Instead, we choose to continue with the default value 'rho_o=0.0`. ```r #Simulate the data DATA_mm = simulate_data(true_parameters, N, N_0, N_1, rho_o=0.0) # equivalently: DATA_mm = simulate_data(true_parameters, N, N_0, N_1) ``` ## Estimating the BIN model The `estimate_BIN()` function allows the user to compare two groups (treatment and control) of forecasters in terms of their bias, information, and noise levels. The `estimate_BIN()` function requires two inputs: * `Outcomes`: Vector of binary values indicating the outcome of each event. The j-th entry is equal to 1 if the j-th event occurs and equal to 0 otherwise. In our example, we will input the outcomes of our synthetic data set, i.e., `Data_mm$Outcomes`. The user can inspect `Data_mm` for the correct formatting of the input data. * `Control`: List of vectors containing the predictions made for each event by forecasters in the control group. The j-th vector contains predictions for the j-th event. In our example, we will input the simulated predictions for control group members, i.e., `Data_mm$Control`. The function `estimate_BIN()` also has the following optional inputs: * `Treatment`: List of vectors containing the predictions made for each event by forecasters in the treatment group. The j-th vector contains predictions for the j-th event. In our example, we have data for the predictions of two groups that we wish to compare, so we will also input predictions for treatment group members, i.e., `Data_mm$Treatment`. When left unspecified, the `Treatment` parameter is set to `NULL`. In this case, the estimate_BIN() function estimates a model tailored for the evaluation of a single forecasting group using the information provided for the control group only. * `initial` A list containing the initial values for the parameters `mu_star`,`mu_0`,`mu_1`,`gamma_0`,`gamma_1`,`delta_0`,`rho_0`,`delta_1`,`rho_1`,and `rho_01`. (Default: `list(mu_star = 0,mu_0 = 0,mu_1 = 0,gamma_0 = 0.4,gamma_1 = 0.4, delta_0 = 0.5,rho_0 = 0.27, delta_1 = 0.5,rho_1 = 0.27,rho_01 = 0.1)`. ) * `warmup` The number of initial iterations used for *burnin*. The *burnin* values are included to remove the influence of a poor starting point. In other words, we allow *burnin* number of iterations for the sampler to converge. (Default:`2000`) * `iter` Total number of iterations. Must be larger than `warmup`. (Default:`4000`) The total number of samples will be the difference between `iter` and `warmup`. For example, if we set `iter = 4000` and `warmup = 2000`, then we will have 4000-2000 = 2000 samples in the final posterior sample. * `seed` This seed is used for random number generation and is an input for the model estimation process. 
(Default: `1`) Model estimation is performed with the statistical programming language called [Stan](https://mc-stan.org/). This estimates the posterior distribution using a state-of-the-art sampling technique called Hamiltonian Monte Carlo. The return object is a Stan model. This way the user can apply available diagnostics tools in other packages, such as [rstan](https://mc-stan.org/rstan/), to analyze the final results. ```r # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_mm$Outcomes,DATA_mm$Control,DATA_mm$Treatment, warmup = 2000, iter = 4000, seed=1) #> #> SAMPLING FOR MODEL 'case_1_MM' NOW (CHAIN 1). #> Chain 1: #> Chain 1: Gradient evaluation took 0.000737 seconds #> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 7.37 seconds. #> Chain 1: Adjust your expectations accordingly! #> Chain 1: #> Chain 1: #> Chain 1: Iteration: 1 / 4000 [ 0%] (Warmup) #> Chain 1: Iteration: 400 / 4000 [ 10%] (Warmup) #> Chain 1: Iteration: 800 / 4000 [ 20%] (Warmup) #> Chain 1: Iteration: 1200 / 4000 [ 30%] (Warmup) #> Chain 1: Iteration: 1600 / 4000 [ 40%] (Warmup) #> Chain 1: Iteration: 2000 / 4000 [ 50%] (Warmup) #> Chain 1: Iteration: 2001 / 4000 [ 50%] (Sampling) #> Chain 1: Iteration: 2400 / 4000 [ 60%] (Sampling) #> Chain 1: Iteration: 2800 / 4000 [ 70%] (Sampling) #> Chain 1: Iteration: 3200 / 4000 [ 80%] (Sampling) #> Chain 1: Iteration: 3600 / 4000 [ 90%] (Sampling) #> Chain 1: Iteration: 4000 / 4000 [100%] (Sampling) #> Chain 1: #> Chain 1: Elapsed Time: 64.545 seconds (Warm-up) #> Chain 1: 79.8534 seconds (Sampling) #> Chain 1: 144.398 seconds (Total) #> Chain 1: ``` ## Analyzing the resulting BIN model First, we provide the posterior means of the bias, noise, and information parameters. Second, by comparing components within each draw of the posterior sample, we can give posterior probabilities of the treatment group outperforming the control group with respect to each BIN component. Third, we calculate how much the treatment improves accuracy via changes in the expected bias, noise, and information. We provide a detailed description of each of the components of the analysis below. ```r # Create a Summary summary_results=complete_summary(full_bayesian_fit) ``` ### Parameter estimates We show the posterior means of the parameters of interest and their differences. Beside each posterior mean are the standard deviation and the 2.5th, 25th, 50th, 75th, and 97,5th percentiles of the posterior distribution of the parameter. The values corresponding to the 2,5th and 97,5th percentiles correspond to the 95% (central) credible interval, which represents the range in which the true parameter value falls with 95% posterior probability. The credible interval differs from the classical 95% confidence interval in that it contains the true parameter value with 95% posterior probability. 
```r summary_results$`Parameter Estimates` #> parameter_name mean sd 2.5% 25% 50% 75% 97.5% #> 1 mu_star -0.82 0.08 -0.97 -0.87 -0.82 -0.76 -0.65 #> 2 mu_0 -0.50 0.08 -0.66 -0.55 -0.50 -0.45 -0.35 #> 3 mu_1 0.20 0.07 0.07 0.16 0.20 0.25 0.35 #> 4 gamma_0 0.10 0.01 0.07 0.09 0.10 0.10 0.12 #> 5 gamma_1 0.30 0.02 0.26 0.29 0.30 0.31 0.34 #> 6 rho_0 0.03 0.00 0.03 0.03 0.03 0.04 0.04 #> 7 delta_0 0.12 0.01 0.09 0.11 0.12 0.13 0.15 #> 8 rho_1 0.15 0.01 0.13 0.14 0.15 0.16 0.17 #> 9 delta_1 0.31 0.03 0.26 0.29 0.31 0.33 0.38 #> 10 rho_01 0.05 0.00 0.04 0.05 0.05 0.05 0.06 #> 11 diff_bias 0.30 0.15 0.00 0.20 0.29 0.40 0.59 #> 12 diff_info -0.20 0.02 -0.23 -0.21 -0.20 -0.19 -0.17 #> 13 diff_noise -0.19 0.02 -0.25 -0.21 -0.19 -0.18 -0.15 ``` In the results above, for example, the posterior mean of the control group bias, `mu_0`, is -0.5, and the parameter lies between -0.66 and -0.35 with 95% probability. The posterior mean of the treatment group bias, `mu_1`, is 0.2, and the parameter lies between 0.07 and 0.35 with 95% probability. The difference in bias between the treatment and control group is then |-0.5|-|0.2|=0.3 and lies between 0 and 0.59 with 95% probability. It is also worth noting that the values of the posterior means are reasonably close to the true values of the simulation environment. This corroborates the expectation that, after a sufficient amount of iterations, the parameters of the model are accurately estimated. ### Posterior inferences This section provides the posterior probabilities of events. Compared to the control group, does the treatment group have: (i) less bias, (ii) more information, and (iii) less noise? Intuitively, one can think of these probabilities as the Bayesian analogs of the p-values in classical hypothesis testing. The closer the probability is to 1, the stronger the evidence for the hypothesis. ```r summary_results$`Posterior Inferences` #> Posterior_inferences Posterior_Probability #> 1 More information in treatment group 1.000 #> 2 Less noise in treatment group 0.000 #> 3 Less bias in treatment group 0.975 ``` In our example, the treatment group has more information than the control group with probability 1, less noise with probability 0, and less bias with 0.975 probability. ### Control vs. Treatment comparative analysis A comparative analysis of the predictive performance of the control and treatment groups is summarized under ``$`Control,Treatment` ``. This part of the summary contains the components listed below. * Predictive performance and value of the contributions: ```r summary_results$`Control,Treatment`$`Value of the contribution` #> $mean_brier_score_1 #> [1] 0.1786799 #> #> $mean_brier_score_0 #> [1] 0.1720887 #> #> $contribution_bias #> [1] -0.00913257 #> #> $contribution_noise #> [1] -0.01307045 #> #> $contribution_information #> [1] 0.01561188 ``` Above you can visualize the predictive performance of the control and treatment groups, measured in terms of their Brier scores. The Brier score corresponds to the mean squared error between the probability predictions and the outcome indicators. Therefore, it ranges from 0 to 1, with 0 indicating perfect accuracy. A constant prediction of 0.5 receives a Brier score of 0.25. The mean Brier score of the control group for our example was 0.1720887, while the mean Brier score of the treatment group was 0.1786799. The individual contributions of each treatment are also provided. 
The sum of individual contributions attributed to bias, information, and noise should roughly add up to the total contribution of the treatment, i.e., the difference between the treatment and the control mean Brier scores. * Percentage of control group Brier score: Individual contributions divided by the expected Brier score of the control group. These values show, in percentage terms, how the change in the Brier score can be attributed to each component. ```r summary_results$`Control,Treatment`$`Percentage of control group Brier score` #> $treatment_percentage_contribution_bias #> [1] -5.306896 #> #> $treatment_percentage_contribution_noise #> [1] -7.595183 #> #> $treatment_percentage_contribution_information #> [1] 9.071996 ``` In our example, the contributions to predictive accuracy attributed to bias, noise, and information were -0.009133, -0.01307, 0.015612, respectively. These contributions corresponded to -5.306896% , -7.595183% , and 9.071996% of the mean Brier score of the control group, respectively. Therefore, e.g., the control group experiences a -7.595183% change in the Brier score due to better noise reduction. ### Maximum achievable contribution Finally, under ``$`Control, Perfect Accuracy` ``, an analysis of the maximum achievable contribution is given. Transformed contributions for a hypothetical treatment that induces perfect accuracy (no bias, no noise, full information) are given with respect to the control group. These values can be seen as theoretical limits on improvement for a given component (bias, information or noise). As in the case of the Control vs. Treatment analysis, the summary includes the mean Brier scores of the control and perfect accuracy groups, the individual contributions of bias, noise, and information under a perfect accuracy scenario, and the percentage of the control group Brier score that each of these contributions represents. ```r summary_results$`Control, Perfect Accuracy` #> $`Value of the contribution` #> $`Value of the contribution`$mean_brier_score_1 #> [1] 0.001614471 #> #> $`Value of the contribution`$mean_brier_score_0 #> [1] 0.1720887 #> #> $`Value of the contribution`$contribution_bias #> [1] 0.04627696 #> #> $`Value of the contribution`$contribution_noise #> [1] 0.02613925 #> #> $`Value of the contribution`$contribution_information #> [1] 0.09805805 #> #> #> $`Percentage of control group Brier score` #> $`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_bias #> [1] 26.89134 #> #> $`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_noise #> [1] 15.1894 #> #> $`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_information #> [1] 56.9811 ``` This shows the potential percentage improvements in accuracy to be gained from each BIN component. For instance, it shows that the control group can reduce their Brier score by 15.1894% by removing all noise from their predictions. # M1: Control group with many forecasters. Treatment group with one forecaster. This section shows how the model can be applied to cases where the control group has many forecasters and the treatment group has one. 
```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 100 #Number of treatment group members N_1 = 1 #Simulate the data DATA_m1 = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_m1$Outcomes,DATA_m1$Control,DATA_m1$Treatment, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ``` # 1M: Control group with one forecaster. Treatment group with many forecasters. This section shows how the model can be applied to cases where the treatment group has many forecasters and the control group has one. ```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 1 #Number of treatment group members N_1 = 100 #Simulate the data DATA_1m = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_1m$Outcomes, DATA_1m$Control, DATA_1m$Treatment, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ``` # 11: Both groups with one forecaster This section shows how the model can be applied to cases where both forecasting groups have only one forecaster (one prediction per event). ```r # Not run: #Number of events N = 300 #Number of control group members N_0 = 1 #Number of treatment group members N_1 = 1 #Simulate the data DATA_11 = simulate_data(true_parameters, N, N_0, N_1) # Fit the BIN model full_bayesian_fit = estimate_BIN(DATA_11$Outcomes,DATA_11$Control,DATA_11$Treatment, warmup = 2000, iter = 4000,seed=1) # Create Summary complete_summary(full_bayesian_fit) #End(Not run) ``` # M0: One group with many forecasters Aside from comparing two groups with a single or multiple forecasters, the model can also be applied to conduct analysis on a single group of forecasters. This section illustrates how this can be done. Again, we will simulate 300 events and 100 predictions per event. This time, however, we set the size of the treatment group to 0, so that there is only one group, namely the control group, that makes 100 predictions per event. ```r #Number of events N = 300 #Number of control group members N_0 = 100 #Number of treatment group members N_1 = 0 #Simulate the data DATA_m = simulate_data(true_parameters, N, N_0, N_1) ``` In this case, there are data for only one group. The `Treatment` input parameter of the `estimate_BIN()` function **must be left blank** (the default is NULL). Any other input for the Treatment parameter is likely to result in an error. ```r # Fit the BIN model # equivalently: full_bayesian_fit = estimate_BIN(DATA_m$Outcomes,DATA_m$Control, Treatment=NULL, warmup = 1000, iter = 2000,seed=1) full_bayesian_fit = estimate_BIN(DATA_m$Outcomes,DATA_m$Control, warmup = 2000, iter = 4000, seed=1) #> #> SAMPLING FOR MODEL 'case_4_M0' NOW (CHAIN 1). #> Chain 1: #> Chain 1: Gradient evaluation took 0.000284 seconds #> Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.84 seconds. #> Chain 1: Adjust your expectations accordingly! 
#> Chain 1: 
#> Chain 1: 
#> Chain 1: Iteration:    1 / 4000 [  0%]  (Warmup)
#> Chain 1: Iteration:  400 / 4000 [ 10%]  (Warmup)
#> Chain 1: Iteration:  800 / 4000 [ 20%]  (Warmup)
#> Chain 1: Iteration: 1200 / 4000 [ 30%]  (Warmup)
#> Chain 1: Iteration: 1600 / 4000 [ 40%]  (Warmup)
#> Chain 1: Iteration: 2000 / 4000 [ 50%]  (Warmup)
#> Chain 1: Iteration: 2001 / 4000 [ 50%]  (Sampling)
#> Chain 1: Iteration: 2400 / 4000 [ 60%]  (Sampling)
#> Chain 1: Iteration: 2800 / 4000 [ 70%]  (Sampling)
#> Chain 1: Iteration: 3200 / 4000 [ 80%]  (Sampling)
#> Chain 1: Iteration: 3600 / 4000 [ 90%]  (Sampling)
#> Chain 1: Iteration: 4000 / 4000 [100%]  (Sampling)
#> Chain 1: 
#> Chain 1: Elapsed Time: 16.132 seconds (Warm-up)
#> Chain 1:               19.2905 seconds (Sampling)
#> Chain 1:               35.4226 seconds (Total)
#> Chain 1: 
```

In this case, the `complete_summary()` function provides the posterior means of the bias, noise, and information parameters only for the control group. A comparative analysis is also conducted with respect to a hypothetical treatment that induces perfect accuracy (no bias, no noise, full information).

```r
# Create Summary
summary_results = complete_summary(full_bayesian_fit)
summary_results
#> $`Parameter Estimates`
#>   parameter_name  mean   sd  2.5%   25%   50%   75% 97.5%
#> 1        mu_star -0.76 0.08 -0.93 -0.82 -0.75 -0.70 -0.60
#> 2           mu_0 -0.55 0.08 -0.70 -0.61 -0.55 -0.50 -0.39
#> 3        gamma_0  0.10 0.01  0.08  0.10  0.10  0.11  0.12
#> 4          rho_0  0.03 0.00  0.02  0.03  0.03  0.03  0.03
#> 5        delta_0  0.10 0.01  0.08  0.09  0.10  0.11  0.12
#> 
#> $`Control, Perfect Accuracy`
#> $`Control, Perfect Accuracy`$`Value of the contribution`
#> $`Control, Perfect Accuracy`$`Value of the contribution`$mean_brier_score_1
#> [1] 0.001688616
#> 
#> $`Control, Perfect Accuracy`$`Value of the contribution`$mean_brier_score_0
#> [1] 0.1837328
#> 
#> $`Control, Perfect Accuracy`$`Value of the contribution`$contribution_bias
#> [1] 0.05672206
#> 
#> $`Control, Perfect Accuracy`$`Value of the contribution`$contribution_noise
#> [1] 0.02401952
#> 
#> $`Control, Perfect Accuracy`$`Value of the contribution`$contribution_information
#> [1] 0.1013026
#> 
#> 
#> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`
#> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_bias
#> [1] 30.87204
#> 
#> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_noise
#> [1] 13.07307
#> 
#> $`Control, Perfect Accuracy`$`Percentage of control group Brier score`$perfect_accuracy_percentage_contribution_information
#> [1] 55.13582
```

This output can be analyzed as before:

* Parameter estimates: We show the posterior means of the parameters of interest and their differences. Beside each posterior mean are the standard deviation and the 2.5th, 25th, 50th, 75th, and 97.5th percentiles of the posterior distribution of the parameter. The values corresponding to the 2.5th and 97.5th percentiles delimit the 95% (central) credible interval, i.e., the range in which the true parameter value lies with 95% posterior probability; unlike a classical 95% confidence interval, the credible interval can be interpreted directly in these probabilistic terms. In the results above, for example, the posterior mean of the control group bias, `mu_0`, is -0.55, and the parameter lies between -0.7 and -0.39 with 95% probability.
It is also worth noting that the values of the posterior means are comparable to the true values of the simulation environment, indicating that the parameters of the model are estimated accurately.

* Under ``$`Control, Perfect Accuracy` ``, an analysis of the maximum achievable contribution is given. Transformed contributions for a hypothetical treatment that induces perfect accuracy (no bias, no noise, full information) are given with respect to the control group. These values can be seen as theoretical limits on improvement for a given component (bias, information or noise). As in the case of the Control vs. Treatment analysis, the summary includes the mean Brier scores of the control and perfect accuracy groups, the individual contributions of bias, noise, and information under a perfect accuracy scenario, and the percentage of the control group Brier score that each of these contributions represents. This shows the potential percentage improvements in accuracy to be gained from each BIN component. For instance, it shows that the control group can reduce their Brier score by 30.872% by removing all bias from their predictions.

# 10: One group with one forecaster

This section shows how the model can be applied to cases where there is a single forecaster.

```r
# Not run:
#Number of events
N = 300
#Number of control group members
N_0 = 1
#Number of treatment group members
N_1 = 0
#Simulate the data
DATA_1 = simulate_data(true_parameters, N, N_0, N_1)
# Fit the BIN model
full_bayesian_fit = estimate_BIN(DATA_1$Outcomes, DATA_1$Control, warmup = 2000, iter = 4000, seed = 1)
# Create Summary
complete_summary(full_bayesian_fit)
#End(Not run)
```
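As a small additional illustration (not part of the original vignette code), the 95% credible intervals discussed above can be pulled directly out of the summary object. This sketch assumes that ``summary_results$`Parameter Estimates` `` is a data frame with the column names shown in the printed output of the M0 section.

```r
# Not run:
# Extract the 95% central credible intervals from the parameter table
est <- summary_results$`Parameter Estimates`
est[, c("parameter_name", "2.5%", "97.5%")]
#End(Not run)
```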
/scratch/gouwar.j/cran-all/cranData/BINtools/vignettes/BINtools_vignette.Rmd
#' Propagating above-ground biomass (AGB) or carbon (AGC) errors to the stand level
#'
#' Propagation of the errors throughout the steps needed to compute AGB or AGC.
#'
#' @param D Vector of tree diameters (in cm)
#' @param WD Vector of wood density estimates (in g/cm3)
#' @param errWD Vector of errors associated with the wood density estimates (should be of the same size as `WD`)
#' @param H (option 1) Vector of tree heights (in m). If set, `errH` must be set too.
#' @param errH (if `H`) Residual standard error (RSE) of a model or vector of errors (sd values) associated with the tree height
#' values (in the latter case the vector should be of the same length as `H`).
#' @param HDmodel (option 2) Model used to estimate tree height from tree diameter (output from [modelHD()], see example).
#' @param coord (option 3) Coordinates of the site(s), either a vector giving a single site (e.g. c(longitude, latitude))
#' or a matrix/dataframe with two columns (e.g. cbind(longitude, latitude)). The coordinates are used to predict
#' height-diameter allometry with bioclimatic variables.
#' @param Dpropag This variable can take three kinds of values, indicating how to propagate the errors on diameter measurements:
#' a single numerical value or a vector of the same size as `D`, both representing the standard deviation associated
#' with the diameter measurements, or `"chave2004"` (a large error on 5 percent of the measurements, a smaller error on
#' the remaining 95 percent of the trees).
#' @param n Number of iterations. Cannot be smaller than 50 or larger than 1000. By default `n = 1000`
#' @param Carbon (logical) Whether or not the propagation should be done up to the carbon value (FALSE by default).
#' @param Dlim (optional) Minimum diameter (in cm) for which above-ground biomass should be calculated (all diameters below
#' `Dlim` will have a 0 value in the output).
#' @param plot (optional) Plot ID, must be either one value or a vector of the same length as `D`. This argument is used to build
#' stand-specific HD models.
#'
#' @details See Rejou-Mechain et al. (2017) for all details on the error propagation procedure.
#'
#' @return Returns a list with (if Carbon is FALSE):
#' - `meanAGB`: Mean stand AGB value following the error propagation
#' - `medAGB`: Median stand AGB value following the error propagation
#' - `sdAGB`: Standard deviation of the stand AGB value following the error propagation
#' - `credibilityAGB`: Credibility interval at 95\% of the stand AGB value following the error propagation
#' - `AGB_simu`: Matrix with the AGB of the trees (rows) times the n iterations (columns)
#'
#' @references Chave, J. et al. (2004). _Error propagation and scaling for tropical forest biomass estimates_.
#' Philosophical Transactions of the Royal Society B: Biological Sciences, 359(1443), 409-420.
#' @references Rejou-Mechain et al. (2017).
#' _BIOMASS: An R Package for estimating above-ground biomass and its uncertainty in tropical forests_.
#' Methods in Ecology and Evolution, 8 (9), 1163-1167.
#' #' @author Maxime REJOU-MECHAIN, Bruno HERAULT, Camille PIPONIOT, Ariane TANGUY, Arthur PERE #' #' @examples #' # Load a database #' data(NouraguesHD) #' data(KarnatakaForest) #' #' # Modelling height-diameter relationship #' HDmodel <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, method = "log2") #' #' # Retrieving wood density values #' \donttest{ #' KarnatakaWD <- getWoodDensity(KarnatakaForest$genus, KarnatakaForest$species, #' stand = KarnatakaForest$plotId #' ) #' } #' #' # Propagating errors with a standard error in wood density in one plot #' filt <- KarnatakaForest$plotId == "BSP20" #' set.seed(10) #' \donttest{ #' resultMC <- AGBmonteCarlo( #' D = KarnatakaForest$D[filt], WD = KarnatakaWD$meanWD[filt], #' errWD = KarnatakaWD$sdWD[filt], HDmodel = HDmodel #' ) #' str(resultMC) #' } #' #' # If only the coordinates are available #' lat <- KarnatakaForest$lat[filt] #' long <- KarnatakaForest$long[filt] #' coord <- cbind(long, lat) #' \donttest{ #' resultMC <- AGBmonteCarlo( #' D = KarnatakaForest$D[filt], WD = KarnatakaWD$meanWD[filt], #' errWD = KarnatakaWD$sdWD[filt], coord = coord #' ) #' str(resultMC) #' } #' #' # Propagating errors with a standard error in wood density in all plots at once #' \donttest{ #' KarnatakaForest$meanWD <- KarnatakaWD$meanWD #' KarnatakaForest$sdWD <- KarnatakaWD$sdWD #' resultMC <- by( #' KarnatakaForest, KarnatakaForest$plotId, #' function(x) AGBmonteCarlo( #' D = x$D, WD = x$meanWD, errWD = x$sdWD, #' HDmodel = HDmodel, Dpropag = "chave2004" #' ) #' ) #' meanAGBperplot <- unlist(sapply(resultMC, "[", 1)) #' credperplot <- sapply(resultMC, "[", 4) #' } #' #' @keywords monte carlo #' @importFrom stats pnorm qnorm runif #' @export AGBmonteCarlo <- function(D, WD = NULL, errWD = NULL, H = NULL, errH = NULL, HDmodel = NULL, coord = NULL, Dpropag = NULL, n = 1000, Carbon = FALSE, Dlim = NULL, plot = NULL) { len <- length(D) # parameters verification ------------------------------------------------- if (n > 1000 | n < 50) { stop("n cannot be smaller than 50 or larger than 1000") } if (!is.null(Dpropag)) { if ((is.numeric(Dpropag) && !(length(Dpropag) %in% c(1, len)) || (!is.numeric(Dpropag) && tolower(Dpropag) != "chave2004"))) { stop('Dpropag should be set to one of these options: - "chave2004" - a single sd value that will be applied to all trees - a vector of sd values of the same length as D') } } if (is.null(WD) || is.null(errWD)) { stop("The WD and errWD arguments must be not NULL") } if (len != length(WD) || len != length(errWD)) { stop("One of vector WD or errWD does not have the same length as D") } if (is.null(HDmodel) & is.null(coord) & is.null(H)) { stop("Input missing, you need to provide one of the following arguments: - H - HDmodel - coord") } if ((!is.null(HDmodel) && !is.null(coord)) || (!is.null(HDmodel) && !is.null(H)) || (!is.null(coord) && !is.null(H))) { stop("Too many input, choose one input among those arguments: - H and Herr - HDmodel - coord") } if (!is.null(H)) { if (is.null(errH)) { stop("Cannot propagate height errors without information on associated errors (errH is null), if you do not want to propagate H errors please set errH to 0") } if (length(H) != len || !(length(errH) %in% c(1, len))) { stop("H must be the same length as D and errH must be either one value or the same length as D") } } if (!is.null(coord) && ((is.vector(coord) && length(coord) != 2) || (is.matrix(coord) && nrow(coord) != len))) { stop("coord should be either - a vector (e.g. 
c(longitude, latitude)) - a matrix with two columns (longitude and latitude) having the same number of rows as the number of trees (length(D))") } # the length of the plot is tested in predictHeight # the names of the plot and the names of the model is tested in predictHeight if (!is.null(plot) && is.null(HDmodel)) { stop("The 'plot' vector must be with 'model' argument") } # function truncated random gausien law ----------------------------------- myrtruncnorm <- function(n, lower = -1, upper = 1, mean = 0, sd = 1) { qnorm(runif(n, pnorm(lower, mean = mean, sd = sd), pnorm(upper, mean = mean, sd = sd)), mean = mean, sd = sd) } ### Propagate error with Markov Chain Monte Carlo approach # --------------------- D --------------------- if (!is.null(Dpropag)) { if (length(Dpropag) == 1 && tolower(Dpropag) == "chave2004") { # Propagation of the measurement error on D: based on Chave et al. 2004 (p.412) Phil. Trans. R. Soc. Lond. B. fivePercent <- round(len * 5 / 100) chaveError <- function(x, len) { ## Assigning large errors on 5% of the trees largeErrSample <- sample(len, fivePercent) D_sd <- 0.0062 * x + 0.0904 # Assigning small errors on the remaining 95% trees D_sd[largeErrSample] <- 4.64 x <- myrtruncnorm(n = len, mean = x, sd = D_sd, lower = 0.1, upper = 500) return(x) } D_simu <- suppressWarnings(replicate(n, chaveError(D, len))) } else { D_simu <- suppressWarnings(replicate(n, myrtruncnorm(len, mean = D, sd = Dpropag, lower = 0.1, upper = 500))) } } else { D_simu <- replicate(n, D) } # --------------------- WD --------------------- #### Below 0.08 and 1.39 are the minimum and the Maximum WD value from the global wood density database respectively WD_simu <- suppressWarnings(replicate(n, myrtruncnorm(n = len, mean = WD, sd = errWD, lower = 0.08, upper = 1.39))) # --------------------- H --------------------- # if there is data for H if (!is.null(HDmodel) | !is.null(H)) { if (!is.null(HDmodel)) { # Propagation of the error thanks to the local model of H H_simu <- apply(D_simu, 2, function(x) predictHeight(x, model = HDmodel, err = TRUE, plot = plot)) } else { # Propagation of the error using the errH value(s) upper <- max(H, na.rm = TRUE) + 15 H_simu <- suppressWarnings(replicate(n, myrtruncnorm(len, mean = H, sd = errH, lower = 1.3, upper = upper))) } # --------------------- AGB --------------------- param_4 <- BIOMASS::param_4 selec <- sample(1:nrow(param_4), n) RSE <- param_4[selec, "sd"] # Construct a matrix where each column contains random errors taken from N(0,RSEi) with i varying between 1 and n matRSE <- mapply(function(y) { rnorm(sd = y, n = len) }, y = RSE) # Posterior model parameters Ealpha <- param_4[selec, "intercept"] Ebeta <- param_4[selec, "logagbt"] # Propagation of the error using simulated parameters Comp <- t(log(WD_simu * H_simu * D_simu^2)) * Ebeta + Ealpha Comp <- t(Comp) + matRSE # Backtransformation AGB_simu <- exp(Comp) / 1000 } # --------------------- Coordinates --------------------- # If there is no data for H, but site coordinates if (!is.null(coord)) { if (is.null(dim(coord))) { coord <- as.matrix(t(coord)) } bioclimParams <- getBioclimParam(coord) # get bioclim variables corresponding to the coordinates if (nrow(bioclimParams) == 1) { bioclimParams <- bioclimParams[rep(1, len), ] } # Equ 7 # Log(agb) = -1.803 - 0.976 (0.178TS - 0.938CWD - 6.61PS) + 0.976log(WD) + 2.673log(D) -0.0299log(D2) param_7 <- BIOMASS::param_7 selec <- sample(1:nrow(param_7), n) # Posterior model parameters RSE <- param_7[selec, "sd"] # vector of simulated RSE values # 
Recalculating n E values based on posterior parameters associated with the bioclimatic variables Esim <- tcrossprod(as.matrix(param_7[selec, c("temp", "prec", "cwd")]), as.matrix(bioclimParams)) # Applying AGB formula over simulated matrices and vectors AGB_simu <- t(t(log(WD_simu)) * param_7[selec, "logwsg"] + t(log(D_simu)) * param_7[selec, "logdbh"] + t(log(D_simu)^2) * param_7[selec, "logdbh2"] + Esim * -param_7[selec, "E"] + param_7[selec, "intercept"]) # Construct a matrix where each column contains random errors taken from N(0,RSEi) with i varying between 1 and n matRSE <- mapply(function(y) { rnorm(sd = y, n = len) }, y = RSE) AGB_simu <- AGB_simu + matRSE AGB_simu <- exp(AGB_simu) / 1000 } if (!is.null(Dlim)) AGB_simu[D < Dlim, ] <- 0 AGB_simu[ which(is.infinite(AGB_simu)) ] <- NA if (Carbon == FALSE) { sum_AGB_simu <- colSums(AGB_simu, na.rm = TRUE) res <- list( meanAGB = mean(sum_AGB_simu), medAGB = median(sum_AGB_simu), sdAGB = sd(sum_AGB_simu), credibilityAGB = quantile(sum_AGB_simu, probs = c(0.025, 0.975)), AGB_simu = AGB_simu ) } else { # Biomass to carbon ratio calculated from Thomas and Martin 2012 forests data stored in DRYAD (tropical # angiosperm stems carbon content) AGC_simu <- AGB_simu * rnorm(mean = 47.13, sd = 2.06, n = n * len) / 100 sum_AGC_simu <- colSums(AGC_simu, na.rm = TRUE) res <- list( meanAGC = mean(sum_AGC_simu), medAGC = median(sum_AGC_simu), sdAGC = sd(sum_AGC_simu), credibilityAGC = quantile(sum_AGC_simu, probs = c(0.025, 0.975)), AGC_simu = AGC_simu ) } return(res) }
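# --- Illustrative sketch (added for this document; not part of the package source) ---
# The list returned above contains `AGB_simu`, a trees x iterations matrix.
# Assuming `resultMC` holds the output of AGBmonteCarlo() (as in the examples),
# a per-tree summary (mean and 95% credible interval) can be derived from it.
if (FALSE) {
  treeMean <- rowMeans(resultMC$AGB_simu, na.rm = TRUE)
  treeCI <- t(apply(resultMC$AGB_simu, 1, quantile, probs = c(0.025, 0.975), na.rm = TRUE))
  head(cbind(meanAGB = treeMean, treeCI)) # one row per tree, AGB in Mg
}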
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/AGBmonteCarlo.R
if (getRversion() >= "2.15.1") {
  utils::globalVariables(c(
    "subplot", "plot", "X", "Y"
  ))
}

#' Attribute trees to subplots
#'
#' Function to attribute each tree to a subplot; trees lying outside any subplot are marked as NA.
#'
#' @param xy The coordinates of the trees for each plot
#' @param plot The label of the plot (same length as the number of rows of `xy`)
#' @param coordAbs Output of the function [cutPlot()]
#'
#' @return A vector with the code of the subplot for each tree; the code is of the form `plot_X_Y`, where `X` and `Y` are the
#' coordinates of the subplot within the plot in which the tree falls.
#' @export
#' @author Arthur PERE
#' @importFrom data.table data.table setDT %between% setnames
#'
#' @examples
#'
#' # Trees relative coordinates
#' xy <- data.frame(x = runif(200, min = 0, max = 200), y = runif(200, min = 0, max = 200))
#'
#'
#' # cut the plot in multiple parts
#' coord <- data.frame(X = rep(c(0, 200, 0, 200), 2), Y = rep(c(0, 0, 200, 200), 2))
#' coord[1:4, ] <- coord[1:4, ] + 5000
#' coord[5:8, ] <- coord[5:8, ] + 6000
#' corner <- rep(c(1, 2, 4, 3), 2)
#' plot <- rep(c("plot1", "plot2"), each = 4)
#'
#' cut <- cutPlot(coord, plot, corner, gridsize = 100, dimX = 200, dimY = 200)
#'
#'
#' # Assign a plot to 200 trees
#' plot <- rep(c("plot1", "plot2"), 100)
#'
#' # attribute trees to subplots
#' attributeTree(xy, plot, cut)
attributeTree <- function(xy, plot, coordAbs) {

  # parameters verification -------------------------------------------------
  if (!is.data.frame(xy)) {
    xy <- data.frame(xy)
  }
  if (nrow(xy) != length(plot)) {
    stop("Your 'plot' vector does not have the same length as the number of rows of 'xy'")
  }
  if (!is.data.frame(coordAbs)) {
    stop("Your parameter 'coordAbs' is not a data frame")
  }

  Coord <- data.table(xy, plot = plot)
  setnames(Coord, colnames(Coord), c("X", "Y", "plot"))
  Coord[, order := .I]

  setDT(coordAbs)

  # Attribute the trees to the subplots
  invisible(lapply(split(coordAbs, by = "subplot", keep.by = TRUE), function(x) {
    Coord[
      plot == x$plot[1] & X %between% range(x$XRel) & Y %between% range(x$YRel),
      subplot := x$subplot[1]
    ]
  }))

  if (anyNA(Coord[, subplot])) {
    warning("Some trees could not be assigned to a subplot")
  }

  return(Coord[order(order), subplot])
}
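# --- Illustrative sketch (added for this document; not part of the package source) ---
# The vector returned by attributeTree() can be summarised directly, e.g. to
# count trees per subplot; `xy`, `plot` and `cut` are the objects built in the
# examples above.
if (FALSE) {
  subplot_vec <- attributeTree(xy, plot, cut)
  table(subplot_vec, useNA = "ifany") # number of trees falling in each subplot
}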
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/attributeTree.R
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "plot", "X", "Y", ".BY", "Xproj", "Yproj", "XRel", "YRel" )) } #' Attribute trees to GPS coordinates #' #' @param xy The relative coordinates of the trees within each plot #' @param plot The label of the plot (same length as the number of rows of `xy` or length of 1) #' @param dim The dimension of the plot (either one value if the plot is a square or a vector if a rectangle) #' @param coordAbs The result of the function [cutPlot()] or [numberCorner()] #' #' @return A data frame with two columns: #' - `Xproj`: The `X` coordinates in the absolute coordinate system #' - `Yproj`: The `Y` coordinates in the absolute coordinate system #' @export #' #' @importFrom data.table setDT setnames #' #' @examples #' #' # Trees relative coordinates #' xy <- data.frame(x = runif(200, min = 0, max = 200), y = runif(200, min = 0, max = 200)) #' #' #' # cut the plot in multiple part #' coord <- data.frame(X = rep(c(0, 200, 0, 200), 2), Y = rep(c(0, 0, 200, 200), 2)) #' coord[1:4, ] <- coord[1:4, ] + 5000 #' coord[5:8, ] <- coord[5:8, ] + 6000 #' corner <- rep(c(1, 2, 4, 3), 2) #' Forestplot <- rep(c("plot1", "plot2"), each = 4) #' #' Outcut <- cutPlot(coord, Forestplot, corner, gridsize = 100, dimX = 200, dimY = 200) #' #' #' # Assign a plot to 200 trees #' Forestplot <- rep(c("plot1", "plot2"), 100) #' #' # attribute trees to subplots #' attributeTreeCoord(xy, Forestplot, dim =100,coordAbs = Outcut) attributeTreeCoord <- function(xy, plot, dim, coordAbs) { # parameters verification ------------------------------------------------- setDT(coordAbs) setnames(coordAbs, c("XAbs", "YAbs"), c("X", "Y"), skip_absent = TRUE) if (!length(plot) %in% c(1, nrow(xy))) { stop("The 'plot' vector must have a length equal to 1 or nrow(xy)") } if (!all(c("plot", "corner", "X", "Y") %in% names(coordAbs))) { stop("The column 'plot', 'corner', 'X' (or 'XAbs'), 'Y' (or 'YAbs') are compulsory for the data frame 'coordAbs'") } if (!all(unique(plot) %in% unique(coordAbs$plot))) { stop("Some plots in the vector 'plot' are absent from the data frame coordAbs") } if (!length(dim) %in% c(1, 2)) { stop("Incorrect dimension vector, must be length of 1 or 2") } # put the dimension on the X and Y if (length(dim) == 1) { dimX <- dim dimY <- dim } else { dimX <- dim[1] dimY <- dim[2] } # function ---------------------------------------------------------------- xy <- data.table(plot, xy) setnames(xy, names(xy), c("plot", "X", "Y")) xy[, order := .I] if ("subplot" %in% names(coordAbs)) { # if we have subplot out <- rbindlist(lapply( split(coordAbs, by = "plot", keep.by = TRUE), function(subData) { res <- procrust(subData[, .(X, Y)], subData[, .(XRel, YRel)]) subDataTree <- as.matrix(xy[ plot == unique(subData$plot), .(X, Y) ]) subDataTree <- subDataTree %*% res$rotation subDataTree <- sweep(subDataTree, 2, res$translation, FUN = "+") return(list(Xproj = subDataTree[, 1], Yproj = subDataTree[, 2], order = xy[ plot == unique(subData$plot), order ])) } )) } else { xy[, ":="(X = X / dimX, Y = Y / dimY)] # divide all the coordinates by the dimension out <- rbindlist(lapply(split(coordAbs, by = "plot"), function(subData) { XY = xy[ plot == unique(subData$plot), .(X, Y, order) ] out = lapply(c("X", "Y"), function(col) { XY[, (1 - Y) * (1 - X) * subData[corner == 1, eval(parse(text = col))] + X * (1 - Y) * subData[corner == 2, eval(parse(text = col))] + Y * X * subData[corner == 3, eval(parse(text = col))] + Y * (1 - X) * subData[corner == 4, eval(parse(text = col))] ] }) return(list(Xproj = 
out[[1]], Yproj = out[[2]], order = XY[, order])) })) } return(as.data.frame(out[order(order), .(Xproj, Yproj)])) }
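# --- Illustrative sketch (added for this document; not part of the package source) ---
# Without subplots, attributeTreeCoord() projects each tree by bilinear
# interpolation between the four plot corners (corner 1 at the origin, numbered
# clockwise as in cutPlot()/numberCorner()). A minimal manual check of that
# weighting on a hypothetical 100 x 100 m axis-aligned plot:
if (FALSE) {
  corners <- data.frame(X = c(0, 100, 100, 0), Y = c(0, 0, 100, 100)) # corners 1 to 4
  x <- 0.25; y <- 0.5 # relative position already divided by the plot dimensions
  Xproj <- (1 - x) * (1 - y) * corners$X[1] + x * (1 - y) * corners$X[2] +
    x * y * corners$X[3] + (1 - x) * y * corners$X[4]
  Yproj <- (1 - x) * (1 - y) * corners$Y[1] + x * (1 - y) * corners$Y[2] +
    x * y * corners$Y[3] + (1 - x) * y * corners$Y[4]
  c(Xproj, Yproj) # 25, 50 for this simple square plot
}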
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/attributeTreeCoord.R
#' Function that return a possibly cached file, transparently downloading it if missing #' #' @section Localisation: #' Cache path discovery protocol #' 1. BIOMASS.cache option set to an **existing** folder #' 2. **existing** user data folder [rappdirs::user_data_dir()] #' - On Linux : `~/.local/share/R/BIOMASS` #' - On Mac OS X : `~/Library/Application Support/R/BIOMASS` #' - On Windows 7 up to 10 : `C:\\Users\\<username>\\AppData\\Local\\R\\BIOMASS` #' - On Windows XP : `C:\\Documents and Settings\\<username>\\Data\\R\\BIOMASS` #' 3. fallback to R session tempdir #' #' @param nameFile character. file to resolve cached path. #' @return file path of the resolved cached file. #' @importFrom utils download.file unzip #' @export cacheManager <- function(nameFile) { if(length(nameFile)>1) { stop("Only one file at a time please!") } if (nameFile == "correctTaxo.log") { return(cachePath("correctTaxo.log")) } if (nameFile == "feldRegion.grd") { return(system.file("external", "feldRegion.grd", package = "BIOMASS", mustWork = TRUE)) } url <- list( E.bil = "https://github.com/umr-amap/BIOMASS/raw/master/data-raw/climate_variable/E.zip", CWD.bil = "https://github.com/umr-amap/BIOMASS/raw/master/data-raw/climate_variable/CWD.zip", bio4.bil = "https://github.com/umr-amap/BIOMASS/raw/master/data-raw/climate_variable/wc2-5.zip", bio15.bil = "https://github.com/umr-amap/BIOMASS/raw/master/data-raw/climate_variable/wc2-5.zip" ) url <- url[[nameFile]] if(is.null(url)) { stop( "I don't know how to get this file!", "\n ", nameFile ) } req <- httr2::request(url) req <- httr2::req_error(req, function(response) FALSE) qryResult <- httr2::req_perform(req) if (httr2::resp_is_error(qryResult)) { message("There appears to be a problem reaching the directory.") return(invisible(NULL)) } if(!file.exists(cachePath(nameFile))) { dest <- tempfile(fileext = ".zip") on.exit(unlink(dest)) download.file(url, dest) unzip(dest, exdir = cachePath()) } if(!file.exists(cachePath(nameFile))) { stop("Error while retrieving file ", nameFile) } cachePath(nameFile) } #' Function used to build a file path based on a cache folder #' #' Parameters are similar to that of file.path function #' #' @inheritSection cacheManager Localisation #' #' @param ... character vectors. Elements of the subpath of cache path #' @return A character vector of normalized file path with a source attribute #' holding a hint to cache path source ("option", "data", "temp") #' @export cachePath <- function(...) { # user defined path basePath <- getOption("BIOMASS.cache") src <- "option" # if no user defined path, use subdir of user data dir if(is.null(basePath)) { # appauthor defaults to appname on windows resulting in duplicated sub path basePath <- rappdirs::user_data_dir("R/BIOMASS", appauthor=NULL) src <- "data" } # if user defined or user data path does not exist # fallback to R session temporary folder if(!dir.exists(basePath)) { basePath <- file.path(tempdir(check=TRUE), "BIOMASS") src <- "temp" } # return path built from base cache path and given subdir (...) # and how it was built structure( normalizePath(do.call(file.path, c(basePath, list(...))), mustWork = FALSE), source = src ) } #' Function used to create or activate a permanent cache. #' #' Permanent cache is located by default in user data dir. #' #' You can provide a custom path (that will be defined as a BIOMASS.cache option) #' but clearCache function will refuse to operate on it for security reasons. 
#' @param path Use a custom path to host cache #' @return No return value, called for side effects #' @export createCache <- function(path=NULL) { if(is.null(path)) { path <- rappdirs::user_data_dir("R/BIOMASS", NULL) } else { options(BIOMASS.cache=path) message("options(BIOMASS.cache=\"", path, "\")") } dir.create(path, showWarnings = FALSE, recursive = TRUE) invisible(NULL) } #' Function to clear cache content and possibly remove it #' #' It will refuse to clear or remove a custom cache folder set using BIOMASS.cache #' option as we don't know whether this folder contains other possibly valuable #' files apart from our cached files. #' #' @param remove logical. If TRUE cache folder will be removed too (not only content) #' resulting in deactivating cache as a side effect #' @return No return value, called for side effects #' @importFrom utils askYesNo clearCache <- function(remove=FALSE) { basePath <- cachePath() # temporary folder will be removed automatically at the end of the session if(attr(basePath, "source")=="temp") { return() } # prevent clearing custom path if(attr(basePath, "source")=="option") { if(remove) { options(BIOMASS.cache=NULL) } stop( "Custom cache path defined using BIOMASS.cache option must be removed manually!", "\n ", basePath, if(remove) "\n Resetting option BIOMASS.cache to simulate deactivation" ) } # Ask for confirmation if(askYesNo(paste( "Warning! This will permanently remove files from\n", basePath, "\n\n Continue?"), FALSE)) { if(remove) { unlink(basePath, recursive = TRUE) } else { unlink(list.files(basePath, include.dirs = TRUE, full.names = TRUE), recursive = TRUE) } } invisible(NULL) }
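# --- Illustrative sketch (added for this document; not part of the package source) ---
# Typical cache workflow with the functions defined above: activate a permanent
# cache, let cacheManager() download or reuse a file, inspect the cache path,
# then clean up.
if (FALSE) {
  createCache()                # activate the default user-data cache
  f <- cacheManager("E.bil")   # downloaded on first call, reused afterwards
  cachePath()                  # where cached files currently live
  clearCache(remove = FALSE)   # wipe cached files (asks for confirmation)
}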
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/cacheManager.R
#' Computing tree above-ground biomass (AGB) #' #' This function uses Chave et al. 2014's pantropical models to estimate the above-ground biomass of tropical trees. #' #' @param D Tree diameter (in cm), either a vector or a single value. #' @param WD Wood density (in g/cm3), either a vector or a single value. If not available, see [getWoodDensity()]. #' @param H (optional) Tree height (H in m), either a vector or a single value. If not available, see [retrieveH()] #' and [modelHD()]. Compulsory if the coordinates `coord` are not given. #' @param coord (optional) Coordinates of the site(s), either a vector giving a single site #' (e.g. c(longitude, latitude)) or a matrix/dataframe with two columns (e.g. cbind(longitude, latitude)). #' The coordinates are used to account for variation in height-diameter relationship thanks to an environmental #' proxy (parameter E in Chave et al. 2014). Compulsory if tree heights `H` are not given. #' @param Dlim (optional) Minimum diameter (in cm) for which aboveground biomass should be calculated #' (all diameter below `Dlim` will have a 0 value in the output). #' #' @details #' This function uses two different ways of computing the above-ground biomass of a tree: #' #' 1) If tree height data are available, the AGB is computed thanks to the following equation (Eq. 4 in Chave et al., 2014): #' \deqn{AGB = 0.0673 * (WD * H * D^2)^0.976} #' #' 2) If no tree height data is available, the AGB is computed thanks to the site coordinates with the following equation, slightly modified from Eq. 7 in Chave et al., 2014 (see Réjou-Méchain et al. 2017): #' \deqn{AGB = exp(-2.024- 0.896*E + 0.920*log(WD) + 2.795*log(D) - 0.0461*(log(D)^2))} where `E` is a measure of environmental stress estimated from the site coordinates (`coord`). #' #' @return The function returns the AGB in Mg (or ton) as a single value or a vector. #' @export #' @references #' Chave et al. (2014) _Improved allometric models to estimate the aboveground biomass of tropical trees_, #' Global Change Biology, 20 (10), 3177-3190 #' @author Maxime REJOU-MECHAIN, Ariane TANGUY, Arthur PERE #' @seealso [computeE()] #' @examples #' # Create variables #' D <- 10:99 #' WD <- runif(length(D), min = 0.1, max = 1) #' H <- D^(2 / 3) #' #' # If you have height data #' AGB <- computeAGB(D, WD, H) #' #' # If you do not have height data and a single site #' lat <- 4.08 #' long <- -52.68 #' coord <- c(long, lat) #' \donttest{ #' AGB <- computeAGB(D, WD, coord = coord) #' } #' #' # If you do not have height data and several sites (here three) #' lat <- c(rep(4.08, 30), rep(3.98, 30), rep(4.12, 30)) #' long <- c(rep(-52.68, 30), rep(-53.12, 30), rep(-53.29, 30)) #' coord <- cbind(long, lat) #' \donttest{ #' AGB <- computeAGB(D, WD, coord = coord) #' } #' #' @keywords AGB above-ground biomass forest carbon allometry computeAGB <- function(D, WD, H = NULL, coord = NULL, Dlim = NULL) { # Parameters verification ------------------------------------------------- if (length(D) != length(WD)) { stop("D and WD have different lenghts") } if (!is.null(H)) { if (length(D) != length(H)) { stop("H and WD have different length") } if (anyNA(D)) { warning("NA values in D") } if (anyNA(H) & !anyNA(D)) { warning("There is some NA values in given heights. 
For those trees the function will return NA AGB, you may construct a height-diameter model to overcome that issue (see ?HDFunction and ?retrieveH)") } if (!is.null(coord)) { stop("Both height and coordinates are providen.") } } if (!is.null(coord) && ((is.vector(coord) && length(coord) != 2) || (is.matrix(coord) && nrow(coord) != length(D)))) { stop("coord should be either - a vector (e.g. c(longitude, latitude)) - a matrix with two columns (longitude and latitude) having the same number of rows as the number of trees (length(D))") } if (is.null(H) && is.null(coord)) { stop("You need to provide either H or coord") } # Compute the AGB --------------------------------------------------------- # If there is height data for all the trees if (!is.null(H)) { AGB <- (0.0673 * (WD * H * D^2)^0.976) / 1000 # Eq 4 from Chave et al. 2014 Global change biology } else { # If there is no heigth, but the coordinates : if (is.null(dim(coord))) { coord <- as.matrix(t(coord)) } E <- computeE(coord) # environmental index in Chave et al. 2014 # Modified Eq 7 from Chave et al. 2014 Global change biology AGB <- exp(-2.023977 - 0.89563505 * E + 0.92023559 * log(WD) + 2.79495823 * log(D) - 0.04606298 * (log(D)^2)) / 1000 } if (!is.null(Dlim)) AGB[D < Dlim] <- 0 return(AGB) }
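# --- Illustrative sketch (added for this document; not part of the package source) ---
# Worked example of the height-based model used above (Eq. 4 in Chave et al. 2014):
# AGB (Mg) = 0.0673 * (WD * H * D^2)^0.976 / 1000, with D in cm, H in m, WD in g/cm3.
if (FALSE) {
  D <- 30; H <- 25; WD <- 0.6
  manual <- 0.0673 * (WD * H * D^2)^0.976 / 1000
  all.equal(manual, computeAGB(D = D, WD = WD, H = H)) # TRUE
}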
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/computeAGB.R
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "RASTval", "long", "lat", "slice", "i.RASTval" )) } #' Retrieving Chave's environmental index #' #' Extract the Chave et al. 2014's environmental index thanks to the coordinates of the data. #' The function is time-consuming at its first use as it downloads a raster in a folder (see Details). #' However, as soon as the raster is downloaded once, the function then runs fast. #' #' #' #' @param coord Coordinates of the site(s), a matrix/dataframe with two columns (e.g. cbind(longitude, latitude)) (see examples). #' #' #' @inheritSection cacheManager Localisation #' #' @details #' The Chave's environmental index, `E`, has been shown to be an important covariable in #' the diameter-height relationship for tropical trees. It is calculated as: #' \deqn{E = 1.e-3 * (0.178 * TS - 0.938 * CWD - 6.61 * PS)} #' where `TS` is temperature seasonality as defined in the Worldclim dataset (bioclimatic variable 4), #' `CWD` is the climatic water deficit (in mm/yr, see Chave et al. 2014) and `PS` is the #' precipitation seasonality as defined in the Worldclim dataset (bioclimatic variable 15). #' #' #' The E index is extracted from a raster file (2.5 arc-second resolution, or ca. 5 km) available #' at http://chave.ups-tlse.fr/pantropical_allometry.htm #' #' @return The function returns `E`, the environmental index computed thanks to the Chave et al 2014's formula as a single value or a vector. #' @references #' Chave et al. (2014) _Improved allometric models to estimate the aboveground biomass of tropical trees_, #' Global Change Biology, 20 (10), 3177-3190 #' @author Jerome CHAVE, Maxime REJOU-MECHAIN, Ariane TANGUY, Arthur PERE #' #' @export #' @keywords environmental index internal #' @examples #' # One study site #' lat <- 4.08 #' long <- -52.68 #' coord <- cbind(long, lat) #' \donttest{ #' E <- computeE(coord) #' } #' #' # Several study sites (here three sites) #' long <- c(-52.68, -51.12, -53.11) #' lat <- c(4.08, 3.98, 4.12) #' coord <- cbind(long, lat) #' \donttest{ #' E <- computeE(coord) #' } #' #' @importFrom terra rast extract buffer vect #' @importFrom data.table as.data.table computeE <- function(coord) { RAST <- rast(cacheManager("E.bil")) if (is.vector(coord)) { return(extract(RAST, matrix(coord, ncol = 2), method = "bilinear")$E) } # set the coord in a data.table coord <- as.data.table(coord) setnames(coord, colnames(coord), c("long", "lat")) # coord_unique <- unique(coord) coord_unique <- na.omit(coord_unique) # Extract the raster value coord_unique[, RASTval := extract(RAST, coord_unique, method = "bilinear")$E] # search around the point if there is an NA in the RASTval r <- 0 i <- 1 while (anyNA(coord_unique$RASTval)) { r <- r + 5000 poly_buffer <- buffer(x = vect(coord_unique[is.na(RASTval), cbind(long, lat)], crs = "+proj=longlat"), width = r) coord_unique[is.na(RASTval), RASTval := extract(RAST, poly_buffer, fun = mean, method = "bilinear", na.rm = TRUE)$E] if (i > 8) { coord[coord_unique, on = c("long", "lat"), RASTval := i.RASTval] stop( "The coordinate n ", paste(which(is.na(coord$RASTval)), collapse = " "), " are too far for first non-NA value in the raster" ) } i <- i + 1 } return(coord[coord_unique, on = c("long", "lat"), RASTval]) }
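# --- Illustrative sketch (added for this document; not part of the package source) ---
# The raster used above stores E precomputed from the formula given in the
# documentation: E = 1e-3 * (0.178 * TS - 0.938 * CWD - 6.61 * PS).
# The bioclimatic values below are purely hypothetical, just to show the arithmetic.
if (FALSE) {
  TS <- 650    # temperature seasonality (Worldclim bio4, hypothetical)
  CWD <- -200  # climatic water deficit in mm/yr (hypothetical)
  PS <- 60     # precipitation seasonality (Worldclim bio15, hypothetical)
  E <- 1e-3 * (0.178 * TS - 0.938 * CWD - 6.61 * PS)
  E # about -0.093 with these values
}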
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/computeE.R
#' Retrieving Feldpausch regions #' #' Extract the Feldpausch et al. (2012)'s regions using local coordinates. #' #' @inheritParams computeE #' @param level a string or a vector of string, the length must match the number of rows of the parameter coord. #' This parameter gives the scale at which Feldpausch regions should be assigned. There are tree levels: #' - `region`: Models assign at sub-continent levels, value by default #' - `continent`: Models assign at the Africa, South America, Asia and Australia levels #' - `world`: Pantropical model #' #' @return The function returns a vector with the Feldpausch et al. (2012)'s regions that can be #' incorporated in the `retrieveH` function. #' @export #' #' @examples #' #' # One study site #' lat <- 4.08 #' long <- -52.68 #' coord <- cbind(long, lat) #' \donttest{ #' FeldRegion <- computeFeldRegion(coord) #' } #' #' # Several study sites (here three sites) #' long <- c(-52.68, -51.12, -53.11) #' lat <- c(4.08, 3.98, 4.12) #' coord <- cbind(long, lat) #' \donttest{ #' FeldRegion <- computeFeldRegion(coord) #' } #' #' @references #' Feldpausch, T.R., et al. (2012). _Tree height integrated into pantropical forest biomass estimates._ #' Biogeosciences, 9, 3381–3403. #' #' @author Arthur PERE #' #' @importFrom terra rast extract computeFeldRegion <- function(coord, level = c("region")) { # Parameter verification -------------------------------------------------- #level <- match.arg(level) if (!(length(level) %in% c(1, nrow(coord)))) { stop("The vector region must be a length of 1 or the number of rows of your coord parameter") } if (!all(grepl("(^world$)|(^region$)|(^continent$)", tolower(level)))) { stop("The level parameter must be one of this tree levels: 'region', 'continent' or 'world'") } # if the user have the level set on world if (all(level == "world")) { return(rep("Pantropical", nrow(coord))) } # raster ------------------------------------------------------------------ RAST <- rast(cacheManager("feldRegion.grd")) # Extract the raster value RASTval <- extract(RAST, coord) FeldRegion <- as.character(RASTval$Region) level <- tolower(level) # if (all(level == "region")) { # return(FeldRegion) # } # level different from world and region ------------------------------------- # if the user choose to take a level if (length(level) == 1) { level <- rep(level, length(FeldRegion)) } # Replace the world level by Pantropical FeldRegion[ level == "world" ] <- "Pantropical" # Replace the continent level by different value: FeldRegion[ level == "continent" ] <- sub( pattern = ".+(Africa)", replacement = "Africa", FeldRegion[ level == "continent" ] ) FeldRegion[ level == "continent" ] <- sub( pattern = ".+(Amazonia|Shield)", replacement = "SAmerica", FeldRegion[ level == "continent" ] ) if (anyNA(FeldRegion)) { warning("There is NA in your final vector, those NA will be replaced by 'Pantropical'") } FeldRegion[is.na(FeldRegion)] <- "Pantropical" return(FeldRegion) }
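# --- Illustrative sketch (added for this document; not part of the package source) ---
# The `level` argument can differ between rows: here the first site keeps its
# sub-continental region while the other two are aggregated to the continent and
# pantropical levels, as described in the documentation above. Coordinates are
# hypothetical example sites.
if (FALSE) {
  coord <- cbind(long = c(-52.68, -51.12, 12.50), lat = c(4.08, 3.98, -0.70))
  computeFeldRegion(coord, level = c("region", "continent", "world"))
}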
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/computeFeldRegion.R
#' Correct the GPS coordinates #' #' @description #' This function builds the most probable GPS coordinates of the plot corners from multiple GPS measurements. #' #' @details #' GPS coordinates should be either given in longitude latitude (longlat) or in projected coordinates (projCoord) #' #' #' @param longlat (optional) data frame with the coordinate in longitude latitude (eg. cbind(longitude, latitude)). #' @param projCoord (optional) data frame with the projected coordinate in X Y #' @param coordRel data frame with the relative coordinate in the same order than the longlat or projCoord #' @param rangeX a vector of length 2 giving the range for plot relative X coordinates #' @param rangeY a vector of length 2 giving the range for plot relative Y coordinates #' @param maxDist a numeric giving the maximum distance above which GPS measurements should be considered as outliers (by default 15 m) #' @param drawPlot a logical if you want to display a graphical representation #' @param rmOutliers a logical if you want to remove the outliers from coordinates calculation #' #' @author Arthur PERE, Maxime REJOU-MECHAIN #' #' @return If there are no outliers or rmOutliers = TRUE, a list with: #' - `cornerCoords`: a data.frame with the coordinates of the corners #' - `correctedCoord`: a data.frame with the adjusted coordinates given as input #' - `polygon`: a spatial polygon #' - `outliers`: index of coordinates lines considered as outliers, if any #' - `codeUTM`: the UTM code of the coordinates if the parameter `longlat` is set #' #' #' #' @export #' #' @importFrom data.table between #' @importFrom sf st_multipoint st_polygon st_sfc #' @importFrom graphics points #' #' @author Arthur PERE, Maxime REJOU-MECHAIN #' #' @examples #' projCoord <- data.frame( #' X = c( #' runif(5, min = 9, max = 11), runif(5, min = 8, max = 12), #' runif(5, min = 80, max = 120), runif(5, min = 90, max = 110) #' ), #' Y = c( #' runif(5, min = 9, max = 11), runif(5, min = 80, max = 120), #' runif(5, min = 8, max = 12), runif(5, min = 90, max = 110) #' ) #' ) #' projCoord <- projCoord + 1000 #' coordRel <- data.frame( #' X = c(rep(0, 10), rep(100, 10)), #' Y = c(rep(c(rep(0, 5), rep(100, 5)), 2)) #' ) #' #' aa <- correctCoordGPS( #' projCoord = projCoord, coordRel = coordRel, #' rangeX = c(0, 100), rangeY = c(0, 100) #' ) #' bb <- correctCoordGPS( #' projCoord = projCoord, coordRel = coordRel, #' rangeX = c(0, 100), rangeY = c(0, 100), rmOutliers = TRUE #' ) #' \donttest{ #' correctCoordGPS( #' projCoord = projCoord, coordRel = coordRel, #' rangeX = c(0, 100), rangeY = c(0, 100), drawPlot = TRUE #' ) #' } #' correctCoordGPS <- function(longlat = NULL, projCoord = NULL, coordRel, rangeX, rangeY, maxDist = 15, drawPlot = FALSE, rmOutliers = TRUE) { # To maintain user's original options oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) # parameters verification ------------------------------------------------- if (is.null(longlat) && is.null(projCoord)) { stop("Give at least one set of coordinates: longlat or projCoord") } if (!is.null(longlat) && !is.null(projCoord)) { stop("Give only one set of coordinates: longlat or projCoord") } if (length(rangeX) != 2 || length(rangeY) != 2) { stop("The rangeX and/or rangeY must be of length 2") } if (length(maxDist) != 1) { stop("Your argument maxDist must be of length 1") } if (!all(between(coordRel[, 1], lower = rangeX[1], upper = rangeX[2]) & between(coordRel[, 2], lower = rangeY[1], upper = rangeY[2]))) { stop("coordRel must be inside the X and Y ranges") } if 
((!is.null(longlat) && any(dim(longlat) != dim(coordRel))) || (!is.null(projCoord) && any(dim(projCoord) != dim(coordRel)))) { stop("GPS and relative coordinates are not of the same dimension") } # function ---------------------------------------------------------------- # Transform the geographic coordinates into UTM coordinates if (!is.null(longlat)) { projCoord <- latlong2UTM(longlat) codeUTM <- unique(projCoord[, "codeUTM"]) projCoord <- projCoord[, c("X", "Y")] } # Transformation CoordRel to CoordAbs res <- procrust(projCoord, coordRel) coordAbs <- as.matrix(coordRel) %*% res$rotation coordAbs <- sweep(coordAbs, 2, res$translation, FUN = "+") # Calculate the distances between the GNSS measurements and the CoordAbs dist <- sqrt((coordAbs[, 1] - projCoord[, 1])^2 + (coordAbs[, 2] - projCoord[, 2])^2) outliers <- which(dist > maxDist) if (length(outliers)==nrow(projCoord)){ stop("All coordinates points are considered as outliers at the first stage.\n This may be because some coordinates have very large error associated.\n Try to remove these very large error or reconsider the maxDist parameter by increasing the distance") } # retransform the coordRel without the outliers if (rmOutliers & length(outliers)>0) { refineCoord <- TRUE while(refineCoord){ res <- procrust(projCoord[-outliers, ], coordRel[-outliers,]) coordAbs <- as.matrix(coordRel) %*% res$rotation coordAbs <- sweep(coordAbs, 2, res$translation, FUN = "+") newdist <- sqrt((coordAbs[, 1] - projCoord[, 1])^2 + (coordAbs[, 2] - projCoord[, 2])^2) if(all(which(newdist > maxDist)==outliers)) refineCoord <- FALSE outliers <- which(newdist > maxDist) } } # Create the matrix of corners to return the projected coordinate of the corner of the plot cornerCoord <- as.matrix(expand.grid(X = sort(rangeX), Y = sort(rangeY))) # switch between the lines 3 and 4 because the corner 3 and 4 are invert in the expand.grid cornerCoord <- cornerCoord[c(1, 2, 4, 3), ] # Project the corner matrix in the projected coordinate cornerCoord <- as.matrix(cornerCoord) %*% res$rotation cornerCoord <- sweep(cornerCoord, 2, res$translation, FUN = "+") # Create a polygon p <- st_multipoint(rbind(cornerCoord, cornerCoord[1, ])) ps <- st_polygon(list(p), 1) sps <- st_sfc(list(ps)) # draw plot --------------------------------------------------------------- if (drawPlot) { par(xpd = TRUE, mar = par("mar") + c(0, 0, 0, 7.5)) plot(if(length(outliers)==0) projCoord else projCoord[-outliers, ], col = "grey30", main = "Plot drawing", xlim = range(projCoord[, 1], coordAbs[, 1]), ylim = range(projCoord[, 2], coordAbs[, 2]), asp = 1, xlab = "X", ylab = "Y", axes = FALSE, frame.plot = FALSE ) usr <- par("usr") grid <- sapply(par(c("xaxp", "yaxp")), function(x) { seq(x[1], x[2], length.out = x[3] + 1) }, simplify = FALSE) # draw the grid segments(x0 = grid$xaxp, y0 = usr[3], y1 = usr[4], col = "grey80", lty = 1) segments(y0 = grid$yaxp, x0 = usr[1], x1 = usr[2], col = "grey80", lty = 1) # draw the axis axis(side = 1, lty = "blank", las = 1) axis(side = 2, lty = "blank", las = 1) plot(sps, add = TRUE, lwd = 3) points(coordAbs, col = "black", pch = 15, cex = 1.3) if(length(outliers)>0) points(projCoord[outliers, ], col = "red", pch = 4, cex = 1) legend( x = usr[2], y = grid$yaxp[length(grid$yaxp) - 1], c("GPS measurements", ifelse(rmOutliers, "Outliers (discarded)", "Outliers"), "Corrected coord"), col = c("grey30", "red", "black"), pch = c(1, 4, 15, 49), bg = "grey90" ) par(xpd = NA, mar = c(5, 4, 4, 2) + 0.1) } # return 
------------------------------------------------------------------ if (length(outliers) != 0 & !rmOutliers) { warning( "Be carefull, you may have GNSS measurement outliers. \n", "Removing them may improve the georeferencing of your plot (see the rmOutliers argument)." ) } output <- list( cornerCoords = data.frame(X = cornerCoord[, 1], Y = cornerCoord[, 2]), correctedCoord=data.frame(X = coordAbs[, 1], Y = coordAbs[, 2]), polygon = sps, outliers = outliers ) if (!is.null(longlat)) { output$codeUTM <- codeUTM } return(output) }
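# --- Illustrative sketch (added for this document; not part of the package source) ---
# The correction above relies on the internal helper procrust(), which returns a
# rotation matrix and a translation vector; relative coordinates are mapped into
# the projected system as coordRel %*% rotation + translation. A minimal check of
# the residual distances, using the `projCoord` and `coordRel` objects from the
# examples above:
if (FALSE) {
  res <- procrust(projCoord, coordRel)
  fitted <- sweep(as.matrix(coordRel) %*% res$rotation, 2, res$translation, FUN = "+")
  summary(sqrt(rowSums((fitted - projCoord)^2))) # residual distances (m)
}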
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/correctCoordGPS.R
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "query", "from", "submittedName", "nameSubmitted","slice", ".I", "..score", "matchedName", "outName", "nameModified", "scientificScore", "genusCorrected", "speciesCorrected", "acceptedName", "nameScientific", "Name_submitted", "Overall_score", "Name_matched", "Accepted_name", ".N", "." )) } ##%######################################################%## # # #### ' Checking typos in names #### # # ##%######################################################%## #' #' This function corrects typos for a given taxonomic name using the Taxonomic Name Resolution Service (TNRS). #' #' #' @details #' This function create a file named correctTaxo.log (see Localisation), this file have the memory of all the previous requests, as #' to avoid the replication of time-consuming server requests. #' #' By default, names are queried in batches of 500, with a 0.5s delay between each query. These values can be modified using options: #' `options(BIOMASS.batch_size=500)` for batch size (max 1000), `options(BIOMASS.wait_delay=0.5)` for delay (in seconds). #' #' #' @inheritSection cacheManager Localisation #' #' #' @param genus Vector of genera to be checked. Alternatively, the whole species name (genus + species) #' or (genus + species + author) may be given (see example). #' @param species (optional) Vector of species to be checked (same size as the genus vector). #' @param score Score of the matching ( see https://tnrs.biendata.org/instructions ) below which corrections are discarded. #' @param useCache logical. Whether or not use a cache to reduce online search of taxa names (NULL means use cache but clear it first) #' @param verbose logical. If TRUE various messages are displayed during process #' @param accepted logical. If TRUE accepted names will be returned instead of matched names. Cache will not be used as synonymy changes over time. #' #' @return The function returns a dataframe with the corrected (or not) genera and species. #' #' @references Boyle, B. et al. (2013). #' _The taxonomic name resolution service: An online tool for automated standardization of plant names_. BMC bioinformatics, 14, 1. 
doi:10.1186/1471-2105-14-16 #' #' @author Ariane TANGUY, Arthur PERE, Maxime REJOU-MECHAIN, Guillaume CORNU #' #' @examples #' \donttest{ #' correctTaxo(genus = "Astrocarium", species = "standleanum") #' correctTaxo(genus = "Astrocarium standleanum") #' } #' #' @export #' @importFrom data.table tstrsplit := data.table setkey chmatch fread fwrite setDF setDT rbindlist #' @importFrom rappdirs user_data_dir #' @importFrom jsonlite fromJSON toJSON #' @importFrom utils head #' correctTaxo <- function(genus, species = NULL, score = 0.5, useCache = FALSE, verbose = TRUE, accepted=FALSE) { # Check if package httr2 is available if (!requireNamespace("httr2", quietly = TRUE)) { warning( 'To use this function, you must install the "httr2" library \n\n', '\t\tinstall.packages("httr2")' ) return(invisible(NULL)) } # check parameters ------------------------------------------------- WAIT_DELAY <- getOption("BIOMASS.wait_delay", 0.5) # delay between requests to tnrs (to reduce load on server) BATCH_SIZE <- min(getOption("BIOMASS.batch_size", 500), 1000) # number of taxa sought per request to tnrs (max 1000) if (is.logical(useCache) && !useCache) { message("Using useCache=TRUE is recommended to reduce online search time for the next query") } if (all(is.na(genus))) { stop("Please supply at least one name for genus") } if (!is.null(species)) { if (all(is.na(species))) { stop("Please supply at least one name for species") } if (length(genus) != length(species)) { stop("You should provide two vectors of genera and species of the same length") } species[is.na(genus)] <- NA } if(accepted && !is.null(useCache) && useCache) { warning("Cache cannot be used if accepted names are required! I will ignore it") useCache <- FALSE } checkURL <- function(url) { tryCatch( { req <- httr2::request(url) req <- httr2::req_method(req, "HEAD") httr2::req_perform(req) TRUE }, error = function(e) { FALSE } ) } if(!checkURL("https://tnrsapi.xyz")) { warning("Sorry there is no internet connexion or the tnrs site is unreachable!", call. = FALSE, immediate. 
= TRUE) return(invisible(NULL)) } # sub-function definition ------------------------------------------------- # split x always returning count columns (padding with NA) tstrsplit_NA <- function(x, pattern = " ", count = 2) { # NOTE extraneous columns ignored maybe better paste them together split <- utils::head(tstrsplit(x, pattern), count) # pad with NA if (length(split) < count) { split <- c(split, rep(NA_character_, count - length(split))) } split } # Data preparation -------------------------------------------------------- genus <- as.character(genus) if (is.null(species)) { # Create a dataframe with the original values userTaxo <- data.table( genus = NA_character_, species = NA_character_, query = genus ) # split genus (query) userTaxo[, c("genus", "species") := tstrsplit_NA(query)] } else { species <- as.character(species) # Create a dataframe with the original values userTaxo <- data.table( genus = genus, species = species, query = genus ) # species can be NA so handle it with care when pasting userTaxo[!is.na(genus) & !is.na(species), query := paste(query, species)] } # If there is an empty genus userTaxo[genus == "", ":="(genus = NA_character_, species = NA_character_, query = NA_character_)] # If there is empty species userTaxo[species == "", ":="(species = NA_character_, query = gsub(" ", "", query))] # get unique values qryTaxo <- unique(userTaxo[!is.na(query)]) # get cached taxonomic corrections if needed ------------------------------------------------- cachedTaxo <- NULL if (is.null(useCache) || useCache) { cachePath <- cacheManager("correctTaxo.log") if (file.exists(cachePath)) { # should we remove cache ? if (is.null(useCache)) { file.remove(cachePath) useCache <- TRUE } else { if (verbose) { message("Cache last modification time : ", as.character.POSIXt(file.info(cachePath)["mtime"])) } cachedTaxo <- fread(file = cachePath) cachedTaxo[, from := "cache"] # if not the right format then ignore it! 
if (!("submittedName" %in% names(cachedTaxo))) { cachedTaxo <- NULL } } } else if (is.null(useCache)) { useCache <- TRUE } } # init cachedTaxo with empty data if (is.null(cachedTaxo)) { cachedTaxo <- data.table( submittedName = character(0), score = numeric(0), matchedName = character(0), from = character(0), acceptedName = character(0) ) } # identify taxo not present in cache missingTaxo <- qryTaxo[!cachedTaxo[, .(submittedName)], on = c(query = "submittedName")] # query tnrs for missing taxo if any queriedTaxo <- NULL if (nrow(missingTaxo)) { # split missing taxo in chunks of 30 slices <- split(missingTaxo[, slice := ceiling(.I / BATCH_SIZE)], by = "slice", keep.by = TRUE) # for each slice of queries if (verbose) { pb <- utils::txtProgressBar(style = 3) } queriedTaxo <- rbindlist(lapply(slices, function(slice) { req <- httr2::request("https://tnrsapi.xyz/tnrs_api.php") req <- httr2::req_headers(req, 'Accept' = 'application/json', 'Content-Type' = "application/json", 'charset' = "UTF-8" ) req <- httr2::req_body_json(req, list( opts = list( class = jsonlite::unbox("wfo"), mode = jsonlite::unbox("resolve"), matches = jsonlite::unbox("best") ), data = unname(data.frame(seq_along(slice$query),slice$query)) )) req <- httr2::req_error(req, function(response) FALSE) qryResult <- httr2::req_perform(req) if (httr2::resp_is_error(qryResult)) { message("There appears to be a problem reaching the tnrs API.") return(invisible(NULL)) } # parse answer from tnrs answer <- setDT(httr2::resp_body_json(qryResult, simplifyVector = TRUE)) # recode empty strings as NA answer[, names(answer) := lapply(.SD, function(x) { x[x==""]<-NA x })] # format result answer <- answer[, .( submittedName = Name_submitted, score = as.numeric(Overall_score), matchedName = Name_matched, from = "iplant_tnrs", acceptedName = Accepted_name )] if (verbose) { utils::setTxtProgressBar(pb, slice$slice[1] / length(slices)) } Sys.sleep(WAIT_DELAY) answer })) if (verbose) { close(pb) } } # build reference taxonomy from cached and queried ones fullTaxo <- rbindlist(list(queriedTaxo, cachedTaxo), fill = TRUE) # inject taxo names in original (user provided) taxonomy if(accepted) { userTaxo[fullTaxo, on = c(query = "submittedName"), `:=`( outName = ifelse(score >= ..score, acceptedName, query), nameModified = ifelse(score >= ..score, "TRUE", "NoMatch(low_score)"), from = from )] } else { userTaxo[fullTaxo, on = c(query = "submittedName"), `:=`( outName = ifelse(score >= ..score, matchedName, query), nameModified = ifelse(score >= ..score, "TRUE", "NoMatch(low_score)"), from = from )] } # if nothing changed tell it userTaxo[ !is.na(outName) & (outName == query) & (nameModified != "NoMatch(low_score)"), nameModified := "FALSE" ] # split name userTaxo[, c("genusCorrected", "speciesCorrected") := tstrsplit_NA(outName)] # If genera or species not found by TNRS # Genera userTaxo[ ((nameModified == "TRUE") | is.na(nameModified)) & is.na(genusCorrected) & !is.na(genus), c("genusCorrected", "nameModified") := list(genus, "TaxaNotFound") ] # Species userTaxo[ (nameModified %in% c("TRUE", "TaxaNotFound")| is.na(nameModified)) & is.na(speciesCorrected) & !is.na(species), `:=`( speciesCorrected = species, nameModified = ifelse(nameModified == "TRUE", "SpNotFound", nameModified) ) ] # cache full taxonomy for further use if (useCache && !is.null(queriedTaxo)) { # complete taxo with matched names and accepted names matchedTaxo <- unique(fullTaxo[submittedName != matchedName], by = "matchedName" )[, `:=`( submittedName = matchedName, score = 1 )] acceptedTaxo 
<- unique(fullTaxo[(submittedName != acceptedName) & (acceptedName != matchedName)], by = "acceptedName" )[, `:=`( submittedName = acceptedName, matchedName = acceptedName, score = 1 )] fullTaxo <- unique(rbindlist(list(fullTaxo, matchedTaxo, acceptedTaxo))[submittedName != ""]) # write cache fwrite(fullTaxo[order(submittedName), -"from"], file = cachePath) if (verbose) { message("Cache updated") } } # stats if (verbose) { stats <- userTaxo[, by = from, .N] message("Source ", paste(sprintf("%s:%d", stats$from, stats$N), collapse = ", ")) stats <- userTaxo[, by = nameModified, .N] message("Corrections ", paste(sprintf("%s:%d", stats$nameModified, stats$N), collapse = ", ")) } # return corrected taxo data.frame(userTaxo[, .(genusCorrected, speciesCorrected, nameModified)]) }
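# --- Illustrative sketch (added for this document; not part of the package source) ---
# The request batch size and the delay between requests are read from options, as
# documented in the roxygen details above; they can be tuned before calling
# correctTaxo().
if (FALSE) {
  options(BIOMASS.batch_size = 200) # smaller batches sent to the TNRS API
  options(BIOMASS.wait_delay = 1)   # wait 1 second between two batches
  correctTaxo(genus = "Astrocarium", species = "standleanum", useCache = TRUE)
}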
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/correctTaxo.R
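# --- Illustrative usage sketch for correctTaxo() (not part of the package sources) ---
# A minimal example, assuming the BIOMASS package is installed together with its
# KarnatakaForest example dataset and that an internet connection to the TNRS API is
# available. The column names below follow the data.frame returned at the end of
# correctTaxo() above (genusCorrected, speciesCorrected, nameModified).
library(BIOMASS)
data(KarnatakaForest)
taxo <- correctTaxo(
  genus = KarnatakaForest$genus,
  species = KarnatakaForest$species
)
table(taxo$nameModified) # how many names were corrected, left unchanged, or not found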
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "X", "Y", ".SD", "XRel", "YRel", "XAbs", "YAbs" )) } #' Divides a plot in subplots #' #' This function divides a plot in subplots (with dimX and dimY) and gives the #' coordinates of the grid in return. #' This function uses a procrustes analysis to fit the rectangle you gave to the plot you have. #' #' @param projCoord A data frame with the projected coordinates with X and Y on the first and second column respectively #' @param plot Vector with the code of the plot #' @param corner Vector with the corner numbered from 1 to 4 for each plot, the numbered must be counted clockwise #' (see the result of the [numberCorner()]) #' @param gridsize The size of the grid #' @param dimX A vector of the real size for the X axis for the plot (can be given one value it will be replicate for each plot) #' @param dimY A vector of the real size for the Y axis for the plot (can be given one value it will be replicate for each plot) #' #' @return This function return a data frame with : #' - `plot`: The code of the plot you use #' - `subplot`: The code of the subplot automatically generated #' - `XRel`: The relative coordinate for the axis X (following the corner 1->2) for the plot #' - `YRel`: The relative coordinate for the axis Y (following the corner 1->4) for the plot #' - `XAbs`: The absolute coordinate (projected) for the axis X (following the corner 1->2) #' - `YAbs`: The absolute coordinate (projected) for the axis Y (following the corner 1->4) #' #' @export #' @author Arthur PERE #' @importFrom data.table data.table := #' @examples #' #' coord <- data.frame(X = c(0, 200, 0, 200), Y = c(0, 0, 200, 200)) + 5000 #' corner <- c(1, 2, 4, 3) #' plot <- rep("plot1", 4) #' #' cut <- cutPlot(coord, plot, corner, gridsize = 100, dimX = 200, dimY = 200) #' #' # plot the result #' plot(coord, main = "example", xlim = c(4900, 5300), ylim = c(4900, 5300), asp = 1) #' text(coord, labels = corner, pos = 1) #' points(cut$XAbs, cut$YAbs, pch = "+") #' legend("bottomright", legend = c("orignal", "cut"), pch = c("o", "+")) cutPlot <- function(projCoord, plot, corner, gridsize = 100, dimX = 200, dimY = 200) { # parameter verification -------------------------------------------------- if (!is.data.frame(projCoord)) { projCoord <- data.frame(projCoord) } if (nrow(projCoord) != length(plot)) { stop("Length of plot and the number of row of your UTMcoord data frame are different") } if (nrow(projCoord) != length(corner)) { stop("Length of corner and the number of row of your UTMcoord data frame are different") } if (length(gridsize) != 1 || !is.numeric(gridsize)) { stop("Gridsize must contain 1 numeric value") } if (!(length(dimX) %in% c(1, length(unique(plot))))) { stop("Your dimX vector must be of length 1 or of length equal to length(unique(plot))") } if (!(length(dimY) %in% c(1, length(unique(plot))))) { stop("Your dimY vector must be of length 1 or of length equal to length(unique(plot))") } if (any(gridsize > dimX) || any(gridsize > dimY)) { stop("Your gridsize is larger than the X or Y dimensions") } # function ---------------------------------------------------------------- Coord <- data.table(plot = plot, X = projCoord[, 1], Y = projCoord[, 2], corner = corner) Coord <- Coord[order(corner), .SD, by = plot] dimRel <- data.table(plot = unique(plot), dimX = dimX, dimY = dimY) # Do the grid in the plot and calcul the coordinate absolute of the points of the grid grid <- function(data, gridsize) { a <- as.matrix(data[, .(X, Y)]) # Do the matrix for the procrust 
problem b <- matrix(0, nrow = 4, ncol = 2) b[2:3, 1] <- unique(data[, dimX]) b[3:4, 2] <- unique(data[, dimY]) res <- procrust(a, b) # The grid matrix c <- as.matrix(expand.grid( X = seq(0, max(b[, 1]), by = gridsize), Y = seq(0, max(b[, 2]), by = gridsize) )) # in absolute coordinate coordAbs <- c %*% res$rotation coordAbs <- sweep(coordAbs, 2, res$translation, FUN = "+") return(data.table(XRel = c[, 1], YRel = c[, 2], XAbs = coordAbs[, 1], YAbs = coordAbs[, 2])) } Coord <- Coord[dimRel, on = "plot"][, grid(.SD, gridsize), by = plot] cornerCoordinate <- function(data) { rbindlist(apply(data[XRel < max(XRel) & YRel < max(YRel), -1], 1, function(x) { X <- x["XRel"] Y <- x["YRel"] data[ (XRel == X & YRel == Y) | (XRel == X + gridsize & YRel == Y) | (XRel == X + gridsize & YRel == Y + gridsize) | (XRel == X & YRel == Y + gridsize), .(subplot = paste(plot, X / gridsize, Y / gridsize, sep = "_"), XRel, YRel, XAbs, YAbs) ][c(1, 2, 4, 3), corner := seq(4)] })) } Coord <- Coord[, cornerCoordinate(.SD), by = plot, .SDcols = colnames(Coord)] return(as.data.frame(Coord)) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/cutPlot.R
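# --- Illustrative sketch for cutPlot() (not part of the package sources) ---
# Continuing the roxygen example above: the returned data frame gives, for every
# subplot, its relative (XRel, YRel) and absolute (XAbs, YAbs) corner coordinates,
# so the corners of a single subplot can be extracted by its generated code.
library(BIOMASS)
coord <- data.frame(X = c(0, 200, 0, 200), Y = c(0, 0, 200, 200)) + 5000
cut <- cutPlot(coord, plot = rep("plot1", 4), corner = c(1, 2, 4, 3),
               gridsize = 100, dimX = 200, dimY = 200)
unique(cut$subplot)               # codes such as "plot1_0_0", "plot1_1_0", ...
cut[cut$subplot == "plot1_0_0", ] # the four corners of the first subplot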
#' Angiosperm Phylogeny Group (APG III) dataset #' #' APGIII Families taken from the Angiosperm Phylogeny Website (http://www.mobot.org/MOBOT/research/APweb/) #' #' @docType data #' @format #' A data frame with 502 observations on the following 2 variables: #' - `order`: Vector of order #' - `famAPG`: Vector of APGIII families #' #' @usage data("apgFamilies") #' @source Stevens, P. F. (2001 onwards). _Angiosperm Phylogeny Website_. #' Version 12, July 2012. Retrieved on 2016-07-25 http://www.mobot.org/MOBOT/research/APweb/ #' #' @examples #' data(apgFamilies) #' str(apgFamilies) #' @keywords datasets internal #' "apgFamilies" #' Feldpausch et al. 2012 coefficients for generalized height-diameter models #' #' Weibull coefficients from a height-diameter model of the form \eqn{H = a(1-exp(-b*D^c))} given by Feldpausch #' et al. 2012. in the table 3, with the associated RSE. #' #' @docType data #' @usage data("feldCoef") #' @format #' A data frame with 12 observations on the following 4 variables: #' - `a`: Coefficient a #' - `b`: Coefficient b #' - `c`: Coefficient c #' - `RSE`: Vector of RSE #' #' @details This dataset is used in the function [retrieveH()] #' to predict height from diameter depending on the region. #' @references #' Feldpausch, T.R., et al. (2012). _Tree height integrated into pantropical forest biomass estimates_. #' Biogeosciences, 9, 3381–3403. #' @examples #' data(feldCoef) #' str(feldCoef) #' @keywords datasets internal "feldCoef" #' Genus Family database #' #' To create this database, we combined the genera from The Plant List (http://www.theplantlist.org/1.1/browse/-/-/) #' and the Vascular Plant Families and Genera from Kew (http://data.kew.org/vpfg1992/genlist.html). #' Families were checked against the APGIII families. #' #' @docType data #' @usage data("genusFamily") #' @format A data frame with 28107 observations on the following 2 variables: #' - `family`: Vector of families APGIII corrected #' - `genus`: Vector of genus #' #' @source #' WCSP (2015). _World Checklist of Selected Plant Families_. #' Facilitated by the Royal Botanic Gardens, Kew. Published on the Internet; http://apps.kew.org/wcsp/ Retrieved 2015-12-17. #' #' The Plant List (2013). Version 1.1. Published on the Internet; http://www.theplantlist.org/ Retrieved 2016-08-25. #' @examples #' data(genusFamily) #' str(genusFamily) #' @keywords datasets internal "genusFamily" #' Karnataka forest dataset #' #' Dataset from 96 forest plots (1 ha) established in the central Western Ghats of India by Ramesh et al. (2010). #' #' @docType data #' @usage data("KarnatakaForest") #' @format #' A data frame with 65889 observations on the following 8 variables : #' - `plotId`: Names of the plots #' - `treeId`: Tree Id, contains a letter (A, B, C...) when an individual has multiple stems #' - `family`: Family #' - `genus`: Genus #' - `species`: Species #' - `D`: Diameter (cm) #' - `lat`: Latitude #' - `long`: Longitude #' #' @references #' Ramesh, B. R. et al. (2010). #' _Forest stand structure and composition in 96 sites along environmental gradients in the central Western Ghats of India_ #' Ecological Archives E091-216. Ecology, 91(10), 3118-3118. 
#' @examples #' data(KarnatakaForest) #' str(KarnatakaForest) #' @keywords datasets "KarnatakaForest" #' Height-Diameter data #' #' Dataset from two 1-ha plots from the Nouragues forest (French Guiana) #' #' @docType data #' @usage data("NouraguesHD") #' @format #' A data frame with 1051 observations on the following variables : #' - `plotId`: Names of the plots #' - `genus`: Genus #' - `species`: Species #' - `D`: Diameter (cm) #' - `H`: Height (m) #' - `lat`: Latitude #' - `long`: Longitude #' #' @references #' Réjou-Méchain, M. et al. (2015). #' _Using repeated small-footprint LiDAR acquisitions to infer spatial and temporal variations of a high-biomass Neotropical forest_ #' Remote Sensing of Environment, 169, 93-101. #' @examples #' data(NouraguesHD) #' str(NouraguesHD) #' @keywords datasets "NouraguesHD" #' Posterior distribution of Chave et al.'s 2014 equation 4 parameters #' #' This matrix contains the posterior distribution of the parameters of Equation 4 of Chave et al. (2014), #' obtained in a Bayesian framework with uninformative priors through a Metropolis algorithm. #' #' @docType data #' @usage data("param_4") #' @format #' A data frame with 1001 observations on the following 3 variables. #' - `intercept`: Vector of intercept values #' - `logagbt`: Vector of the model coefficients associated with the product wood density * diameter^2 * height #' - `sd`: Vector of model residual standard error (RSE) values #' #' @details This dataset is used in the function [AGBmonteCarlo()]. #' @references #' Chave et al. (2014) _Improved allometric models to estimate the aboveground biomass of tropical trees_, #' Global Change Biology, 20 (10), 3177-3190 #' @examples #' data(param_4) #' str(param_4) #' @keywords datasets AGBmonteCarlo internal "param_4" #' Posterior distribution of parameters associated with the equation 7 by Chave et al. 2014. #' #' This matrix contains the posterior distribution of the parameters of the Equation 7 of Chave et al., (2014), #' obtained in a Bayesian framework with uninformative priors through a Metropolis algorithm. #' #' @docType data #' @usage data("param_7") #' @format #' A data frame with 1001 observations on the following 9 variables. #' - `intercept`: Vector of intercept values #' - `logwsg`: Vector of the model coefficients associated with log(wood density) #' - `logdbh`: Vector of the model coefficients associated with log(diameter) #' - `logdbh2`: Vector of the model coefficients associated with log(diameter)^2 #' - `E`: Vector of the model coefficients associated with the environmental index E #' - `sd`: Vector of model residual standard error (RSE) values #' - `temp`: Vector of the model coefficients associated with temperature seasonality #' - `cwd`: Vector of the model coefficients associated with climatic water deficit #' - `prec`: Vector of the model coefficients associated with precipitation seasonality #' #' @details This dataset is used in the function [AGBmonteCarlo()]. #' @references #' Chave et al. (2014) _Improved allometric models to estimate the aboveground biomass of tropical trees_, #' Global Change Biology, 20 (10), 3177-3190 #' @examples #' data(param_7) #' str(param_7) #' @keywords datasets AGBmonteCarlo internal "param_7" #' Mean standard deviation of wood density estimates at different taxonomic levels #' #' This dataset gives the mean standard deviation of wood density values of the [wdData] dataset #' at different taxonomical levels only considering taxa having more than 10 different values. 
#' This dataset is used in the function [getWoodDensity()] to associate at the appropriate taxonomic #' level a mean error to wood density estimate. #' #' @docType data #' @usage data("sd_10") #' @format #' A data frame with 3 observations on the following 2 variables: #' - `taxo`: Character vector with the different taxonomical levels (family, genus, species) #' - `sd`: Numeric vector giving the mean standard deviation of wood density values #' #' @details This dataset is used in the function [getWoodDensity()]. #' @references #' Rejou-Mechain et al. (2017). #' _BIOMASS: An R Package for estimating above-ground biomass and its uncertainty in tropical forests_. #' Methods in Ecology and Evolution, 8 (9), 1163-1167. #' @examples #' data(sd_10) #' str(sd_10) #' @keywords datasets wdData getWoodDensity internal "sd_10" #' The global wood density database #' #' The global wood density database (Chave et al. 2009, Zanne et al. 2009). #' #' @docType data #' @usage data("wdData") #' @format #' A data frame with 16467 observations on the following 7 variables. #' - `family`: a character vector indicating the family #' - `genus`: a character vector indicating the genus #' - `species`: a character vector indicating the species #' - `wd`: a numeric vector of wood densities (g/cm^3) #' - `region`: a character vector of regions (see [getWoodDensity()]) #' - `referenceNumber`: a numeric vector of reference numbers (bibliography) #' - `regionId`: a character vector of region ids #' #' @details This dataset is used in the function [getWoodDensity()], to estimate a taxon-average wood density value. #' @references #' Chave et al. (2009) _Towards a worldwide wood economics spectrum._ Ecology letters 12:4, 351-366. #' @source Zanne et al. _Global wood density database._ Dryad. Identifier: http://datadryad.org/handle/10255/dryad.235 (2009). #' @examples #' data(wdData) #' str(wdData) #' @keywords datasets wood density getWoodDensity internal "wdData" #' @name HDmethods #' #' @title HDmethods #' #' @description Methods used for modeling height-diameter relationship #' #' @details #' These functions model the relationship between tree height (H) and diameter (D). #' __loglogFunction__ #' Compute two types of log model (log and log2) to predict H from D. #' The model can be: #' - log 1: \eqn{log(H) = a+ b*log(D)} (equivalent to a power model) #' - log 2: \eqn{log(H) = a+ b*log(D) + c*log(D)^2} #' #' __michaelisFunction__ #' Construct a Michaelis Menten model of the form: \deqn{H = (A * D) / (B + D)} (A and B are the model parameters to be estimated) #' #' __weibullFunction__ #' Construct a three parameter Weibull model of the form: \deqn{H = a*(1-exp(-(D/b)^c))} (a, b, c are the model parameters to be estimated) #' #' #' @param data Dataset with the informations of height (H) and diameter (D) #' @param method In the case of the loglogFunction, the model is to be chosen between log1, log2 or log3. #' @param weight (optional) Vector indicating observation weights in the model. #' #' @return All the functions give an output similar to the one given by [stats::lm()], obtained for #' `michaelisFunction` and `weibullFunction` from [minpack.lm::nlsLM]). #' #' @references #' Michaelis, L., & Menten, M. L. (1913). _Die kinetik der invertinwirkung_. Biochem. z, 49(333-369), 352. #' Weibull, W. (1951). _Wide applicability_. Journal of applied mechanics, 103. #' Baskerville, G. L. (1972). _Use of logarithmic regression in the estimation of plant biomass_. #' Canadian Journal of Forest Research, 2(1), 49-53. 
#' #' @author Maxime REJOU-MECHAIN, Ariane TANGUY #' #' @seealso [modelHD()] #' #' #' @keywords Internal NULL
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/data_documentation.R
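# --- Worked sketch of the Feldpausch et al. (2012) model documented above ---
# (not part of the package sources). The feldCoef table stores the a, b, c
# coefficients of H = a * (1 - exp(-b * D^c)) for each region; retrieveH() further
# below indexes its rows by region name, which is what this sketch assumes as well.
D <- c(10, 30, 50)                        # diameters in cm
cf <- BIOMASS::feldCoef["GuianaShield", ] # region-specific coefficients
H <- cf$a * (1 - exp(-cf$b * D^cf$c))     # predicted heights in m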
#' Retrieving bioclimatic parameters #' #' This function extracts three bioclimatic parameters thanks to the coordinates of the data: #' the Climatic Water Deficit (CWD), the Temperature Seasonality (TS) and the Precipitation Seasonality (PS). #' #' The function is time-consuming at its first use as it downloads three raster files (one for each of #' the parameter) which are then stored in folders named wc2-5 and CWD (see Localisation). #' #' However, as soon as the raster is downloaded once, the function then runs fast. #' #' #' @inheritSection cacheManager Localisation #' #' @inheritParams computeE #' #' @return The function returns a data.frame with `tempSeas` (temperature seasonality, #' i.e. bioclimatic variable 4 from the Worldclim dataset; Hijmans et al. 2005), `precSeas` #' (precipitation seasonality, i.e. bioclimatic variable 15 from the Worldclim dataset; Hijmans #' et al. 2005) and `CWD` (climatic water deficit; Chave et al. 2014). #' #' @references #' Hijmans et al. (2005) _Very high resolution interpolated climate surfaces for global land areas_, #' International journal of climatology, 25(15), 1965-1978. #' Chave et al. (2014) _Improved allometric models to estimate the above-ground biomass of tropical trees_, #' Global Change Biology, 20 (10), 3177-3190 #' #' @author Ariane TANGUY, Arthur PERE #' @keywords bioclim param internal #' @export #' #' @examples #' # One study site #' lat <- 4.08 #' long <- -52.68 #' coord <- cbind(long, lat) #' \donttest{ #' bioclim <- getBioclimParam(coord) #' } #' #' # Several study sites (here three sites) #' long <- c(-52.68, -51.12, -53.11) #' lat <- c(4.08, 3.98, 4.12) #' coord <- cbind(long, lat) #' \donttest{ #' bioclim <- getBioclimParam(coord) #' } #' #' @importFrom terra rast extract getBioclimParam <- function(coord) { coord <- apply(coord, 1:2, as.numeric) tempSeas_rast <- rast(cacheManager("bio4.bil")) precSeas_rast <- rast(cacheManager("bio15.bil")) CWD_rast <- rast(cacheManager("CWD.bil")) ### Extract the raster value tempSeas <- extract(tempSeas_rast, coord, method = "bilinear")$bio4 * 10^-3 precSeas <- extract(precSeas_rast, coord, method = "bilinear")$bio15 * 10^-3 CWD <- extract(CWD_rast, coord, method = "bilinear")$CWD * 10^-3 out <- data.frame(tempSeas = tempSeas, precSeas = precSeas, CWD = CWD) return(out) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/getBioclimParam.R
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "id", "family" )) } #' Retrieving the taxonomy #' #' From a genus, the function `getTaxonomy` finds the APG III family, and optionally the #' order, from the [genusFamily] database and the [apgFamilies] dataset #' #' #' @param genus Vector of genus names #' @param findOrder (Boolean) If `TRUE`, the output will contain the taxonomical orders of the families. #' #' @return Data frame with the order (if `findOrder` is `TRUE`), family and genus. #' @author Ariane TANGUY, Arthur PERE, Maxime REJOU-MECHAIN #' @export #' #' @examples #' # Find the Family of the Aphelandra genus #' getTaxonomy("Aphelandra") #' # ... and the order #' \donttest{ #' getTaxonomy("Aphelandra", findOrder = TRUE) #' } #' @importFrom data.table setDF setDT data.table getTaxonomy <- function(genus, findOrder = FALSE) { ### Find the family (and the order) of a vector of genus ################## 1. Retrieve the Family # Load taxonomical data (sourced from Angiosperm Phylogeny Website, http://www.mobot.org/MOBOT/research/APweb/) genusFamily <- setDT(copy(BIOMASS::genusFamily)) setkey(genusFamily, genus) # Create ids inputGenus <- data.table( id = 1:length(genus), inputGenus = as.character(genus), stringsAsFactors = FALSE, key = "inputGenus" ) # Merge the input genera with the genus family table genusFam <- merge(inputGenus, genusFamily, by.x = "inputGenus", by.y = "genus", all.x = TRUE) genusFam <- genusFam[, .(id, inputGenus, family)] ################## 2. Retrieve the Order if (findOrder == TRUE) { apgFamilies <- setDT(copy(BIOMASS::apgFamilies)) genusFam <- merge(genusFam, apgFamilies, by.x = "family", by.y = "famAPG", all.x = TRUE) genusFam <- genusFam[, .(id, inputGenus, family, order)] } genusFam <- genusFam[order(id), ] genusFam <- setDF(genusFam[, id := NULL]) return(genusFam) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/getTaxonomy.R
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "regionId", "i.family", "wd", "wd.x", "wd.y", "taxo", ".EACHI", "meanWDsp", "nIndsp", "sdWDsp", "meanWD", "meanWDgn", "nInd", "nIndgn", "sdWD", "sdWDgn", "levelWD", "meanWDfm", "nIndfm", "sdWDfm", "meanWDst", "nIndst", "sdWDst" )) } #' Estimating wood density #' #' The function estimates the wood density (WD) of the trees from their taxonomy or from their #' congeners using the global wood density database (Chave et al. 2009, Zanne et al. 2009) or #' any additional dataset. The WD can either be attributed to an individual at a species, genus, #' family or stand level. #' #' @param genus Vector of genus names #' @param species Vector of species names #' @param stand (optional) Vector with the corresponding stands of your data. #' If set, the missing wood densities at the genus level will be attributed at stand level. #' If not, the value attributed will be the mean of the whole tree dataset. #' @param family (optional) Vector of families. If set, the missing wood densities at the genus #' level will be attributed at family level if available. #' @param region Region (or vector of region) of interest of your sample. By default, Region is #' set to 'World', but you can restrict the WD estimates to a single region : #' - `AfricaExtraTrop`: Africa (extra tropical) #' - `AfricaTrop`: Africa (tropical) #' - `Australia`: Australia #' - `AustraliaTrop`: Australia (tropical) #' - `CentralAmericaTrop`: Central America (tropical) #' - `China`: China #' - `Europe`: Europe #' - `India`: India #' - `Madagascar`: Madagascar #' - `Mexico`: Mexico #' - `NorthAmerica`: North America #' - `Oceania`: Oceania #' - `SouthEastAsia`: South-East Asia #' - `SouthEastAsiaTrop`: South-East Asia (tropical) #' - `SouthAmericaExtraTrop`: South America (extra tropical) #' - `SouthAmericaTrop`: South America (tropical) #' - `World`: World #' #' @param addWoodDensityData A dataframe containing additional wood density data to be #' combined with the global wood density database. The dataframe should be organized #' in a dataframe with three (or four) columns: "genus","species","wd", the fourth #' column "family" is optional. #' @param verbose A logical, give some statistic with the database #' #' @details #' The function assigns to each taxon a species- or genus- level average if at least #' one wood density value at the genus level is available for that taxon in the reference database. #' If not, the mean wood density of the family (if set) or of the stand (if set) is given. #' #' The function also provides an estimate of the error associated with the wood density estimate #' (i.e. a standard deviation): a mean standard deviation value is given to the tree at the #' appropriate taxonomic level using the [sd_10] dataset. #' #' #' @return Returns a dataframe containing the following information: #' - `family`: (if set) Family #' - `genus`: Genus #' - `species`: Species #' - `meanWD` (g/cm^3): Mean wood density #' - `sdWD` (g/cm^3): Standard deviation of the wood density that can be used in error propagation #' (see [sd_10] and [AGBmonteCarlo()]) #' - `levelWD`: Level at which wood density has been calculated. Can be species, genus, family, #' dataset (mean of the entire dataset) or, if stand is set, the name of the stand (mean of the current stand) #' - `nInd`: Number of individuals taken into account to compute the mean wood density #' #' @export #' #' @author Maxime REJOU-MECHAIN, Arthur PERE, Ariane TANGUY #' #' @references #' Chave, J., et al. 
_Towards a worldwide wood economics spectrum_. Ecology letters 12.4 (2009): 351-366. #' Zanne, A. E., et al. _Global wood density database_. Dryad. Identifier: http://hdl. handle. net/10255/dryad 235 (2009). #' #' #' @examples #' # Load a data set #' data(KarnatakaForest) #' #' # Compute the Wood Density up to the genus level and give the mean wood density of the dataset #' \donttest{ #' WD <- getWoodDensity( #' genus = KarnatakaForest$genus, #' species = KarnatakaForest$species #' ) #' } #' #' # Compute the Wood Density up to the genus level and then give the mean wood density per stand #' \donttest{ #' WD <- getWoodDensity( #' genus = KarnatakaForest$genus, #' species = KarnatakaForest$species, #' stand = KarnatakaForest$plotId #' ) #' } #' #' # Compute the Wood Density up to the family level and then give the mean wood density per stand #' \donttest{ #' WD <- getWoodDensity( #' family = KarnatakaForest$family, #' genus = KarnatakaForest$genus, #' species = KarnatakaForest$species, #' stand = KarnatakaForest$plotId #' ) #' str(WD) #' } #' @seealso [wdData], [sd_10] #' @keywords Wood density #' @importFrom data.table data.table := setDF setDT setkey copy chmatch %chin% #' getWoodDensity <- function(genus, species, stand = NULL, family = NULL, region = "World", addWoodDensityData = NULL, verbose = TRUE) { # Parameters verification ------------------------------------------------- if (length(genus) != length(species)) { stop("Your data (genus and species) do not have the same length") } if (!is.null(family) && (length(genus) != length(family))) { stop("Your family vector and your genus/species vectors do not have the same length") if (any(colSums(table(family, genus) > 0, na.rm = TRUE) >= 2)) { stop("Some genera are in two or more families") } } if (!is.null(stand) && (length(genus) != length(stand))) { stop("Your stand vector and your genus/species vectors do not have the same length") } if (!is.null(addWoodDensityData)) { if (!(all(names(addWoodDensityData) %in% c("genus", "species", "wd", "family")) && length(names(addWoodDensityData)) %in% c(3, 4))) { stop('The additional wood density database should be organized in a dataframe with three (or four) columns: "genus","species","wd", and the column "family" is optional') } } # Data processing --------------------------------------------------------- # Load global wood density database downloaded from http://datadryad.org/handle/10255/dryad.235 wdData <- setDT(copy(BIOMASS::wdData)) # Load the mean standard deviation observed at the species, Genus or Family level # in the Dryad dataset when at least 10 individuals are considered sd_10 <- setDT(copy(BIOMASS::sd_10)) sd_tot <- sd(wdData$wd) Region <- tolower(region) if ((Region != "world") && any(is.na(chmatch(Region, tolower(wdData$regionId))))) { stop("One of the region you entered is not recognized in the global wood density database") } subWdData <- wdData if (!("world" %in% Region)) { subWdData <- wdData[tolower(regionId) %chin% Region] } if (nrow(subWdData) < 1000 && is.null(addWoodDensityData)) { warning( "DRYAD data only stored ", nrow(subWdData), " wood density values in your region of interest. 
", 'You could provide additional wood densities (parameter addWoodDensityData) or widen your region (region="World")' ) } if (!is.null(addWoodDensityData)) { setDT(addWoodDensityData) if (!("family" %in% names(addWoodDensityData))) { genusFamily <- setDT(copy(BIOMASS::genusFamily)) addWoodDensityData[genusFamily, on = "genus", family := i.family] } addWoodDensityData <- addWoodDensityData[!is.na(wd), ] subWdData <- merge(subWdData, addWoodDensityData, by = c("family", "genus", "species"), all = TRUE) subWdData[!is.na(regionId), wd := wd.x][is.na(regionId), wd := wd.y][, ":="(wd.x = NULL, wd.y = NULL)] } if (verbose) { message("The reference dataset contains ", nrow(subWdData), " wood density values") } # Creating an input dataframe inputData <- data.table(genus = as.character(genus), species = as.character(species)) if (!is.null(family)) { inputData[, family := as.character(family)] } else { if (!exists("genusFamily", inherits = FALSE)) { genusFamily <- setDT(copy(BIOMASS::genusFamily)) } inputData[genusFamily, on = "genus", family := i.family] } if (!is.null(stand)) { inputData[, stand := as.character(stand)] } taxa <- unique(inputData, by = c("family", "genus", "species")) if (verbose) { message("Your taxonomic table contains ", nrow(taxa), " taxa") } # utilitary function : paste y values inside x when one x value is NA coalesce <- function(x, y) { if (length(y) == 1) { y <- rep(y, length(x)) } where <- is.na(x) x[where] <- y[where] x } # Select only the relevant data meanWdData <- subWdData[(family %in% taxa$family | genus %in% taxa$genus | species %in% taxa$species), ] if (nrow(meanWdData) == 0) { stop("Our database have not any of your family, genus and species") } # If there is no genus or species level inputData[, ":="(meanWD = NA_real_, nInd = NA_integer_, sdWD = NA_real_, levelWD = NA_character_)] if (!((!is.null(family) && nrow(merge(inputData, meanWdData[, .N, by = .(family)], c("family"))) != 0) || nrow(merge(inputData, meanWdData[, .N, by = .(family, genus)], c("family", "genus"))) != 0 || nrow(merge(inputData, meanWdData[, .N, by = .(family, genus, species)], c("family", "genus", "species"))) != 0)) { stop("There is no exact match among the family, genus and species, try with 'addWoodDensity' or inform the 'family' or increase the 'region'") } # Extracting data --------------------------------------------------------- # compute mean at species level sdSP <- sd_10[taxo == "species", sd] meanSP <- meanWdData[, by = c("family", "genus", "species"), .( meanWDsp = mean(wd), nIndsp = .N, sdWDsp = sdSP ) ] inputData[meanSP, on = c("family", "genus", "species"), by = .EACHI, `:=`( meanWD = meanWDsp, nInd = nIndsp, sdWD = sdWDsp, levelWD = "species" ) ] # mean at genus level sdGN <- sd_10[taxo == "genus", sd] meanGN <- meanSP[, by = c("family", "genus"), .( meanWDgn = mean(meanWDsp), nIndgn = .N, sdWDgn = sdGN ) ] inputData[meanGN, on = c("family", "genus"), by = .EACHI, `:=`( meanWD = coalesce(meanWD, meanWDgn), nInd = coalesce(nInd, nIndgn), sdWD = coalesce(sdWD, sdWDgn), levelWD = coalesce(levelWD, "genus") ) ] # mean at family level if provided if (!is.null(family)) { sdFM <- sd_10[taxo == "family", sd] meanFM <- meanGN[, by = family, .( meanWDfm = mean(meanWDgn), nIndfm = .N, sdWDfm = sdFM ) ] inputData[meanFM, on = "family", by = .EACHI, `:=`( meanWD = coalesce(meanWD, meanWDfm), nInd = coalesce(nInd, nIndfm), sdWD = coalesce(sdWD, sdWDfm), levelWD = coalesce(levelWD, "family") ) ] } # mean at stand level if provided if (!is.null(stand)) { meanST <- inputData[!is.na(meanWD), by 
= stand, .( meanWDst = mean(meanWD), nIndst = .N, sdWDst = sd(meanWD) ) ] inputData[is.na(meanWD), levelWD := stand] inputData[meanST, on = "stand", by = .EACHI, `:=`( meanWD = coalesce(meanWD, meanWDst), nInd = coalesce(nInd, nIndst), sdWD = coalesce(sdWD, sdWDst) ) ] } # mean of whole dataset for remaining NA meanDS <- inputData[ !is.na(meanWD), .( meanWDds = mean(meanWD), nIndds = .N, sdWDds = sd(meanWD) ) ] inputData[ is.na(meanWD), `:=`( meanWD = meanDS$meanWDds, nInd = meanDS$nIndds, sdWD = meanDS$sdWDds, levelWD = "dataset" ) ] # Deal with NA or zero values in sdWD (adopt the most conservative approach assigning the sd over the full wdData dataset) #(very specific cases where no or only one species co-occur with unidentified individuals in the plot) inputData[is.na(sdWD) | sdWD==0, sdWD:=sd_tot] # Convert to a dataframe result <- setDF(inputData[, .(family, genus, species, meanWD, sdWD, levelWD, nInd)]) return(result) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/getWoodDensity.R
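# --- Illustrative sketch for getWoodDensity() (not part of the package sources) ---
# A minimal check of the hierarchical assignment described in the Details section:
# the levelWD column records whether each wood density value came from the species,
# genus, family, stand or whole-dataset level. Assumes the BIOMASS package and its
# KarnatakaForest example dataset.
library(BIOMASS)
data(KarnatakaForest)
WD <- getWoodDensity(
  genus = KarnatakaForest$genus,
  species = KarnatakaForest$species,
  stand = KarnatakaForest$plotId
)
table(WD$levelWD) # number of trees resolved at each level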
if (getRversion() >= "2.15.1") {
  utils::globalVariables(c(
    "codeUTM", "long", "lat", ".BY"
  ))
}

#' Convert long/lat coordinates into UTM coordinates
#'
#' @inheritParams computeE
#'
#' @return A data frame with:
#' - `long`: The longitude of the entry
#' - `lat`: The latitude of the entry
#' - `codeUTM`: The `proj` code of the UTM zone
#' - `X`: The X UTM coordinate
#' - `Y`: The Y UTM coordinate
#'
#' @export
#' @importFrom data.table as.data.table :=
#' @examples
#'
#' long <- c(-52.68, -51.12, -53.11)
#' lat <- c(4.08, 3.98, 4.12)
#' coord <- cbind(long, lat)
#' \donttest{
#' UTMcoord <- latlong2UTM(coord)
#' }
#'
latlong2UTM <- function(coord) {
  coord <- data.table(coord, check.names = TRUE)
  setnames(coord, colnames(coord), c("long", "lat"))

  if (!requireNamespace("proj4")) {
    stop("Please install the package 'proj4'\n \t\tinstall.packages('proj4').")
  }

  # Function to find the UTM zone: assumes that longitudes west of the
  # Prime Meridian are encoded as running from -180 to 0 degrees
  codelatlong2UTM <- function(long, lat) {
    Nzone <- (floor((long + 180) / 6) %% 60) + 1
    Nzone <- paste0(Nzone, ifelse(lat >= 0, " +north ", " +south "))
    Nzone <- paste0("+proj=utm +zone=", Nzone, "+ellps=WGS84 +datum=WGS84 +units=m +no_defs")
    return(Nzone)
  }

  # Convert into UTM
  coord[, codeUTM := codelatlong2UTM(long, lat)]
  coord[, c("X", "Y") := proj4::project(.(long, lat), proj = unique(.BY)), by = codeUTM]

  setDF(coord)
  return(coord)
}
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/latlong2UTM.R
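# --- Small sketch of the UTM-zone rule used by codelatlong2UTM() above ---
# (not part of the package sources). The zone number is floor((long + 180) / 6) %% 60 + 1
# and the hemisphere is taken from the sign of the latitude; for the Nouragues
# coordinates used elsewhere in the package this gives zone 22, northern hemisphere.
long <- -52.68
lat <- 4.08
zone <- (floor((long + 180) / 6) %% 60) + 1      # 22
hemisphere <- if (lat >= 0) "north" else "south" # "north"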
#' @rdname HDmethods
#' @return Result of a model (lm object)
#' @importFrom stats formula as.formula
loglogFunction <- function(data, method) {
  ### Compute the log-log model of the H-D relationship

  # extract the power of the method (e.g. 1 from "log1", 2 from "log2")
  method_pow <- strtoi(substr(method, nchar(method), nchar(method)))

  # build the right-hand side of the formula, i.e. I(log(D)^1) + I(log(D)^2) + ...
  formula <- paste(
    sapply(seq(method_pow), function(x) sprintf("I(log(D)^%i)", x)),
    collapse = " + "
  )

  # complete the formula
  formula <- as.formula(paste("I(log(H))", formula, sep = " ~ "))

  return(lm(formula, data))
}
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/loglogFunction.R
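# --- Minimal sketch of the Baskerville (1972) back-transformation used above ---
# (not part of the package sources). After fitting log(H) ~ log(D), loglogFunction()
# above (and modelHD() later in the package) back-transform predictions as
# exp(pred + 0.5 * RSElog^2) rather than exp(pred). Assumes the BIOMASS package and
# its NouraguesHD example dataset.
library(BIOMASS)
data(NouraguesHD)
dat <- na.omit(data.frame(D = NouraguesHD$D, H = NouraguesHD$H))
mod <- lm(log(H) ~ log(D), data = dat)      # the "log1" model
RSElog <- summary(mod)$sigma
Hpred <- exp(predict(mod) + 0.5 * RSElog^2) # bias-corrected heights (m)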
#' @rdname HDmethods
#' @return Result of a model (nlsM object)
#' @importFrom minpack.lm nlsLM nls.lm.control
#' @importFrom methods is
michaelisFunction <- function(data, weight = NULL) {
  ### Compute the Michaelis-Menten model of the H-D relationship

  H <- data$H
  D <- data$D

  count <- 1
  maxIter <- 50
  converge <- FALSE

  if (anyNA(weight)) weight <- NULL

  # increase the maximum number of iterations (up to 10 attempts)
  # until the fit no longer throws a warning
  while (converge == FALSE && count <= 10) {
    tt <- tryCatch(
      {
        if (is.null(weight)) {
          nlsLM(H ~ SSmicmen(D, A, B),
            control = nls.lm.control(maxiter = maxIter)
          )
        } else {
          nlsLM(H ~ SSmicmen(D, A, B),
            weights = weight,
            control = nls.lm.control(maxiter = maxIter)
          )
        }
      },
      error = function(e) e,
      warning = function(w) w
    )

    if (is(tt, "warning")) {
      count <- count + 1
      maxIter <- maxIter + 50
    } else {
      converge <- TRUE
    }
  }

  # refit the final model with the retained number of iterations
  model <- if (is.null(weight)) {
    nlsLM(H ~ SSmicmen(D, A, B),
      control = nls.lm.control(maxiter = maxIter)
    )
  } else {
    nlsLM(H ~ SSmicmen(D, A, B),
      weights = weight,
      control = nls.lm.control(maxiter = maxIter)
    )
  }

  return(model)
}
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/michaelisFunction.R
#' Fitting height-diameter models #' #' This function fits and compares (optional) height-diameter models. #' #' @param D Vector with diameter measurements (in cm). NA values are accepted but a #' minimum of 10 valid entries (i.e. having a corresponding height in H) is required. #' @param H Vector with total height measurements (in m). NA values are accepted but a minimum of 10 valid entries #' (i.e. having a corresponding diameter in D) is required. #' @param method Method used to fit the relationship. #' To be chosen between: #' - log1, log2 #' + log 1: \eqn{(log(H) = a+ b*log(D))} (equivalent to a power model) #' + log 2: \eqn{(log(H) = a+ b*log(D) + c*log(D)^2)} #' - weibull: \eqn{H = a*(1-exp(-(D/b)^c))} #' - michaelis: \eqn{H = (A * D)/(B + D)} #' #' If `NULL`, all the methods will be compared. #' @param useWeight If weight is `TRUE`, model weights will be \eqn{(D^2)*H} (i.e. weights are proportional to tree #' volume, so that larger trees have a stronger influence during the construction of the model). #' @param drawGraph If `TRUE`, a graphic will illustrate the relationship between H and D. Only if argument `plot` is null. #' @param plot (optional) Plot ID, must be either one value, or a vector of the same length as D. This argument is used to build #' stand-specific HD models. #' #' @details All the back transformations for log-log models are done using the Baskerville correction (\eqn{0.5 * RSE^2}, #' where RSE is the Residual Standard Error). #' #' #' @return #' If plot is NULL or has a single value, a single list is returned. If there is more than one plot, #' multiple embedded lists are returned with plots as the list names. #' Returns a list if the parameter model is not null: #' - `input`: list of the data used to construct the model (list(H, D)) #' - `model`: outputs of the model (same outputs as given by [stats::lm()], [stats::nls()]) #' - `RSE`: Residual Standard Error of the model #' - `RSElog`: Residual Standard Error of the log model (\code{NULL} if other model) #' - `residuals`: Residuals of the model #' - `coefficients`: Coefficients of the model #' - `R.squared`: \eqn{R^2} of the model #' - `formula`: Formula of the model #' - `method`: Name of the method used to construct the model #' - `predicted`: Predicted height values #' #' #' If the parameter model is null, the function return a graph with all the methods for #' comparison, the function also returns a data.frame with: #' - `method`: The method that had been used to construct the graph #' - `color`: The color of the curve in the graph #' - `RSE`: Residual Standard Error of the model #' - `RSElog`: Residual Standard Error of the log model (`NULL` if other model) #' - `Average_bias`: The average bias for the model #' #' #' #' @author Maxime REJOU-MECHAIN, Arthur PERE, Ariane TANGUY #' @seealso [retrieveH()] #' #' @export #' #' @examples #' #' # Load a data set #' data(NouraguesHD) #' #' # To model the height from a dataset #' \donttest{ #' HDmodel <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, drawGraph = TRUE) #' } #' #' # If the method needed is known #' HDmodel <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, method = "weibull", drawGraph = TRUE) #' HDmodel <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, method = "log1", drawGraph = TRUE) #' #' # Using weights #' HDmodel <- modelHD( #' D = NouraguesHD$D, H = NouraguesHD$H, method = "weibull", useWeight = TRUE, #' drawGraph = TRUE #' ) #' @importFrom graphics legend lines par plot grid axis #' @importFrom stats SSmicmen lm median na.omit quantile rnorm sd 
predict coef #' @importFrom utils data #' @importFrom data.table data.table modelHD <- function(D, H, method = NULL, useWeight = FALSE, drawGraph = FALSE, plot = NULL) { # # To maintain user's original options oldpar <- par(no.readonly = TRUE) on.exit(par(oldpar)) # parameters verification ------------------------------------------------- # Check if there is enough data to compute an accurate model nbNonNA <- sum(!is.na(H)) if (nbNonNA < 15) { stop("The data has not enough height data (less than 15 non NA)") } if (length(H) != length(D)) { stop("Your vector D and H do not have the same length") } if (!is.null(method)) { method <- tolower(method) } methods <- c("log1", "log2", "weibull", "michaelis") if (!is.null(method) && !(method %in% methods)) { stop("Chose your method among those ones : ", paste(methods, collapse = ", ")) } if (!is.logical(useWeight)) { stop("UseWeight argument must be a boolean") } if (!is.logical(drawGraph)) { stop("drawGraph argument must be a boolean") } if (!is.null(plot) && !length(plot) %in% c(1, length(D))) { stop("The length of the 'plot' vector must be either 1 or the length of D") } if (!is.null(plot)) { drawGraph <- FALSE } # If there is a plot ID --------------------------------------------------- # if there is multiple plots in the plot vector if (!is.null(plot) && length(unique(plot)) != 1) { Hdata <- data.table(H = H, D = D, plot = plot) output <- lapply(split(Hdata, by = "plot", keep.by = TRUE), function(subData) { suppressMessages(modelHD( subData$D, subData$H, method, useWeight, drawGraph, unique(subData$plot) )) }) if (is.null(method)) { message("To build a HD model you must use the parameter 'method' in this function") } return(output) } # functions ---------------------------------------------------------------- # fonction to choose the fonction modSelect <- function(Hdata, method, useGraph = FALSE) { output <- list() ################## Log-log model if (grepl("log", method)) { mod <- loglogFunction(Hdata, method) output$RSElog <- summary(mod)$sigma # Baskerville correction 1972 output$Hpredict <- exp(predict(mod) + 0.5 * output$RSElog^2) if (useGraph) { output$Hpredict_plot <- exp(predict(mod, newdata = D_Plot) + 0.5 * output$RSElog^2) } } else { ######### The others HD models mod <- switch(method, michaelis = michaelisFunction(Hdata, weight), # Michaelis-Menten function weibull = weibullFunction(Hdata, weight) # Weibull 3 parameters ) output$Hpredict <- predict(mod) output$RSElog <- NA_real_ if (useGraph) { output$Hpredict_plot <- predict(mod, newdata = D_Plot) } } names(output$Hpredict) <- NULL res <- Hdata$H - output$Hpredict output$method <- method output$RSE <- sqrt(sum(res^2) / summary(mod)$df[2]) # Residual standard error output$Average_bias <- (mean(output$Hpredict) - mean(Hdata$H)) / mean(Hdata$H) output$residuals <- res output$mod <- mod return(output) } # function to draw the beining of the graph drawPlotBegin <- function(givenMethod = FALSE, plotId) { main_title <- ifelse(givenMethod == FALSE, "Model comparison", paste("Selected model : ", givenMethod)) par(mar = c(5, 5, 3, 3)) plot(Hdata$D, Hdata$H, pch = 20, cex = 0.5, col = "grey50", log = "xy", las = 1, xlab = "D (cm)", ylab = "H (m)", cex.lab = 1.8, cex.axis = 1.5, main = main_title, cex.main = 2, axes = FALSE, frame.plot = FALSE ) grid(col = "grey80", lty = 1, equilogs = FALSE) axis(side = 1, lty = "blank", las = 1) axis(side = 2, lty = "blank", las = 1) } # Data processing --------------------------------------------------------- Hdata <- data.table(H = H, D = D) Hdata <- 
na.omit(Hdata) # Remove NA values weight <- NULL # Vector of diameter used only for visualisation purpose D_Plot <- data.frame(D = Hdata[, seq(floor(min(D)), ceiling(max(D)), 0.5)]) # If the measures need to be weighted if (useWeight == TRUE) { weight <- Hdata[, D^2 * H] } # weight is proportional to tree volume # If we gave the function a method ---------------------------------------- if (!is.null(method)) { output <- modSelect(Hdata, method, drawGraph) ####### if drawGraph is true if (drawGraph) { drawPlotBegin(method, plot) lines(D_Plot$D, output$Hpredict_plot, lwd = 2, col = "blue") legend("bottomright", c("Data", "Model selected"), lty = c(3, 1), lwd = c(3, 3), col = c("grey", "blue"), cex = 1.5 ) } ################## Return the model chosen # Results (RSE, model coefficient, residuals, R?) out <- list( input = list(H = Hdata$H, D = Hdata$D), model = output$mod, residuals = output$residuals, coefficients = summary(output$mod)$coefficients, R.squared = summary(output$mod)$r.squared, formula = summary(output$mod)$call, method = method, predicted = output$Hpredict, RSE = output$RSE ) if (grepl("log", method)) { out$RSElog <- output$RSElog } return(out) } else { # Compare Models ---------------------------------------------------------- if (is.null(plot)) { drawPlotBegin(plotId = plot) } color <- c("blue", "green", "orange", "purple") result <- rbindlist(lapply(1:length(methods), function(i) { method <- methods[i] out <- modSelect(Hdata, method, useGraph = is.null(plot)) output <- list( method = method, color = color[i], RSE = out$RSE, # Residual standard error RSElog = out$RSElog, Average_bias = out$Average_bias ) if (is.null(plot)) { lines(D_Plot$D, out$Hpredict_plot, lwd = 2, col = color[i], lty = i) } if (!is.null(plot)) { output[["color"]] <- NULL } return(output) }), fill = TRUE) if (is.null(plot)) { legend("bottomright", methods, lty = 1:5, lwd = 2, cex = 1, col = color ) } message("To build a HD model you must use the parameter 'method' in this function") return(data.frame(result)) } }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/modelHD.R
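# --- Illustrative sketch for modelHD() (not part of the package sources) ---
# When called without `method`, modelHD() returns the comparison data frame
# documented above (method, RSE, RSElog, Average_bias). A common workflow is to
# pick the method with the lowest RSE and refit. Assumes the BIOMASS package and
# its NouraguesHD example dataset; the comparison call opens a graphics device.
library(BIOMASS)
data(NouraguesHD)
cmp <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H)          # comparison table
best <- cmp$method[which.min(cmp$RSE)]                        # e.g. "log2"
HDmodel <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, method = best)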
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "n.row", "corner", "X", "Y", "plot" )) } #' Get the UTM coordinates with the corner of the plot #' #' @description #' Get the UTM coordinates from the latitude and longitude of the corners of a plot. #' The function also assign a number to the corners in a clockwise or counterclockwise way, with the number 1 for the XY origin. #' Corner numbering is done as followed: #' - axis X: the corner 1 to the corner 2 #' - axis Y: the corner 1 to the corner 4 #' #' #' @param longlat (optional) data frame with the coordinates in longitude latitude (eg. cbind(longitude, latitude)). #' @param projCoord (optional) data frame with the projected coordinates in X Y #' @param plot A vector of codes (names) of the plots #' @param origin A logical vector with TRUE corresponding of the origin of the axis of each plot. #' @param clockWise A logical, whether the numbering should be done in a clockwise (TRUE) or counterclockwise (FALSE) way. #' #' @return A data frame with: #' - `plot`: The code of the plot #' - `X`: The coordinates X in UTM #' - `Y`: The coordinates Y in UTM #' - `corner`: The corner numbers #' #' @export #' @importFrom data.table data.table := #' @author Arthur PERE, Maxime REJOU-MECHAIN #' @examples #' coord <- data.frame(X = c(0, 200, 0, 200), Y = c(0, 0, 200, 200)) + 5000 #' plot <- rep("plot1", 4) #' origin <- c(FALSE, FALSE, TRUE, FALSE) #' #' # if you turn clock wise #' corner <- numberCorner(projCoord = coord, plot = plot, origin = origin, clockWise = TRUE) #' #' # Plot the plot #' plot(coord, asp = 1) #' text(coord, labels = corner$corner, pos = 1) #' #' #' # Using a counterclockwise way #' corner <- numberCorner(projCoord = coord, plot = plot, origin = origin, clockWise = FALSE) #' #' # Plot the plot #' plot(coord, asp = 1) #' text(coord, labels = corner$corner, pos = 1) numberCorner <- function(longlat = NULL, projCoord = NULL, plot, origin, clockWise) { # Parameters verification ------------------------------------------------- if (is.null(longlat) && is.null(projCoord)) { stop("Give at least one set of coordinates: longlat or projCoord") } if (!is.null(longlat) && !is.null(projCoord)) { stop("Give only one set of coordinates: longlat or projCoord") } if (length(plot) != length(origin)) { stop("Vectors plot and origin have not the same length") } if (!is.null(longlat) && !is.data.frame(longlat)) { longlat <- as.data.frame(longlat, fix.empty.names = FALSE) } if (!is.null(projCoord) && !is.data.frame(projCoord)) { projCoord <- as.data.frame(projCoord, fix.empty.names = FALSE) } if (!is.null(longlat) && nrow(longlat) != length(plot)) { stop("The length of vectors plot and origin is different from the number of rows of longlat") } if (!is.null(projCoord) && nrow(projCoord) != length(plot)) { stop("The length of vectors plot and origin is different from the number of rows of projCoord") } tab <- as.numeric(table(plot)) if (any(as.numeric(table(plot)) != 4)) { stop( "Lenght of vector plot is not 4, the plot(s):\n\t\t", paste(names(table(plot)[ table(plot) != 4 ]), collapse = " "), "\nhave:\n\t\t", paste(table(plot)[ table(plot) != 4 ], collapse = " "), "\ncorner respectively" ) } tab <- as.matrix(table(plot, origin)) if (any(tab[, 1] != 3) || any(tab[, 2] != 1)) { stop( "Please verify your 'origin' vector, it should contain 1 TRUE and 3 FALSE by plot, those plot(s) are:\n\t\t", paste(rownames(tab)[tab[, 1] != 3 | tab[, 2] != 1], collapse = " ") ) } # data table -------------------------------------------------------------- if 
(!is.null(longlat)) { Coord <- data.table(longlat, plot = plot) setnames(Coord, colnames(Coord), new = c("long", "lat", "plot")) Coord <- Coord[setDT(latlong2UTM(cbind(long, lat))), on = c("long", "lat") ] } if (!is.null(projCoord)) { Coord <- data.table(projCoord, plot = plot) setnames(Coord, colnames(Coord), new = c("X", "Y", "plot")) } Coord[, origin := origin] # Function ---------------------------------------------------------------- # Function to assign corner numbers cornerFun <- function(x, y, Origin, clockWise) { coord <- data.table(X = x, Y = y, n.row = 1:length(x), corner = as.numeric(NA)) # if the plot the square is turn at 45° in relation to the horizontal ######?????? # rotate the coordinate by 45° if (any(rank(coord$X) == 4)) { rot <- matrix(c(cos(pi / 4), sin(pi / 4), -sin(pi / 4), cos(pi / 4)), nrow = 2) newcoord <- as.matrix(coord[, .(X, Y)]) %*% rot coord[, ":="(X = newcoord[, 1], Y = newcoord[, 2]) ] } # Assign temporary corner numbers without accounting for the origin m1 <- coord[ rank(X) <= 2, ] tmp1 <- m1[rank(Y), n.row] m2 <- coord[ rank(X) > 2, ] tmp2 <- m2[rank(Y), n.row] if (!clockWise) { coord[tmp1, corner := c(1, 4)] coord[tmp2, corner := c(2, 3)] } else { coord[tmp1, corner := c(1, 2)] coord[tmp2, corner := c(4, 3)] } # Shift the corner numbers to have corner 1 on the origin Origin <- Origin[order(coord[, corner])] shift <- 5 - which(Origin) out <- (coord$corner + shift) %% 4 out[which(out == 0)] <- 4 return(out) } Coord[, corner := cornerFun(X, Y, origin, clockWise), by = plot] return(as.data.frame(Coord[, .(plot, X, Y, corner)])) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/numberCorner.R
#' @references #' Réjou-Méchain M., Tanguy A., Piponiot C., Chave J., Hérault B. (2017). BIOMASS : #' An R Package for estimating above-ground biomass and its uncertainty in tropical forests. #' Methods in Ecology and Evolution, 8(9), 1163-1167. #' #' #' @examples #' \donttest{ #' library(BIOMASS) #' #' # Dataset containing plot inventory data from Karnataka, India (Ramesh et al. 2010) #' data(KarnatakaForest) #' str(KarnatakaForest) #' #' # Dataset containing height and diameter measurements from two 1-ha plots #' # established in the lowland rainforest of French Guiana, at the Nouragues #' # Ecological Research Station #' data(NouraguesHD) #' str(NouraguesHD) #' #' ############################################################################# #' # WOOD DENSITY #' #' # 1-RETRIEVE AND CORRECT TAXONOMY #' #' # Checking typos in taxonomy #' Taxo <- correctTaxo(genus = KarnatakaForest$genus, species = KarnatakaForest$species) #' KarnatakaForest$genusCorr <- Taxo$genusCorrected #' KarnatakaForest$speciesCorr <- Taxo$speciesCorrected #' #' # Retrieving APG III Families and Orders from Genus names #' APG <- getTaxonomy(KarnatakaForest$genusCorr, findOrder = TRUE) #' KarnatakaForest$familyAPG <- APG$family #' KarnatakaForest$orderAPG <- APG$order #' #' # 2-RETRIEVE WOOD DENSITY #' dataWD <- getWoodDensity( #' genus = KarnatakaForest$genusCorr, #' species = KarnatakaForest$speciesCorr, #' stand = KarnatakaForest$plotID #' ) #' #' ############################################################################# #' # TREE HEIGHT #' #' # Compare different local H-D models #' modelHD( #' D = NouraguesHD$D, H = NouraguesHD$H, #' drawGraph = TRUE, useWeight = TRUE #' ) #' #' # Compute the local H-D model with the lowest RSE #' HDmodel <- modelHD( #' D = NouraguesHD$D, H = NouraguesHD$H, #' method = "log2", useWeight = TRUE #' ) #' #' # Compute plot-specific H-D models #' HDmodelPerPlot <- modelHD(NouraguesHD$D, NouraguesHD$H, #' method = "weibull", #' useWeight = TRUE, plot = NouraguesHD$plotId #' ) #' #' RSEmodels <- sapply(HDmodelPerPlot, function(x) x$RSE) #' Coeffmodels <- lapply(HDmodelPerPlot, function(x) x$coefficients) #' #' # Retrieve height data from a local HD model #' dataHlocal <- retrieveH(D = KarnatakaForest$D, model = HDmodel) #' #' # Retrieve height data from a Feldpaush et al. (2012) averaged model #' dataHfeld <- retrieveH(D = KarnatakaForest$D, region = "SEAsia") #' #' # Retrieve height data from Chave et al. (2012) equation 6 #' dataHchave <- retrieveH( #' D = KarnatakaForest$D, #' coord = cbind(KarnatakaForest$long, KarnatakaForest$lat) #' ) #' #' ############################################################################# #' # AGB CALCULATION #' #' KarnatakaForest$WD <- dataWD$meanWD #' KarnatakaForest$H <- dataHlocal$H #' KarnatakaForest$Hfeld <- dataHfeld$H #' #' # Compute AGB(Mg) per tree #' AGBtree <- computeAGB( #' D = KarnatakaForest$D, WD = KarnatakaForest$WD, #' H = KarnatakaForest$H #' ) #' #' # Compute AGB(Mg) per plot #' AGBplot <- summaryByPlot(AGBtree, KarnatakaForest$plotId) #' #' # Compute AGB(Mg) per tree without height information (Eq. 7 from Chave et al. (2014)) #' AGBplotChave <- summaryByPlot( #' computeAGB( #' D = KarnatakaForest$D, WD = KarnatakaForest$WD, #' coord = KarnatakaForest[, c("long", "lat")] #' ), #' plot = KarnatakaForest$plotId #' ) #' #' # Compute AGB(Mg) per tree with Feldpausch et al. 
(2012) regional H-D model #' AGBplotFeld <- summaryByPlot( #' computeAGB( #' D = KarnatakaForest$D, WD = KarnatakaForest$WD, #' H = KarnatakaForest$Hfeld #' ), #' plot = KarnatakaForest$plotId #' ) #' #' ############################################################################# #' # PROPAGATING ERRORS #' #' KarnatakaForest$sdWD <- dataWD$sdWD #' KarnatakaForest$HfeldRSE <- dataHfeld$RSE #' #' # Per plot using the local HD model constructed above (modelHD) #' resultMC <- AGBmonteCarlo( #' D = KarnatakaForest$D, WD = KarnatakaForest$WD, errWD = KarnatakaForest$sdWD, #' HDmodel = HDmodel, Dpropag = "chave2004" #' ) #' resMC <- summaryByPlot(resultMC$AGB_simu, KarnatakaForest$plotId) #' #' # Per plot using the Feldpaush regional HD averaged model #' AGBmonteCarlo( #' D = KarnatakaForest$D, WD = KarnatakaForest$WD, #' errWD = KarnatakaForest$sdWD, H = KarnatakaForest$Hfeld, #' errH = KarnatakaForest$HfeldRSE, Dpropag = "chave2004" #' ) #' resMC <- summaryByPlot(resultMC$AGB_simu, KarnatakaForest$plotId) #' #' # Per plot using Chave et al. (2014) Equation 7 #' resultMC <- AGBmonteCarlo( #' D = KarnatakaForest$D, WD = KarnatakaForest$WD, errWD = KarnatakaForest$sdWD, #' coord = KarnatakaForest[, c("long", "lat")], #' Dpropag = "chave2004" #' ) #' resMC <- summaryByPlot(resultMC$AGB_simu, KarnatakaForest$plotId) #' } #' @keywords internal "_PACKAGE" .onLoad <- function(libname, pkgname) { } .onAttach <- function(libname, pkgname) { basePath <- cachePath() if(attr(basePath, "source")=="temp") { packageStartupMessage( "Using temporary cache", "\n It is recommended to use a permanent cache to avoid to re-download files on each session.", "\n See function createCache() or BIOMASS.cache option." ) } if(attr(basePath, "source")=="data") { packageStartupMessage( "Using user data cache ", basePath, "\n To clear or remove cache see function clearCache()." ) } }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/pkgname.R
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "H" )) } #' Predicting tree height #' #' The function predicts height from diameter based on a fitted model. #' #' @param D Vector of diameter (in cm). #' @param model A height-diameter model output by the function [modelHD()] #' @param err If `TRUE`, An error is taken randomly from a normal distribution with a mean of #' zero and a standard deviation equalled to the residual standard error of the model (RSE). Only used #' for the Monte Carlo approach (see [AGBmonteCarlo()]), otherwise it should be #' let as `FALSE`, the default case. #' @param plot (optional) Plot ID, must be either one value, or a vector of the same length as D. This argument is used to build #' stand-specific HD models. #' #' @details In the case where the error is `FALSE` and the model is a log-log model, we use the #' Baskerville correction, a bias correction factor used to get unbiased backtransformation values. #' #' @return Returns a vector of total tree height (in m). #' @author Maxime REJOU-MECHAIN, Ariane TANGUY, Arthur PERE #' @seealso [minpack.lm::nlsLM()] #' #' #' @importFrom data.table data.table #' @keywords Internal predictHeight <- function(D, model, err = FALSE, plot = NULL) { ### From the diameter and the model, compute (with or without error) the height method <- model$method logmod <- any(grepl("log", method)) if (is.null(plot) && length(model[[1]]) != 2) { stop("The argument model contains different HD models, use the argument plot to assign a given model to the trees") } if (!is.null(plot) && length(model[[1]]) != 2) { if (length(plot) == 1) { plot <- rep(plot, length(D)) } if (length(plot) != length(D)) { stop("The argument plot and D have not the same length") } if (any(!plot %in% names(model))) { stop( "Cannot find a HD model corresponding to ", paste(unique(plot[ !plot %in% names(model) ]), collapse = ", ") ) } data <- data.table(D = D, plot = plot) data[, H := predictHeight(D, model[[unique(plot)]], err), by = plot] return(data[, H]) } D <- data.table(D = D) if (!err) { if (logmod) { e <- 0.5 * model$RSElog^2 # Baskerville correction } else { e <- 0 # No error } } else { if (logmod) { e <- rnorm(nrow(D), 0, model$RSElog) # Log-log error } else { e <- rnorm(nrow(D), 0, model$RSE) # Michaelis or Weibull error } } if (logmod) { Hpredict <- exp(predict(model$model, D) + e) } else { Hpredict <- predict(model$model, D) + e } names(Hpredict) <- NULL # If H predicted values are negative due to random error assignment Hpredict[Hpredict <= 0] <- 0.1 return(Hpredict) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/predictHeight.R
#' Procrustes analysis
#'
#' Performs a Procrustes analysis. X is the target matrix, Y is the matrix to be fitted to the target.
#' The function returns a translation vector and a rotation matrix.
#' When applying the result, the rotation __must__ be applied before the translation.
#' __Warning: the row order of both matrices matters__ (rows are matched pairwise).
#'
#' @param X the target matrix
#' @param Y the matrix to be fitted to the target
#'
#' @return A list with the rotation matrix and the translation vector
#' @keywords Internal procrust analysis
#'
#' @author Arthur PERE
#'
procrust <- function(X, Y) {
  xmean <- colMeans(X)
  ymean <- colMeans(Y)

  # centre both matrices
  X <- scale(X, scale = FALSE)
  Y <- scale(Y, scale = FALSE)

  XY <- crossprod(X, Y)
  sol <- svd(XY)

  # optimal rotation and corresponding translation
  A <- sol$v %*% t(sol$u)
  b <- xmean - ymean %*% A

  return(list(rotation = A, translation = b))
}
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/procrust.R
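## A small sketch of how the output of procrust() above is meant to be used:
## the rotation is applied first, then the translation, as the documentation
## stresses. The corner coordinates are invented, and the call assumes the
## function defined above has been sourced (it is internal, so from the
## installed package it would be reached as BIOMASS:::procrust).
X <- matrix(c(0, 0,
              100, 0,
              100, 100,
              0, 100), ncol = 2, byrow = TRUE)   # target layout

theta <- pi / 6
R <- matrix(c(cos(theta), -sin(theta),
              sin(theta),  cos(theta)), ncol = 2, byrow = TRUE)
Y <- sweep(X %*% R, 2, c(250, 130), "+")          # rotated and shifted copy of X

res <- procrust(X, Y)

# Rotation first, then translation
Y_fitted <- sweep(Y %*% res$rotation, 2, as.numeric(res$translation), "+")
max(abs(Y_fitted - X))   # effectively zero: Y is mapped back onto X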
#' Retrieving tree height from models
#'
#' From the diameter and either i) a model, ii) the coordinates of the plot or iii) the region, this function gives an
#' estimation of the total tree height.
#'
#' @param D Vector of diameters.
#' @param model A model output by the function [modelHD()].
#' @param coord Coordinates of the site(s), either a vector (e.g. c(longitude, latitude)) or a
#' matrix/dataframe with two columns (e.g. cbind(longitude, latitude)).
#' @param region Area of your dataset, used to estimate tree height with the Weibull-H region-, continent-specific
#' and pantropical models proposed by Feldpausch et al. (2012). To be chosen between:
#' - `Africa`: Africa
#' - `CAfrica`: Central Africa
#' - `EAfrica`: Eastern Africa
#' - `WAfrica`: Western Africa
#' - `SAmerica`: Southern America
#' - `BrazilianShield`: Brazilian Shield
#' - `ECAmazonia`: East-Central Amazonia
#' - `GuianaShield`: Guiana Shield
#' - `WAmazonia`: Western Amazonia
#' - `SEAsia`: South-Eastern Asia
#' - `NAustralia`: Northern Australia
#' - `Pantropical`: Pantropical
#'
#' @param plot (optional) Plot ID, must be either one value, or a vector of the same length as D. This argument is used to build
#' stand-specific HD models.
#'
#' @return Returns a list with:
#' - `H`: Height predicted by the model
#' - `RSE`: Residual Standard Error of the model, or a vector of RSE values (one per plot)
#' @references
#' Feldpausch et al. _Tree height integrated into pantropical forest biomass estimates_. Biogeosciences (2012): 3381-3403.
#' Chave et al. _Improved allometric models to estimate the aboveground biomass of tropical trees_.
#' Global change biology 20.10 (2014): 3177-3190.
#' @author Ariane TANGUY, Maxime REJOU-MECHAIN, Arthur PERE
#' @seealso [modelHD()]
#' @export
#'
#' @examples
#' # Load a database
#' data(NouraguesHD)
#' model <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, method = "log2")
#'
#' # If a height model is available
#' H <- retrieveH(D = NouraguesHD$D, model = model)
#'
#' # If the only data available are the coordinates of your site
#' n <- length(NouraguesHD$D)
#' coord <- cbind(long = rep(-52.68, n), lat = rep(4.08, n))
#' \donttest{
#' H <- retrieveH(D = NouraguesHD$D, coord = coord)
#' }
#'
#' # If the only data available is the region of your site
#' H <- retrieveH(D = NouraguesHD$D, region = "GuianaShield")
retrieveH <- function(D, model = NULL, coord = NULL, region = NULL, plot = NULL) {

  # parameters verification -------------------------------------------------
  if (is.null(model) & is.null(region) & is.null(coord)) {
    stop("Either model, region or coord must be given")
  }

  if (!is.null(region) && length(region) != 1 && length(region) != length(D)) {
    stop("The number of regions does not match the number of trees")
  }

  if (!is.null(coord) && ((is.vector(coord) && length(coord) != 2) || (is.matrix(coord) && nrow(coord) != length(D)))) {
    stop("coord should be either:
         - a vector (e.g. c(longitude, latitude)), or
         - a matrix with two columns (longitude and latitude) having the same number of rows as the number of trees (length(D))")
  }

  if ((!is.null(model) && !is.null(coord)) || (!is.null(model) && !is.null(region)) || (!is.null(coord) && !is.null(region))) {
    stop("Too many inputs, choose only one among these arguments:
         - model
         - coord
         - region")
  }

  # the length of the plot is tested in predictHeight
  # the names of the plot and the names of the model are tested in predictHeight
  if (!is.null(plot) && is.null(model)) {
    stop("The 'plot' argument can only be used together with the 'model' argument")
  }

  # First case : with a model fitted with modelHD
  if (!is.null(model)) {
    H <- predictHeight(D, model, plot = plot)
    RSE <- if (!is.null(plot)) {
      sapply(model, function(x) x$RSE)
    } else if (length(model[[1]]) != 2) {
      model[[1]]$RSE
    } else {
      model$RSE
    }
  } else {
    # Second case : with the coordinates of your site, find the E index and estimate H following Chave et al. 2014 Global Change Biology
    if (!is.null(coord)) {
      if (is.null(dim(coord))) {
        coord <- as.matrix(t(coord))
      }

      E <- computeE(coord) # E = environmental index in Chave et al. 2014
      if (length(E) == 1) {
        E <- rep(E, length(D))
      }

      logD <- log(D)
      # eq 6a Chave et al. 2014
      logH <- 0.893 - E + 0.760 * logD - 0.0340 * I(logD^2)
      RSE <- 0.243

      H <- exp(logH + 0.5 * RSE^2)
    } else {
      # Third case : with the region, use the Weibull parameters from Feldpausch et al. 2012 Biogeosciences
      feldCoef <- BIOMASS::feldCoef
      a <- feldCoef[region, "a"]
      b <- feldCoef[region, "b"]
      c <- feldCoef[region, "c"]
      RSE <- feldCoef[region, "RSE"]

      # eq 5 in Feldpausch et al. 2012
      H <- a * (1 - exp(-b * D^c))
    }
  }

  output <- list(H = as.numeric(H), RSE = RSE)
  return(output)
}
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/retrieveH.R
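## A hand-worked sketch of the "coord" branch of retrieveH() above (equation 6a
## of Chave et al. 2014), to make the role of the E index and of the RSE-based
## correction explicit. The value of E is purely illustrative: in the package it
## is obtained from computeE(), which requires the climate rasters.
D   <- c(10, 25, 60)   # diameters in cm (made-up values)
E   <- -0.1            # illustrative value of the bioclimatic index E only
RSE <- 0.243           # residual standard error reported for eq. 6a

logD <- log(D)
logH <- 0.893 - E + 0.760 * logD - 0.0340 * logD^2

# Back-transformation with the same half-variance correction as in retrieveH()
H <- exp(logH + 0.5 * RSE^2)
round(H, 1)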
if (getRversion() >= "2.15.1") { utils::globalVariables(c( "indice_line", "indice_col", "V1" )) } #' Summarize by plot (or subplot) the posterior distribution of AGB values #' #' @description #' This function summarizes the matrix `AGB_val` given by the function [AGBmonteCarlo()] by plot. Or just do the sums #' for each plot of the AGB if the argument `AGB_val` is the resulting vector from the function [computeAGB()]. #' #' @details #' If some trees belong to an unknown plot (i.e. NA value in the plot arguments), their AGB values are randomly assigned #' to a plot at each iteration of the AGB monte Carlo approach. Or discarded when using output from [computeAGB()]. #' #' The `drawPlot` argument is a logical that if it is set `TRUE`, a graph will appear with the plot given on absciss and the value #' of AGB on ordinate, the red segments are the quantile, if `AGB_val` is the result of the function [AGBmonteCarlo()]. #' If the `subplot` arguments is set and the `drawPlot` is set `TRUE`, a graph is drawn with the spatialisation of the plots. #' #' @param AGB_val Matrix resulting from the function [AGBmonteCarlo()] (AGB_val element of the list), #' or just the output of the function [AGBmonteCarlo()]. Or the output of the function [computeAGB()] #' @param plot Vector with the code of plot #' @param drawPlot a logical to draw the plot (see Details) #' @param subplot Data frame, output of the function [cutPlot()] #' #' @return a data frame where: #' - `plot`: the code of the plot #' - `AGB`: AGB value at the plot level #' - `Cred_2.5`: the quantile 2.5\% for the plot (when output of [AGBmonteCarlo()] is used) #' - `Cred_97.5`: the quantile 97.5\% for the plot (when output of [AGBmonteCarlo()] is used) #' #' If the `subplot` is set, the output is a list with the previous data frame and a simple features (sf) geometry object. #' #' #' @export #' #' @importFrom data.table data.table := first setDT #' @importFrom grDevices terrain.colors #' @importFrom graphics segments #' @importFrom stats quantile #' @examples #' #' # Load a database #' data(NouraguesHD) #' data(KarnatakaForest) #' #' # Modelling height-diameter relationship #' HDmodel <- modelHD(D = NouraguesHD$D, H = NouraguesHD$H, method = "log2") #' #' # Retrieving wood density values #' \donttest{ #' KarnatakaWD <- getWoodDensity(KarnatakaForest$genus, KarnatakaForest$species, #' stand = KarnatakaForest$plotId #' ) #' } #' #' # Propagating errors #' \donttest{ #' filt <- KarnatakaForest$plotId %in% c("BSP20", "BSP14") #' resultMC <- AGBmonteCarlo( #' D = KarnatakaForest$D[filt], WD = KarnatakaWD$meanWD[filt], #' errWD = KarnatakaWD$sdWD[filt], HDmodel = HDmodel #' ) #' #' plot <- KarnatakaForest$plotId[ filt ] #' #' # The summary by plot #' summaryByPlot(AGB_val = resultMC$AGB_simu, plot) #' #' # The summary by plot for computeAGB #' H <- retrieveH(KarnatakaForest$D[filt], model = HDmodel)$H #' AGB <- computeAGB(KarnatakaForest$D[filt], WD = KarnatakaWD$meanWD[filt], H = H) #' summaryByPlot(AGB, plot) #' } summaryByPlot <- function(AGB_val, plot, drawPlot = FALSE, subplot = NULL) { # parameters verification ------------------------------------------------- if (is.list(AGB_val)) { AGB_val <- AGB_val$AGB_simu } if (!is.matrix(AGB_val) && !is.vector(AGB_val)) { stop( "The AGB_val must be a matrix you have for the result of the function ", "'AGBmonteCarlo', or just the result of the function. 
", "Or the result from the function 'computeAGB'" ) } if (length(plot) != ifelse(is.matrix(AGB_val), nrow(AGB_val), length(AGB_val))) { stop("Your 'plot' vector have not the same length as your number of row in the matrix") } if (!is.null(subplot)) { if (!requireNamespace("sf", quietly = TRUE)) { warning( 'To use this part of the function you must have the "sf" library\n\n', '\t\tinstall.packages("sf")' ) subplot <- NULL } if (!any(subplot$subplot %in% plot)) { warning( "The subplot parameter do not correspond to any plot" ) subplot <- NULL } } # function if it's a vector ----------------------------------------------- if (is.vector(AGB_val)) { data <- data.table(AGB = AGB_val, plot = plot) data <- na.omit(data) AGB <- data[, .(AGB = sum(AGB)), by = plot] } else { # function if it's a matrix ----------------------------------------------- Plot <- data.table(plot = plot) indice_tree <- Plot[is.na(plot), .I, by = plot] # filter if there is there is NA in the AGB_val filter <- rowSums(is.na(AGB_val)) > 0 # take the first tree in the database by plot indice_first <- Plot[!is.na(plot) & !filter, .(indice_line = first(.I), plot = unique(plot)), by = plot] # if there is trees without label ----------------------------------------- if (nrow(indice_tree) != 0) { # Create a table with I : indice of the tree to distribute # indice_line : a random sample of matrix line inside the plot without the trees to distribute # indice_col : the column index of the matrix by I mySample <- function(plot1, n) { return(samples = indice_first[, sample(indice_line, n, replace = TRUE)]) } n <- ncol(AGB_val) samples <- indice_tree[, .(indice_line = mySample(plot, n), indice_col = 1:n), by = I] # remove the index for the tree to distribute when it is NA samples <- samples[ !(I %in% filter) ] } # function summary -------------------------------------------------------- mySummary <- function(x, matrix) { # deal with plots with only one tree individual if(!is.null(nrow(matrix[x, ]))){ resAGB <- colSums(matrix[x, ], na.rm = TRUE) }else{ resAGB <-matrix[x, ] } # if there is trees without label if (nrow(indice_tree) != 0) { subsample <- samples[x[1] == indice_line, ] # if the tree belong among the subplot if (nrow(subsample) != 0) { # sum for the trees I whose are not in the subplot, by column sums <- subsample[, sum(matrix[I, unique(indice_col)], na.rm = TRUE), by = indice_col] sums[is.na(V1), V1 := 0] # if there is any NA update the table resAGB[sums$indice_col] <- resAGB[sums$indice_col] + sums$V1 # sum the result of the table } } return(list( AGB = mean(resAGB), Cred_2.5 = quantile(resAGB, probs = 0.025), Cred_97.5 = quantile(resAGB, probs = 0.975) )) } AGB <- Plot[!is.na(plot), mySummary(.I, AGB_val), by = plot] if (drawPlot) { with(AGB[order(AGB)], { plot(AGB, pch = 20, xlab = "", ylab = "AGB (Mg/ha)", ylim = range(Cred_2.5, Cred_97.5), las = 1, cex.lab = 1.3, xaxt = "n", main = "AGB by plot" ) axis(1, at = seq(length(AGB)), labels = plot, las = 2) segments(x0 = seq(length(AGB)), y0 = Cred_2.5, y1 = Cred_97.5, col = "red") }) } } if (!is.null(subplot)) { setDT(subplot) list_poly <- lapply(split(subplot, by = "subplot"), function(data) { mat <- data[order(corner), .(XAbs, YAbs)] mat <- as.matrix(rbind(mat, mat[1, ])) output <- list() output$poly <- sf::st_polygon(list(mat)) output$AGB <- AGB[ plot == unique(data$subplot), AGB] * data[, 10000 / (diff(range(XRel)) * diff(range(YRel)))] output }) sf_obj <- sf::st_sf(polygon = lapply(list_poly, "[[", 1), plot = names(list_poly), AGB = sapply(list_poly, "[[", 2)) if 
(drawPlot) { plot(sf_obj["AGB"], main = "AGB (Mg / ha)", key.pos = 1, pal = function(n) { rev(terrain.colors(n)) }) } return(list(AGB = as.data.frame(AGB), polygon = sf_obj)) } return(as.data.frame(AGB)) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/summaryByPlot.R
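## A toy illustration of what the credibility bounds returned by summaryByPlot()
## represent: the simulated tree-level AGB values are summed within each plot
## for every Monte Carlo draw, and the per-plot sums are then summarised by
## their mean and 2.5%/97.5% quantiles. The fake matrix below (3 trees x 1000
## draws) stands in for the AGB_simu element of an AGBmonteCarlo() result; all
## numbers are invented and summaryByPlot() itself is not called.
set.seed(1)
AGB_simu <- matrix(rlnorm(3 * 1000, meanlog = 0, sdlog = 0.3), nrow = 3)
plot_id  <- c("P1", "P1", "P2")

per_draw <- rowsum(AGB_simu, plot_id)  # plot-level AGB for each draw
t(apply(per_draw, 1, function(x) c(
  AGB       = mean(x),
  Cred_2.5  = unname(quantile(x, 0.025)),
  Cred_97.5 = unname(quantile(x, 0.975))
)))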
#' @rdname HDmethods #' @return Result of a model (nlsM object) #' @importFrom minpack.lm nlsLM nls.lm.control #' @importFrom methods is weibullFunction <- function(data, weight = NULL) { ### Compute the weibull model of the H-D relationship H <- data$H D <- data$D Hmax <- quantile(H, probs = 0.90, na.rm = TRUE) init <- list(a = as.double(Hmax), b = 24.9, c = 0.8) count <- 1 maxIter <- 50 converge <- FALSE if (anyNA(weight)) weight <- NULL while (converge == FALSE && count <= 10) { tt <- tryCatch({ if (is.null(weight)) { nlsLM(H ~ a * (1 - exp(-(D / b)^c)), start = init, control = nls.lm.control(maxiter = maxIter) ) } else { nlsLM(H ~ a * (1 - exp(-(D / b)^c)), start = init, weights = weight, control = nls.lm.control(maxiter = maxIter) ) } }, error = function(e) e, warning = function(w) w ) if (is(tt, "warning")) { count <- count + 1 maxIter <- maxIter + 50 } else { converge <- TRUE } } model <- if (is.null(weight)) { nlsLM(H ~ a * (1 - exp(-(D / b)^c)), start = init, control = nls.lm.control(maxiter = maxIter) ) } else { nlsLM(H ~ a * (1 - exp(-(D / b)^c)), start = init, weights = weight, control = nls.lm.control(maxiter = maxIter) ) } return(model) }
/scratch/gouwar.j/cran-all/cranData/BIOMASS/R/weibullFunction.R
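## weibullFunction() above is an internal fitting helper. The sketch below fits
## the same three-parameter Weibull curve, H = a * (1 - exp(-(D/b)^c)), directly
## with minpack.lm::nlsLM on simulated data, using starting values chosen the
## same way as in the helper (a from the 90th percentile of H, b and c fixed).
## The data and the noise level are invented.
library(minpack.lm)

set.seed(123)
D <- runif(300, 5, 120)
H <- 42 * (1 - exp(-(D / 25)^0.9)) * exp(rnorm(length(D), 0, 0.08))
dat <- data.frame(D = D, H = H)

init <- list(a = as.double(quantile(dat$H, 0.90)), b = 24.9, c = 0.8)

fit <- nlsLM(H ~ a * (1 - exp(-(D / b)^c)),
             data = dat, start = init,
             control = nls.lm.control(maxiter = 50))
coef(fit)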
## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, cache = TRUE) test <- TRUE CACHE <- TRUE require(knitr) require(BIOMASS) ## ---- eval=F------------------------------------------------------------------ # install.packages("BIOMASS") ## ---- eval=F------------------------------------------------------------------ # require(BIOMASS) # require(knitr) # To build tables in this document ## ---- cache=CACHE------------------------------------------------------------- data(KarnatakaForest) str(KarnatakaForest) # data(NouraguesHD) str(NouraguesHD) ## ---- cache=CACHE------------------------------------------------------------- selecPlot <- KarnatakaForest$plotId %in% c("BSP2", "BSP12", "BSP14", "BSP26", "BSP28", "BSP30", "BSP34", "BSP44", "BSP63", "BSP65") KarnatakaForestsub <- droplevels(KarnatakaForest[selecPlot, ]) ## ----eval=test, cache=CACHE--------------------------------------------------- Taxo <- correctTaxo(genus = KarnatakaForestsub$genus, species = KarnatakaForestsub$species, useCache = FALSE, verbose = FALSE) KarnatakaForestsub$genusCorr <- Taxo$genusCorrected KarnatakaForestsub$speciesCorr <- Taxo$speciesCorrected ## ----eval=test, cache=CACHE--------------------------------------------------- APG <- getTaxonomy(KarnatakaForestsub$genusCorr, findOrder = TRUE) KarnatakaForestsub$familyAPG <- APG$family KarnatakaForestsub$orderAPG <- APG$order ## ----eval=test, cache=CACHE--------------------------------------------------- dataWD <- getWoodDensity( genus = KarnatakaForestsub$genusCorr, species = KarnatakaForestsub$speciesCorr, stand = KarnatakaForestsub$plotId ) ## ----eval=test, cache=CACHE--------------------------------------------------- LocalWoodDensity <- data.frame( genus = c("Ziziphus", "Terminalia", "Garcinia"), species = c("oenopolia", "bellirica", "indica"), wd = c(0.65, 0.72, 0.65) ) dataWD <- getWoodDensity( genus = KarnatakaForestsub$genusCorr, species = KarnatakaForestsub$speciesCorr, family = KarnatakaForestsub$familyAPG, stand = KarnatakaForestsub$plotID, addWoodDensityData = LocalWoodDensity ) ## ----eval=test, cache=CACHE--------------------------------------------------- # At species level sum(dataWD$levelWD == "species") # At genus level sum(dataWD$levelWD == "genus") # At plot level sum(!dataWD$levelWD %in% c("genus", "species")) ## ----echo=TRUE, cache=CACHE--------------------------------------------------- result <- modelHD( D = NouraguesHD$D, H = NouraguesHD$H, useWeight = TRUE ) kable(result) ## ---- cache=CACHE------------------------------------------------------------- HDmodel <- modelHD( D = NouraguesHD$D, H = NouraguesHD$H, method = "log2", useWeight = TRUE ) ## ---- cache=CACHE------------------------------------------------------------- HDmodelPerPlot <- modelHD( D = NouraguesHD$D, H = NouraguesHD$H, method = "weibull", useWeight = TRUE, plot = NouraguesHD$plotId ) ResHD <- t(sapply(HDmodelPerPlot, function(x) c(coef(x$model), RSE = x$RSE))) kable(ResHD, row.names = TRUE, digits = 3) ## ---- cache=CACHE------------------------------------------------------------- dataHlocal <- retrieveH( D = KarnatakaForestsub$D, model = HDmodel ) ## ---- cache=CACHE------------------------------------------------------------- dataHfeld <- retrieveH( D = KarnatakaForestsub$D, region = "SEAsia" ) ## ---- eval=F, cache=CACHE----------------------------------------------------- # dataHchave <- retrieveH( # D = KarnatakaForestsub$D, # coord = KarnatakaForestsub[, c("long", "lat")] # ) ## ---- 
cache=CACHE------------------------------------------------------------- KarnatakaForestsub$WD <- dataWD$meanWD KarnatakaForestsub$H <- dataHlocal$H KarnatakaForestsub$Hfeld <- dataHfeld$H ## ----warning=F, cache=CACHE--------------------------------------------------- AGBtree <- computeAGB( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, H = KarnatakaForestsub$H ) ## ----warning=F, cache=CACHE--------------------------------------------------- AGBplot <- summaryByPlot(AGBtree, KarnatakaForestsub$plotId) ## ----warning=F, eval=F, cache=CACHE------------------------------------------- # AGBplotChave <- summaryByPlot( # computeAGB( # D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, # coord = KarnatakaForestsub[, c("long", "lat")] # ), # KarnatakaForestsub$plotId # ) ## ----warning=F, cache=CACHE--------------------------------------------------- AGBplotFeld <- summaryByPlot( computeAGB( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, H = KarnatakaForestsub$Hfeld ), plot = KarnatakaForestsub$plotId ) ## ---- cache=CACHE------------------------------------------------------------- KarnatakaForestsub$sdWD <- dataWD$sdWD KarnatakaForestsub$HfeldRSE <- dataHfeld$RSE ## ---- cache=CACHE------------------------------------------------------------- resultMC <- AGBmonteCarlo(D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, errWD = KarnatakaForestsub$sdWD, HDmodel = HDmodel, Dpropag = "chave2004") Res <- summaryByPlot(resultMC$AGB_simu, KarnatakaForestsub$plotId) Res <- Res[order(Res$AGB), ] plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) segments(seq(nrow(Res)), Res$Cred_2.5, seq(nrow(Res)), Res$Cred_97.5, col = "red") ## ---- eval=F, cache=CACHE----------------------------------------------------- # resultMC <- AGBmonteCarlo( # D = KarnatakaForestsub$D, # WD = KarnatakaForestsub$WD, # errWD = KarnatakaForestsub$sdWD, # H = KarnatakaForestsub$Hfeld, # errH = KarnatakaForestsub$HfeldRSE, # Dpropag = "chave2004" # ) # # Res <- summaryByPlot(resultMC$AGB_simu, KarnatakaForestsub$plotId) # Res <- Res[order(Res$AGB), ] # plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) # segments(seq(nrow(Res)), Res$Cred_2.5, seq(nrow(Res)), Res$Cred_97.5, col = "red") ## ---- eval=F,cache=CACHE------------------------------------------------------ # resultMC <- AGBmonteCarlo( # D = KarnatakaForestsub$D, # WD = KarnatakaForestsub$WD, # errWD = KarnatakaForestsub$sdWD, # coord = KarnatakaForestsub[, c("long", "lat")], # Dpropag = "chave2004" # ) # Res <- summaryByPlot(resultMC$AGB_simu, KarnatakaForestsub$plotId) # Res <- Res[order(Res$AGB), ] # plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) # segments(seq(nrow(Res)), Res$Cred_2.5, seq(nrow(Res)), Res$Cred_97.5, col = "red") ## ---- cache=CACHE------------------------------------------------------------- NouraguesHD$Hmix <- NouraguesHD$H NouraguesHD$RSEmix <- 0.5 filt <- is.na(NouraguesHD$Hmix) NouraguesHD$Hmix[filt] <- retrieveH(NouraguesHD$D, model = HDmodel)$H[filt] NouraguesHD$RSEmix[filt] <- HDmodel$RSE ## ----eval=F, cache=CACHE------------------------------------------------------ # wd <- getWoodDensity(NouraguesHD$genus, NouraguesHD$species) # resultMC <- AGBmonteCarlo( # D = NouraguesHD$D, WD = wd$meanWD, errWD = wd$sdWD, # H = NouraguesHD$Hmix, errH = NouraguesHD$RSEmix, # Dpropag = "chave2004" # ) # Res <- summaryByPlot(resultMC$AGB_simu, 
NouraguesHD$plotId) # Res <- Res[order(Res$AGB), ] # plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB (Mg/ha)", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) # segments(1:nrow(Res), Res$Cred_2.5, 1:nrow(Res), Res$Cred_97.5, col = "red")
/scratch/gouwar.j/cran-all/cranData/BIOMASS/inst/doc/BIOMASS.R
--- title: 'Vignette BIOMASS' output: prettydoc::html_pretty: number_sections: yes toc: yes highlight: vignette self_contained: yes vignette: > %\VignetteEngine{knitr::rmarkdown} %\VignetteIndexEntry{Vignette BIOMASS} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, cache = TRUE) test <- TRUE CACHE <- TRUE require(knitr) require(BIOMASS) ``` #Load BIOMASS and datasets **Install BIOMASS (to be done once)** ```{r, eval=F} install.packages("BIOMASS") ``` **Load the package** ```{r, eval=F} require(BIOMASS) require(knitr) # To build tables in this document ``` **Load the two datasets stored in the package** ```{r, cache=CACHE} data(KarnatakaForest) str(KarnatakaForest) # data(NouraguesHD) str(NouraguesHD) ``` **Select 10 plots for illustrative purpose** ```{r, cache=CACHE} selecPlot <- KarnatakaForest$plotId %in% c("BSP2", "BSP12", "BSP14", "BSP26", "BSP28", "BSP30", "BSP34", "BSP44", "BSP63", "BSP65") KarnatakaForestsub <- droplevels(KarnatakaForest[selecPlot, ]) ``` #Retrieve wood density ##Check and retrieve taxonomy **First, check for any typo in the taxonomy** ```{r eval=test, cache=CACHE} Taxo <- correctTaxo(genus = KarnatakaForestsub$genus, species = KarnatakaForestsub$species, useCache = FALSE, verbose = FALSE) KarnatakaForestsub$genusCorr <- Taxo$genusCorrected KarnatakaForestsub$speciesCorr <- Taxo$speciesCorrected ``` **If needed, retrieve APG III families and orders from genus names** ```{r eval=test, cache=CACHE} APG <- getTaxonomy(KarnatakaForestsub$genusCorr, findOrder = TRUE) KarnatakaForestsub$familyAPG <- APG$family KarnatakaForestsub$orderAPG <- APG$order ``` ## Wood density **Retrieve wood density using the plot level average if no genus level information is available** ```{r eval=test, cache=CACHE} dataWD <- getWoodDensity( genus = KarnatakaForestsub$genusCorr, species = KarnatakaForestsub$speciesCorr, stand = KarnatakaForestsub$plotId ) ``` **The same but using the family average and adding other wood density values as references (here invented for the example)** ```{r eval=test, cache=CACHE} LocalWoodDensity <- data.frame( genus = c("Ziziphus", "Terminalia", "Garcinia"), species = c("oenopolia", "bellirica", "indica"), wd = c(0.65, 0.72, 0.65) ) dataWD <- getWoodDensity( genus = KarnatakaForestsub$genusCorr, species = KarnatakaForestsub$speciesCorr, family = KarnatakaForestsub$familyAPG, stand = KarnatakaForestsub$plotID, addWoodDensityData = LocalWoodDensity ) ``` **Below the number of wood density value estimated at the species, genus and plot level:** ```{r eval=test, cache=CACHE} # At species level sum(dataWD$levelWD == "species") # At genus level sum(dataWD$levelWD == "genus") # At plot level sum(!dataWD$levelWD %in% c("genus", "species")) ``` #Build height-diameter models **You may compare different models at once** ```{r echo=TRUE, cache=CACHE} result <- modelHD( D = NouraguesHD$D, H = NouraguesHD$H, useWeight = TRUE ) kable(result) ``` **Compute the local H-D model with the lowest RSE** ```{r, cache=CACHE} HDmodel <- modelHD( D = NouraguesHD$D, H = NouraguesHD$H, method = "log2", useWeight = TRUE ) ``` **Compute models specific to given stands** ```{r, cache=CACHE} HDmodelPerPlot <- modelHD( D = NouraguesHD$D, H = NouraguesHD$H, method = "weibull", useWeight = TRUE, plot = NouraguesHD$plotId ) ResHD <- t(sapply(HDmodelPerPlot, function(x) c(coef(x$model), RSE = x$RSE))) kable(ResHD, row.names = TRUE, digits = 3) ``` # Retrieve height data **Retrieve height data from a local Height-diameter model** (Note that using a HD model built on French 
guianan trees for Indian trees is only for illustrative purpose here) ```{r, cache=CACHE} dataHlocal <- retrieveH( D = KarnatakaForestsub$D, model = HDmodel ) ``` **Retrieve height data from a Feldpaush et al. (2012) averaged model** ```{r, cache=CACHE} dataHfeld <- retrieveH( D = KarnatakaForestsub$D, region = "SEAsia" ) ``` **Retrieve height data from Chave et al. (2012) equation 6** ```{r, eval=F, cache=CACHE} dataHchave <- retrieveH( D = KarnatakaForestsub$D, coord = KarnatakaForestsub[, c("long", "lat")] ) ``` # Estimate AGB **Organize data** ```{r, cache=CACHE} KarnatakaForestsub$WD <- dataWD$meanWD KarnatakaForestsub$H <- dataHlocal$H KarnatakaForestsub$Hfeld <- dataHfeld$H ``` **Compute AGB(Mg) per tree** ```{r warning=F, cache=CACHE} AGBtree <- computeAGB( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, H = KarnatakaForestsub$H ) ``` **Compute AGB(Mg) per plot (need to be divided by plot area to get Mg/ha)** ```{r warning=F, cache=CACHE} AGBplot <- summaryByPlot(AGBtree, KarnatakaForestsub$plotId) ``` **Compute AGB(Mg) per tree without height information (Eq. 7 from Chave et al. (2014))** ```{r warning=F, eval=F, cache=CACHE} AGBplotChave <- summaryByPlot( computeAGB( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, coord = KarnatakaForestsub[, c("long", "lat")] ), KarnatakaForestsub$plotId ) ``` **Compute AGB(Mg) per tree with Feldpausch et al. (2012) regional H-D model** ```{r warning=F, cache=CACHE} AGBplotFeld <- summaryByPlot( computeAGB( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, H = KarnatakaForestsub$Hfeld ), plot = KarnatakaForestsub$plotId ) ``` # Propagate AGB errors **Organize data** ```{r, cache=CACHE } KarnatakaForestsub$sdWD <- dataWD$sdWD KarnatakaForestsub$HfeldRSE <- dataHfeld$RSE ``` **Propagate error for all tree at once using the local HD model constructed above (modelHD), i.e. non-independent allometric errors will be assigned to all trees at each iteration, independently of plots.** ```{r, cache=CACHE} resultMC <- AGBmonteCarlo(D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, errWD = KarnatakaForestsub$sdWD, HDmodel = HDmodel, Dpropag = "chave2004") Res <- summaryByPlot(resultMC$AGB_simu, KarnatakaForestsub$plotId) Res <- Res[order(Res$AGB), ] plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) segments(seq(nrow(Res)), Res$Cred_2.5, seq(nrow(Res)), Res$Cred_97.5, col = "red") ``` **Using the Feldpaush regional HD averaged model (code only given)** ```{r, eval=F, cache=CACHE} resultMC <- AGBmonteCarlo( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, errWD = KarnatakaForestsub$sdWD, H = KarnatakaForestsub$Hfeld, errH = KarnatakaForestsub$HfeldRSE, Dpropag = "chave2004" ) Res <- summaryByPlot(resultMC$AGB_simu, KarnatakaForestsub$plotId) Res <- Res[order(Res$AGB), ] plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) segments(seq(nrow(Res)), Res$Cred_2.5, seq(nrow(Res)), Res$Cred_97.5, col = "red") ``` **Per plot using the Chave et al. 
(2014) Equation 7 (code only given)** ```{r, eval=F,cache=CACHE} resultMC <- AGBmonteCarlo( D = KarnatakaForestsub$D, WD = KarnatakaForestsub$WD, errWD = KarnatakaForestsub$sdWD, coord = KarnatakaForestsub[, c("long", "lat")], Dpropag = "chave2004" ) Res <- summaryByPlot(resultMC$AGB_simu, KarnatakaForestsub$plotId) Res <- Res[order(Res$AGB), ] plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) segments(seq(nrow(Res)), Res$Cred_2.5, seq(nrow(Res)), Res$Cred_97.5, col = "red") ``` # Some tricks ##Mixing measured and estimated height values If you want to use a mix of directly-measured height and of estimated ones, you may do the following steps. (@) Build a vector of H and RSE where we assume an error of 0.5 m on directly measured trees ```{r, cache=CACHE } NouraguesHD$Hmix <- NouraguesHD$H NouraguesHD$RSEmix <- 0.5 filt <- is.na(NouraguesHD$Hmix) NouraguesHD$Hmix[filt] <- retrieveH(NouraguesHD$D, model = HDmodel)$H[filt] NouraguesHD$RSEmix[filt] <- HDmodel$RSE ``` (@) Apply the AGBmonteCarlo by setting the height values and their errors (which depend on whether the tree was directly measured or estimated) ```{r eval=F, cache=CACHE} wd <- getWoodDensity(NouraguesHD$genus, NouraguesHD$species) resultMC <- AGBmonteCarlo( D = NouraguesHD$D, WD = wd$meanWD, errWD = wd$sdWD, H = NouraguesHD$Hmix, errH = NouraguesHD$RSEmix, Dpropag = "chave2004" ) Res <- summaryByPlot(resultMC$AGB_simu, NouraguesHD$plotId) Res <- Res[order(Res$AGB), ] plot(Res$AGB, pch = 20, xlab = "Plots", ylab = "AGB (Mg/ha)", ylim = c(0, max(Res$Cred_97.5)), las = 1, cex.lab = 1.3) segments(1:nrow(Res), Res$Cred_2.5, 1:nrow(Res), Res$Cred_97.5, col = "red") ``` ##Add your tricks Please contact Maxime ([email protected]) if you would like to add here a code that may be useful for users (code authorship will be respected)
/scratch/gouwar.j/cran-all/cranData/BIOMASS/inst/doc/BIOMASS.Rmd
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, echo = TRUE, comment = "#>", fig.align = "center" ) require(BIOMASS) require(knitr) ## ----------------------------------------------------------------------------- trees <- read.csv(system.file("external", "NouraguesPlot.csv", package = "BIOMASS", mustWork = TRUE )) ## ----echo=FALSE--------------------------------------------------------------- kable(head(trees), digits = 3, row.names = FALSE, caption = "Head of the table trees") ## ---- fig.cap="Plot the coordinate long lat"---------------------------------- coord <- read.csv(system.file("external", "Coord.csv", package = "BIOMASS", mustWork = TRUE )) plot(coord[, c("Long", "Lat")], asp = 1) ## ----echo=FALSE--------------------------------------------------------------- kable(head(coord), digits = 3, row.names = FALSE, caption = "Head of the table coord") ## ---- cache=FALSE------------------------------------------------------------- correct_plot <- correctCoordGPS( longlat = coord[, c("Long", "Lat")], coordRel = coord[, c("xRel", "yRel")], rangeX = c(0, 100), rangeY = c(0, 100), drawPlot = TRUE, maxDist = 10, rmOutliers = TRUE ) str(correct_plot, max.level = 1) ## ----------------------------------------------------------------------------- coord_num <- numberCorner( projCoord = correct_plot$cornerCoords, plot = rep("NB1", 4), origin = c(F, F, F, T), clockWise = TRUE ) plot(coord_num[, c("X", "Y")], asp = 1) text(coord_num[, c("X", "Y")], labels = coord_num[, "corner"], pos = 2, offset = 0.2) ## ----------------------------------------------------------------------------- subplot <- cutPlot( projCoord = coord_num[, c("X", "Y")], plot = coord_num[, c("plot")], corner = coord_num[, c("corner")], gridsize = 25, dimX = 100, dimY = 100 ) ## ----echo=FALSE--------------------------------------------------------------- kable(head(subplot)) ## ----------------------------------------------------------------------------- trees$subplot <- attributeTree(trees[, c("xRel", "yRel")], rep("NB1", nrow(trees)), subplot) ## ----------------------------------------------------------------------------- trees$AGB <- computeAGB(trees$D, trees$WD, H = trees$H) AGB <- summaryByPlot(trees$AGB, trees$subplot, drawPlot = TRUE, subplot = subplot) print(AGB) ## ----------------------------------------------------------------------------- TreeCoord <- attributeTreeCoord( xy = trees[, c("xRel", "yRel")], plot = trees$plot, coordAbs = subplot, dim = c(100, 100) ) ## ----echo=FALSE--------------------------------------------------------------- kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ## ----------------------------------------------------------------------------- #TreeCoord <- as.data.frame( proj4::project(TreeCoord, proj = correct_plot$codeUTM, inverse = TRUE) ) ## ----echo=FALSE--------------------------------------------------------------- kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ## ----------------------------------------------------------------------------- coordAbs = data.frame(X = c(4.066923, 4.067865, 4.067842, 4.066905), Y = c(52.68883, 52.68877, 52.68793, 52.68783)) ncoordAbs = numberCorner(projCoord = coordAbs, plot = rep("NB1", 4), origin = c(TRUE, FALSE, FALSE, FALSE), clockWise = TRUE) TreeCoord <- attributeTreeCoord( xy = trees[, c("xRel", "yRel")], plot = trees$plot, coordAbs = ncoordAbs, dim = c(100, 100) ) ## 
----echo=FALSE--------------------------------------------------------------- kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord")
/scratch/gouwar.j/cran-all/cranData/BIOMASS/inst/doc/plot.R
--- title: "Manage trees and plot coordinates with BIOMASS" author: "Arthur Pere" date: "`r Sys.Date()`" output: prettydoc::html_pretty: number_sections: yes toc: yes highlight: vignette self_contained: yes theme: cayman vignette: > %\VignetteIndexEntry{Manage tree and plot coordinate with BIOMASS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, echo = TRUE, comment = "#>", fig.align = "center" ) require(BIOMASS) require(knitr) ``` # Requirement You need the following information to use the following function in your analysis : - Dimension of the plots - Coordinate GPS of minimum 3 points non-aligned and relatily far away with the corresponding relative coordinates - The origin corner - The X, Y directions ```{r} trees <- read.csv(system.file("external", "NouraguesPlot.csv", package = "BIOMASS", mustWork = TRUE )) ``` ```{r echo=FALSE} kable(head(trees), digits = 3, row.names = FALSE, caption = "Head of the table trees") ``` We can see in the table that we have for each trees the name of the plot we have, the `xRel` and `yRel`, the relative coordinate of the trees inside the plot. The rest of the column is for calculate the AGB at the end. ```{r, fig.cap="Plot the coordinate long lat"} coord <- read.csv(system.file("external", "Coord.csv", package = "BIOMASS", mustWork = TRUE )) plot(coord[, c("Long", "Lat")], asp = 1) ``` We can see on the plot that the corner coordinates are spread. ```{r echo=FALSE} kable(head(coord), digits = 3, row.names = FALSE, caption = "Head of the table coord") ``` In the table, we have the name of the plot, the coordinate `Lat`, `Long` (or another projected coordinates), and `xRel`, `yRel`, the relative coordinate for the points observed. # To manage the plots ## Import the dataset and visualisation The plot is referenced in the longitude latitude coordinate so you must have the package `proj4` if you are in this situation. If you have projected coordinate, you can continue with the `projCoord` argument instead of `longlat` argument. ## Correct the GPS coordinate ```{r, cache=FALSE} correct_plot <- correctCoordGPS( longlat = coord[, c("Long", "Lat")], coordRel = coord[, c("xRel", "yRel")], rangeX = c(0, 100), rangeY = c(0, 100), drawPlot = TRUE, maxDist = 10, rmOutliers = TRUE ) str(correct_plot, max.level = 1) ``` The output of the function is a list with a data.frame `corner` it's the corner of the plot, `polygon` the spatial polygon, `outliers` the vector with the line number of the outliers and `codeUTM` the UTM code for the polygon. The outliers are calculated by a measure of distance between the predicted points and the GPS points. If this distance is higher than the value of `maxDist`, the point is considered like outliers. ## Numbering the corner We have to number the corner of the plot, it is working if we have exactly 4 points for each plot, so we have to do the correctCoordGPS before if we have not the correct number of points. ```{r} coord_num <- numberCorner( projCoord = correct_plot$cornerCoords, plot = rep("NB1", 4), origin = c(F, F, F, T), clockWise = TRUE ) plot(coord_num[, c("X", "Y")], asp = 1) text(coord_num[, c("X", "Y")], labels = coord_num[, "corner"], pos = 2, offset = 0.2) ``` On the graph, you can noted than the corner number 1 the origin of the plot. 
## Cut the plot in multiple subplot ```{r} subplot <- cutPlot( projCoord = coord_num[, c("X", "Y")], plot = coord_num[, c("plot")], corner = coord_num[, c("corner")], gridsize = 25, dimX = 100, dimY = 100 ) ``` ```{r echo=FALSE} kable(head(subplot)) ``` # Trees managements ## Attribute the trees to the subplot ```{r} trees$subplot <- attributeTree(trees[, c("xRel", "yRel")], rep("NB1", nrow(trees)), subplot) ``` ## Calculate the AGB and spatialisation ```{r} trees$AGB <- computeAGB(trees$D, trees$WD, H = trees$H) AGB <- summaryByPlot(trees$AGB, trees$subplot, drawPlot = TRUE, subplot = subplot) print(AGB) ``` ## Attribute the trees to GPS coordinates There is two maners to attribute the trees to GPS coordinates ```{r} TreeCoord <- attributeTreeCoord( xy = trees[, c("xRel", "yRel")], plot = trees$plot, coordAbs = subplot, dim = c(100, 100) ) ``` ```{r echo=FALSE} kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ``` If you want to have in GPS (longitude/latitude) coordinates (need to install proj4 first) : ```{r} #TreeCoord <- as.data.frame( proj4::project(TreeCoord, proj = correct_plot$codeUTM, inverse = TRUE) ) ``` ```{r echo=FALSE} kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ``` If you want to have the GPS (longitude/latitude) coordinates without passing through all this step however you must use the numberCorner function: ```{r} coordAbs = data.frame(X = c(4.066923, 4.067865, 4.067842, 4.066905), Y = c(52.68883, 52.68877, 52.68793, 52.68783)) ncoordAbs = numberCorner(projCoord = coordAbs, plot = rep("NB1", 4), origin = c(TRUE, FALSE, FALSE, FALSE), clockWise = TRUE) TreeCoord <- attributeTreeCoord( xy = trees[, c("xRel", "yRel")], plot = trees$plot, coordAbs = ncoordAbs, dim = c(100, 100) ) ``` ```{r echo=FALSE} kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ```
/scratch/gouwar.j/cran-all/cranData/BIOMASS/inst/doc/plot.Rmd
--- title: "Manage trees and plot coordinates with BIOMASS" author: "Arthur Pere" date: "`r Sys.Date()`" output: prettydoc::html_pretty: number_sections: yes toc: yes highlight: vignette self_contained: yes theme: cayman vignette: > %\VignetteIndexEntry{Manage tree and plot coordinate with BIOMASS} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, echo = TRUE, comment = "#>", fig.align = "center" ) require(BIOMASS) require(knitr) ``` # Requirement You need the following information to use the following function in your analysis : - Dimension of the plots - Coordinate GPS of minimum 3 points non-aligned and relatily far away with the corresponding relative coordinates - The origin corner - The X, Y directions ```{r} trees <- read.csv(system.file("external", "NouraguesPlot.csv", package = "BIOMASS", mustWork = TRUE )) ``` ```{r echo=FALSE} kable(head(trees), digits = 3, row.names = FALSE, caption = "Head of the table trees") ``` We can see in the table that we have for each trees the name of the plot we have, the `xRel` and `yRel`, the relative coordinate of the trees inside the plot. The rest of the column is for calculate the AGB at the end. ```{r, fig.cap="Plot the coordinate long lat"} coord <- read.csv(system.file("external", "Coord.csv", package = "BIOMASS", mustWork = TRUE )) plot(coord[, c("Long", "Lat")], asp = 1) ``` We can see on the plot that the corner coordinates are spread. ```{r echo=FALSE} kable(head(coord), digits = 3, row.names = FALSE, caption = "Head of the table coord") ``` In the table, we have the name of the plot, the coordinate `Lat`, `Long` (or another projected coordinates), and `xRel`, `yRel`, the relative coordinate for the points observed. # To manage the plots ## Import the dataset and visualisation The plot is referenced in the longitude latitude coordinate so you must have the package `proj4` if you are in this situation. If you have projected coordinate, you can continue with the `projCoord` argument instead of `longlat` argument. ## Correct the GPS coordinate ```{r, cache=FALSE} correct_plot <- correctCoordGPS( longlat = coord[, c("Long", "Lat")], coordRel = coord[, c("xRel", "yRel")], rangeX = c(0, 100), rangeY = c(0, 100), drawPlot = TRUE, maxDist = 10, rmOutliers = TRUE ) str(correct_plot, max.level = 1) ``` The output of the function is a list with a data.frame `corner` it's the corner of the plot, `polygon` the spatial polygon, `outliers` the vector with the line number of the outliers and `codeUTM` the UTM code for the polygon. The outliers are calculated by a measure of distance between the predicted points and the GPS points. If this distance is higher than the value of `maxDist`, the point is considered like outliers. ## Numbering the corner We have to number the corner of the plot, it is working if we have exactly 4 points for each plot, so we have to do the correctCoordGPS before if we have not the correct number of points. ```{r} coord_num <- numberCorner( projCoord = correct_plot$cornerCoords, plot = rep("NB1", 4), origin = c(F, F, F, T), clockWise = TRUE ) plot(coord_num[, c("X", "Y")], asp = 1) text(coord_num[, c("X", "Y")], labels = coord_num[, "corner"], pos = 2, offset = 0.2) ``` On the graph, you can noted than the corner number 1 the origin of the plot. 
## Cut the plot in multiple subplot ```{r} subplot <- cutPlot( projCoord = coord_num[, c("X", "Y")], plot = coord_num[, c("plot")], corner = coord_num[, c("corner")], gridsize = 25, dimX = 100, dimY = 100 ) ``` ```{r echo=FALSE} kable(head(subplot)) ``` # Trees managements ## Attribute the trees to the subplot ```{r} trees$subplot <- attributeTree(trees[, c("xRel", "yRel")], rep("NB1", nrow(trees)), subplot) ``` ## Calculate the AGB and spatialisation ```{r} trees$AGB <- computeAGB(trees$D, trees$WD, H = trees$H) AGB <- summaryByPlot(trees$AGB, trees$subplot, drawPlot = TRUE, subplot = subplot) print(AGB) ``` ## Attribute the trees to GPS coordinates There is two maners to attribute the trees to GPS coordinates ```{r} TreeCoord <- attributeTreeCoord( xy = trees[, c("xRel", "yRel")], plot = trees$plot, coordAbs = subplot, dim = c(100, 100) ) ``` ```{r echo=FALSE} kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ``` If you want to have in GPS (longitude/latitude) coordinates (need to install proj4 first) : ```{r} #TreeCoord <- as.data.frame( proj4::project(TreeCoord, proj = correct_plot$codeUTM, inverse = TRUE) ) ``` ```{r echo=FALSE} kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ``` If you want to have the GPS (longitude/latitude) coordinates without passing through all this step however you must use the numberCorner function: ```{r} coordAbs = data.frame(X = c(4.066923, 4.067865, 4.067842, 4.066905), Y = c(52.68883, 52.68877, 52.68793, 52.68783)) ncoordAbs = numberCorner(projCoord = coordAbs, plot = rep("NB1", 4), origin = c(TRUE, FALSE, FALSE, FALSE), clockWise = TRUE) TreeCoord <- attributeTreeCoord( xy = trees[, c("xRel", "yRel")], plot = trees$plot, coordAbs = ncoordAbs, dim = c(100, 100) ) ``` ```{r echo=FALSE} kable(head(TreeCoord), digits = 3, row.names = FALSE, caption = "Head of the table TreeCoord") ```
/scratch/gouwar.j/cran-all/cranData/BIOMASS/vignettes/plot.Rmd
amod <- structure(function#Allometric scaling. ### Allometric models and parameters are used to scale organic growth. ##details<<. Allometric models are useful to ##scale size-components of organisms such as ##tree diameters (mp = \code{c(2,1)}) and ##basal areas (mp = \code{c(0.25 * ##pi,2)}). Several parameter groups ##(\code{c(a1,b1,a2,b2, ..., an,bn)}) can be ##recursively processed. This enables ##computation of complex organic ##variables. For example, above-ground tree ##biomass could be computed from two ##parameter groups for tree-biomass, and ##over-bark diameter scaling. ( x, ##<<\code{numeric} vector. mp = c(1,1), ##<<\code{numeric}. Allometric ##parameters. Default ##\code{c(1,1)} (see ##details). fun = y ~ a*(x ^ b) ##<<\code{formula}. ##Allometric ##model. To properly ##specify other ##formulas, the ##variables (e.g. x ##and y) should ##belong to ##\code{letters[20:26]}. ) { xn. <- FALSE if(is.data.frame(x)){ xnu <- cClass(x,'numeric') xn <- c(cClass(x,'integer'), cClass(x,'factor')) xn. <- length(xn)!=0 xn.. <- xn[!xn%in%c('x','csx')] cd <- x x <- x[,'csx'] names(x) <- cd[,'year']} feval <- function(fun,...){ e <- list(...) y <- eval(parse(text=fun), e) return(y)} allv <- all.vars(fun) prm <- allv[!allv%in%letters[20:26]] spt <- ceiling(seq_along(mp)/length(prm)) if(!is.list(mp)){ mp <- split(mp,spt) } dpr <- data.frame(do.call(rbind,mp)) names(dpr) <- prm for(i in 1:length(mp)){ x <- do.call(feval, c(fun,as.list(dpr[i,]))) } x1 <- c(NA,diff(x)) names(x1) <- names(x) xd <- data.frame(x = x1, csx = x) if(xn.&& length(xnu) > 1){ xd <- cd[,xnu] xd[,'x'] <- x1 xd[,'csx'] <- x } if(xn.) xd <- cbind(xd,cd[,xn..]) return(xd) ### \code{data.frame} of the scaled variable (x) and relative ### increments (csx). These are computed with \code{\link{setdiff}} ### function. } , ex=function() { ## Simulating TRW records: set.seed(1) trw <- ts(abs(rnorm(12,1,1)),start = 1950) ## Cumulative TRW: cri <- cumsum(trw) ## tree diameters td <- amod(cri,mp = c(2,1)) ## plot of the tree diameters and the ## relative increments: plot(ts(td)) })
/scratch/gouwar.j/cran-all/cranData/BIOdry/R/amod.R
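## The documentation of amod() above notes that several parameter groups
## (c(a1, b1, a2, b2, ...)) can be processed recursively. The sketch below
## chains two groups in a single call: first scaling cumulative ring widths to
## diameters (a = 2, b = 1), then diameters to basal areas (a = 0.25*pi, b = 2).
## The simulated series mirrors the example shipped with the function, and the
## chained call is only an illustration of that documented behaviour (assumes
## the BIOdry package is installed and loaded).
library(BIOdry)

set.seed(1)
trw <- ts(abs(rnorm(12, 1, 1)), start = 1950)   # simulated ring widths
cri <- cumsum(trw)                              # cumulative ring widths

# Two parameter groups processed in sequence: diameters, then basal areas
ba <- amod(cri, mp = c(2, 1, 0.25 * pi, 2))

# Basal areas (csx) and their annual increments (x)
plot(ts(ba))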