`ordicoeno` <- function(x, ordiplot, axis=1, legend=FALSE, cex=0.8, ncol=4, ...) {
#    if (!require(mgcv)) {stop("Requires package mgcv")}
    ordiscore <- scores(ordiplot, display="sites")[, axis]
    original <- cbind(x, ordiscore)
    sorted <- original
    seq <- order(ordiscore)
    sorted[1:nrow(original), ] <- original[seq, ]
    edfs <- array(NA, dim=c(ncol(x)))
    names(edfs) <- colnames(x)
    grDevices::palette(grDevices::rainbow(ncol(x)))
#
    pchtypes <- c(0:ncol(x))
    names(pchtypes) <- pchtypes
    pchtypes <- pchtypes - trunc(pchtypes/26)*26
#
    gammodel <- mgcv::gam(sorted[, 1] ~ s(ordiscore), data=sorted)
    edfs[1] <- summary(gammodel)$edf
    newdata1 <- data.frame(seq(min(sorted$ordiscore), max(sorted$ordiscore), length=1000))
    newdata2 <- data.frame(seq(min(sorted$ordiscore), max(sorted$ordiscore), length=20))
    colnames(newdata1) <- colnames(newdata2) <- "ordiscore"
    gamresult1 <- predict(gammodel, newdata1)
    gamresult2 <- predict(gammodel, newdata2)
    graphics::plot(newdata1$ordiscore, gamresult1, type="l", ylim=c(0, max(x)), col=1, pch=0,
        xlab="site score on ordination axis", ylab="species values", ...)
    graphics::points(newdata2$ordiscore, gamresult2, type="p", col=1, pch=pchtypes[1], cex=cex, ...)
    for (i in 2:ncol(x)) {
        gammodel <- mgcv::gam(sorted[, i] ~ s(ordiscore), data=sorted)
        gamresult1 <- predict(gammodel, newdata1)
        gamresult2 <- predict(gammodel, newdata2)
        edfs[i] <- summary(gammodel)$edf
        graphics::points(newdata1$ordiscore, gamresult1, type="l", pch=0, col=i, cex=cex, ...)
        graphics::points(newdata2$ordiscore, gamresult2, type="p", pch=pchtypes[i], col=i, cex=cex, ...)
    }
    colnames <- names(edfs)
    edfs <- as.numeric(edfs)
    names(edfs) <- colnames
    if (legend == T) {
        legend("top", legend=colnames, pch=pchtypes[1:ncol(x)], lty=1, col=c(1:ncol(x)), ncol=ncol)
    }
    grDevices::palette("default")
    cat("edfs from GAM models for each species...\n")
    return(edfs)
}
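# Editor's illustrative usage sketch (not part of the package source): coenoclines for
# vegan's 'dune' data along the first PCA axis. Object names are examples only; the
# block is wrapped in if (FALSE) so it is not evaluated when this file is sourced.
if (FALSE) {
    library(BiodiversityR)           # attaches vegan (dune data) and imports mgcv
    data(dune)
    ord <- vegan::rda(dune)          # unconstrained PCA of the community matrix
    plt <- vegan::ordiplot(ord)      # the ordiplot object supplies the site scores
    edfs <- ordicoeno(dune, ordiplot=plt, axis=1)
    edfs                             # effective degrees of freedom per species
}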
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordicoeno.R
`ordiequilibriumcircle` <- function(pca, ordiplot, ...) {
    `drawcircle` <- function(x0=0, y0=0, radius=1, npoints=100, ...) {
        a <- seq(0, 2*pi, len=npoints)
        c <- array(dim=c(2, npoints))
        c[1, ] <- x0 + cos(a)*radius
        c[2, ] <- y0 + sin(a)*radius
        for (i in 1:(npoints-1)) {
            graphics::segments(c[1, i], c[2, i], c[1, 1+i], c[2, 1+i], ...)
        }
        graphics::segments(c[1, i], c[2, i], c[1, npoints], c[2, npoints], ...)
    }
    eigen <- pca$CA$eig
    p <- length(eigen)
    n <- nrow(pca$CA$u)
    tot <- sum(eigen)
    const <- ((n-1)*tot)^0.25
    radius <- (2/p)^0.5
    radius <- radius * const
    result <- list(radius=radius, constant=const)
    drawcircle(radius=radius, ...)
    speciescoord <- scores(ordiplot, display="species")
    for (i in 1:nrow(speciescoord)) {
        length <- (speciescoord[i, 1]^2 + speciescoord[i, 2]^2)^0.5
        if (length > radius) {
            graphics::arrows(0, 0, speciescoord[i, 1], speciescoord[i, 2], ...)
        }
    }
    return(result)
}
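# Editor's illustrative usage sketch (not part of the package source): draws the
# equilibrium circle on a PCA of the dune data; species vectors longer than the
# circle radius are highlighted with arrows.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    pca <- vegan::rda(dune)
    plt <- vegan::ordiplot(pca, choices=c(1, 2))
    ordiequilibriumcircle(pca, plt)   # returns list(radius=..., constant=...)
}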
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordiequilibriumcircle.R
`ordinearest` <- function(ordiplot, dist, ...) {
    ord <- scores(ordiplot, display="sites", ...)
    dist <- as.matrix(dist)
    diag(dist) <- Inf
    nabo <- apply(dist, 1, which.min)
    graphics::arrows(ord[, 1], ord[, 2], ord[nabo, 1], ord[nabo, 2], ...)
    invisible()
}
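# Editor's illustrative usage sketch (not part of the package source): for each site,
# draw an arrow in ordination space towards its nearest neighbour in the original
# ecological distance matrix.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    plt <- vegan::ordiplot(vegan::rda(dune))
    ordinearest(plt, vegan::vegdist(dune, method="bray"))
}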
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordinearest.R
`ordisymbol` <- function(ordiplot, y, factor, col=1, colors=TRUE, pchs=TRUE,
    rainbow_hcl=TRUE, rainbow_hcl.c=90, rainbow_hcl.l=50, rainbow=TRUE,
    heat.colors=FALSE, terrain.colors=FALSE, topo.colors=FALSE, cm.colors=FALSE,
    legend=TRUE, legend.x="topleft", legend.ncol=1, ...)
{
    ordiscores <- scores(ordiplot, display="sites")
    groups <- table(y[, factor])
    m <- length(groups)
    if (m > 25) {
        warning("Symbol size was kept constant as there were more than 25 categories (> number of symbols that are currently used in R)")
        colors <- TRUE
        pchs <- FALSE
    }
    levels <- names(groups)
    if (rainbow == T) {
        grDevices::palette(rainbow(m))
        colors <- TRUE
    }
    if (heat.colors == T) {
        grDevices::palette(heat.colors(m))
        colors <- TRUE
    }
    if (terrain.colors == T) {
        grDevices::palette(terrain.colors(m))
        colors <- TRUE
    }
    if (topo.colors == T) {
        grDevices::palette(topo.colors(m))
        colors <- TRUE
    }
    if (cm.colors == T) {
        grDevices::palette(cm.colors(m))  # was 'topo.colors(m)', so the cm.colors option never selected the cm palette
        colors <- TRUE
    }
    if (rainbow_hcl == T) {
        grDevices::palette(colorspace::rainbow_hcl(m, c=rainbow_hcl.c, l=rainbow_hcl.l))
        colors <- TRUE
    }
    for (i in 1:m) {
        subs <- y[, factor] == levels[i]
        for (q in 1:length(subs)) {
            if (is.na(subs[q])) {subs[q] <- F}
        }
        scores <- ordiscores[subs, , drop=F]
        if (colors == T && pchs == T) {
            graphics::points(scores[, 1], scores[, 2], pch=i, col=i, ...)
        }
        if (colors == T && pchs == F) {
            graphics::points(scores[, 1], scores[, 2], pch=19, col=i, ...)
        }
        if (colors == F) {
            graphics::points(scores[, 1], scores[, 2], pch=i, col=col, ...)
        }
    }
    if (legend == T) {
        if (colors == T && pchs == T) {legend(x=legend.x, legend=levels, pch=c(1:m), col=c(1:m), ncol=legend.ncol)}
        if (colors == T && pchs == F) {legend(x=legend.x, legend=levels, pch=rep(19, m), col=c(1:m), ncol=legend.ncol)}
        if (colors == F) {legend(x=legend.x, legend=levels, pch=c(1:m))}
    }
    grDevices::palette("default")
}
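# Editor's illustrative usage sketch (not part of the package source): colour and
# symbol-code the sites of an ordination by a factor of the environmental data.
# The default rainbow_hcl palette requires the colorspace package.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    plt <- vegan::ordiplot(vegan::rda(dune))
    ordisymbol(plt, y=dune.env, factor="Management", legend=TRUE, legend.x="bottomright")
}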
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordisymbol.R
`ordivector` <- function(ordiplot, spec, lty=2, ...) {
    speciescoord <- scores(ordiplot, display="species")
    speciesselect <- speciescoord[rownames(speciescoord) == spec, ]
    sitescoord <- scores(ordiplot, display="sites")
    b1 <- speciesselect[2]/speciesselect[1]
    b2 <- -1/b1
    calc <- array(dim=c(nrow(sitescoord), 3))
    calc[, 3] <- sitescoord[, 2] - b2*sitescoord[, 1]
    calc[, 1] <- calc[, 3]/(b1-b2)
    calc[, 2] <- b1*calc[, 1]
    # graphics::segments() is vectorised, so one call draws the perpendicular projection of
    # every site onto the species vector (the original looped and redrew the full set each pass)
    graphics::segments(sitescoord[, 1], sitescoord[, 2], calc[, 1], calc[, 2], lty=lty)
    graphics::abline(0, b1, lty=lty)
    graphics::arrows(0, 0, speciesselect[1], speciesselect[2], lty=1, ...)
}
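# Editor's illustrative usage sketch (not part of the package source): project all sites
# onto the vector of one species; 'Agrostol' is a species (column) of the dune data.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    plt <- vegan::ordiplot(vegan::rda(dune), choices=c(1, 2))
    ordivector(plt, spec="Agrostol")
}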
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/ordivector.R
`prepare.bioenv` <- function(env, as.numeric=c()) {
    env2 <- env
    if (is.null(as.numeric) == F) {
        for (i in 1:length(as.numeric)) {
            if (any(names(env) == as.numeric[i])) {
                env2[, as.numeric[i]] <- as.numeric(env[, as.numeric[i]])
            }
        }
    }
    vars <- names(env2)
    for (i in 1:length(vars)) {
        focal.var <- which(names(env2) == vars[i])
        if (is.numeric(env2[, focal.var]) == F) {env2 <- env2[, -focal.var]}
    }
    return(env2)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/prepare.bioenv.R
`radfitresult` <- function(x, y="", factor, level, plotit=T) {
    if (inherits(y, "data.frame")) {
        subs <- y[, factor] == level
        for (q in 1:length(subs)) {
            if (is.na(subs[q])) {subs[q] <- F}
        }
        x <- x[subs, , drop=F]
        freq <- apply(x, 2, sum)
        subs <- freq > 0
        x <- x[, subs, drop=F]
    }
    x <- as.matrix(apply(x, 2, sum))
    result1 <- radfit(x)
    result2 <- fisherfit(x)
    result3 <- prestonfit(x)
    if (plotit == T) {graphics::plot(result1)}
    result <- list(radfit=result1, fisherfit=result2, prestonfit=result3)
    return(result)
}
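# Editor's illustrative usage sketch (not part of the package source): fit rank-abundance,
# Fisher logseries and Preston lognormal models to abundances pooled over sites.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    radfitresult(dune)                                           # pooled over all sites
    radfitresult(dune, y=dune.env, factor="Management", level="NM")  # pooled over one subset
}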
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/radfitresult.R
`rankabuncomp` <- function(x, y=NULL, factor=NULL, return.data=T, specnames=c(1:3),
    scale="abundance", scaledx=F, type="o", rainbow=T, legend=T,
    xlim=c(1, max1), ylim=c(0, max2), ...)
{
    groups <- table(y[, factor])
    levels <- names(groups)
    m <- length(groups)
    max1 <- max(diversitycomp(x, y, factor1=factor, index="richness", method="pooled")[, 2])
    if (scaledx == T) {xlim <- c(0, 100)}
#    freq <- diversityresult(x, index="Berger", method="pooled")
    max2 <- max.2 <- 0
    for (i in 1:m) {
        if (scale == "abundance") {max.2 <- rankabundance(x, y, factor, levels[i])[1, "abundance"]}
        if (scale == "logabun") {max.2 <- rankabundance(x, y, factor, levels[i])[1, "abundance"]}
        if (scale == "proportion") {max.2 <- rankabundance(x, y, factor, levels[i])[1, "proportion"]}
        if (max.2 > max2) {max2 <- max.2}
    }
    if (scale == "accumfreq") {max2 <- 100}
    max2 <- as.numeric(max2)
    if (rainbow == F) {
        if (scale == "logabun" && all.equal(ylim, c(0, max2)) == T) {ylim <- c(1, max2)}
        rankabunplot(rankabundance(x, y, factor, levels[1]), scale=scale, scaledx=scaledx, type=type,
            labels=levels[1], xlim=xlim, ylim=ylim, pch=1, specnames=NULL, ...)
        for (i in 2:m) {
            rankabunplot(rankabundance(x, y, factor, levels[i]), addit=T, scale=scale, scaledx=scaledx,
                type=type, labels=levels[i], pch=i, specnames=NULL, ...)
        }
        if (legend == T) {legend(graphics::locator(1), legend=levels, pch=c(1:m))}
    }else{
        grDevices::palette(colorspace::rainbow_hcl(m, c=90, l=50))
        if (scale == "logabun" && all.equal(ylim, c(0, max2)) == T) {ylim <- c(1, max2)}
        rankabunplot(rankabundance(x, y, factor, levels[1]), scale=scale, scaledx=scaledx, type=type,
            labels=levels[1], xlim=xlim, ylim=ylim, col=1, pch=1, specnames=NULL, ...)
        for (i in 2:m) {
            rankabunplot(rankabundance(x, y, factor, levels[i]), addit=T, scale=scale, scaledx=scaledx,
                type=type, labels=levels[i], col=i, pch=i, specnames=NULL, ...)
        }
        if (legend == T) {legend(graphics::locator(1), legend=levels, pch=c(1:m), col=c(1:m))}
        grDevices::palette("default")
    }
    if (return.data == T) {
        for (i in 1:m) {
            resulti <- data.frame(rankabundance(x, y, factor, levels[i]))
            resulti <- data.frame(Grouping=rep(levels[i], nrow(resulti)), species=rownames(resulti),
                labelit=rep(FALSE, nrow(resulti)), resulti)
            spec.max <- min(max(specnames), nrow(resulti))
            resulti[c(1:spec.max), "labelit"] <- as.logical(1)
            rownames(resulti) <- NULL
            if (i == 1) {
                result <- resulti
            }else{
                result <- rbind(result, resulti)
            }
        }
        return(result)
    }
}
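# Editor's illustrative usage sketch (not part of the package source): compare
# rank-abundance curves among management classes. legend=FALSE avoids the
# interactive locator() click; rainbow=TRUE requires colorspace.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    long.data <- rankabuncomp(dune, y=dune.env, factor="Management",
        scale="proportion", legend=FALSE)
    head(long.data)   # long format suitable for plotting with ggplot2
}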
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/rankabuncomp.R
`rankabundance` <- function(x, y="", factor="", level, digits=1, t=qt(0.975, df=n-1)) {
    if (inherits(y, "data.frame") && factor != "") {
        subs <- y[, factor] == level
        for (q in 1:length(subs)) {
            if (is.na(subs[q])) {subs[q] <- F}
        }
        x <- x[subs, , drop=F]
        freq <- apply(x, 2, sum)
        subs <- freq > 0
        x <- x[, subs, drop=F]
    }
    if (dim(as.matrix(x))[1] == 0) {
        result <- array(NA, dim=c(1, 8))
        colnames(result) <- c("rank", "abundance", "proportion", "plower", "pupper", "accumfreq", "logabun", "rankfreq")
        rownames(result) <- "none"
        return(result)
    }
    total <- apply(x, 1, sum)
    p <- ncol(x)
    n <- nrow(x)
    mu <- sum(total)/n
    result <- array(dim=c(p, 8))
    colnames(result) <- c("rank", "abundance", "proportion", "plower", "pupper", "accumfreq", "logabun", "rankfreq")
    rownames(result) <- colnames(x)
    for (j in 1:p) {
        spec <- x[, j]
        pi <- spec/total
        p <- sum(spec)/sum(total)
        sigma2 <- 0
        for (i in 1:n) {
            sigma2 <- sigma2 + (total[i]^2 * (pi[i]-p)^2)
        }
        sigma2 <- sigma2 / (n * (n-1) * mu * mu)
        sigma <- sigma2^0.5
        result[j, 2] <- sum(spec)
        result[j, 3] <- p*100
        result[j, 4] <- (p - t*sigma)*100
        result[j, 5] <- (p + t*sigma)*100
    }
    p <- ncol(x)
    result2 <- result
    seq <- rev(order(result[, 2], -order(rownames(result))))
    result[1:p, ] <- result2[seq, ]
    rownames(result)[1:p] <- rownames(result2)[seq]
    result[, 1] <- c(1:ncol(x))
    result[, 6] <- cumsum(result[, 3])
    result[, 7] <- log(result[, 2], base=10)
    result[, 8] <- result[, 1]/ncol(x)*100
    result <- round(result, digits=digits)
    return(result)
}
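# Editor's illustrative usage sketch (not part of the package source): rank species
# by abundance, for all sites or for a subset defined by a factor level.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    rankabundance(dune)                                              # all sites
    rankabundance(dune, y=dune.env, factor="Management", level="NM") # one subset
}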
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/rankabundance.R
`rankabunplot` <- function(xr, addit=F, labels="", scale="abundance", scaledx=F, type="o",
    xlim=c(min(xpos), max(xpos)), ylim=c(0, max(x[, scale])), specnames=c(1:5), srt=0, ...)
{
    x <- xr
    xpos <- 1:nrow(x)
    if (scaledx == T) {xpos <- xpos/nrow(x)*100}
    if (scale == "accumfreq") {type <- "o"}
    if (addit == F) {
        if (scale == "logabun") {
            if (all.equal(ylim, c(0, max(x[, scale]))) == T) {ylim <- c(1, max(x[, "abundance"]))}
            graphics::plot(xpos, x[, "abundance"], xlab="species rank", ylab="abundance", type=type,
                bty="l", log="y", xlim=xlim, ylim=ylim, ...)
        }else{
            graphics::plot(xpos, x[, scale], xlab="species rank", ylab=scale, type=type, bty="l",
                ylim=ylim, xlim=xlim, ...)
        }
    }else{
        if (scale == "logabun") {
            graphics::points(xpos, x[, "abundance"], type=type, ...)
        }else{
            graphics::points(xpos, x[, scale], type=type, ...)
        }
    }
    if (length(specnames) > 0) {
        names.space <- paste0(" ", rownames(x))
        for (i in specnames) {
            if (scale == "logabun") {
                graphics::text(i, x[i, "abundance"], names.space[i], pos=4, srt=srt, offset=0, adj=1)
            }else{
                graphics::text(i, x[i, scale], names.space[i], pos=4, srt=srt, offset=0, adj=1)
            }
        }
    }
    if (labels != "") {
        if (scale == "logabun") {
            graphics::text(1, x[1, "abundance"], labels=labels, pos=2)
        }else{
            graphics::text(1, x[1, scale], labels=labels, pos=2)
        }
    }
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/rankabunplot.R
`removeNAcomm` <- function(x, y, variable) {
    subs <- is.na(y[, variable])
    subs <- (subs == F)
    x <- x[subs, , drop=F]
    return(x)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/removeNAcomm.R
`removeNAenv` <- function(x, variable) {
    subs <- is.na(x[, variable])
    subs <- (subs == F)
    x <- x[subs, , drop=F]
    for (i in 1:ncol(x)) {
        if (is.factor(x[, i])) {x[, i] <- factor(x[, i][drop=T])}
    }
    return(x)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/removeNAenv.R
`removezerospecies` <- function(x) {
    freq <- apply(x, 2, sum)
    subs <- freq > 0
    x <- x[, subs, drop=F]
    return(x)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/removezerospecies.R
`renyiaccumresult` <- function(x, y=NULL, factor, level,
    scales=c(0, 0.25, 0.5, 1, 2, 4, 8, Inf), permutations=100, ...)
{
    if (is.null(y) == F) {
        if ((factor %in% names(y)) == F) {stop("specified factor '", factor, "' is not a variable of the environmental data frame")}
        if (is.factor(y[, factor]) == F) {stop("specified factor '", factor, "' is not a factor")}
        levels1 <- as.character(levels(as.factor(as.character(y[, factor]))))
        if ((level %in% levels1) == F) {stop("specified level '", level, "' is not an available factor level")}
        subs <- y[, factor] == level
        for (q in 1:length(subs)) {
            if (is.na(subs[q])) {subs[q] <- F}
        }
        x <- x[subs, , drop=F]
        freq <- apply(x, 2, sum)
        subs <- freq > 0
        x <- x[, subs, drop=F]
    }
    result <- renyiaccum(x, scales=scales, permutations=permutations, ...)
    return(result)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/renyiaccumresult.R
`renyicomp` <- function(x, y, factor, sites=Inf,
    scales=c(0, 0.25, 0.5, 1, 2, 4, 8, Inf), permutations=100, plotit=FALSE, ...)
{
    factors <- NULL
    for (i in 1:length(names(y))) {
        if (is.factor(y[, i]) == T) {factors <- c(factors, names(y)[i])}
    }
    if ((factor %in% factors) == F) {stop("specified factor '", factor, "' is not an available factor variable")}
    groups <- table(y[, factor])
    if (sites == Inf) {sites <- min(groups)}
    m <- length(groups)
    n <- max(groups)
    s <- length(scales)
    levels <- names(groups)
    result <- array(NA, dim=c(m, s, 6))
    dimnames(result) <- list(level=levels, scale=scales,
        c("mean", "stdev", "min", "max", "Qnt 0.025", "Qnt 0.975"))
    names(dimnames(result)) <- c(factor, "scale", "")
    for (i in 1:m) {
        if (groups[i] == sites) {result[i, , 1] <- as.matrix(renyiresult(x, y, factor, levels[i], scales=scales))}
        if (groups[i] > sites) {result[i, , ] <- renyiaccumresult(x, y, factor, levels[i], scales=scales, permutations=permutations)[sites, , ]}
    }
    if (plotit == T) {renyiplot(result[, , 1], ...)}
    return(result)
}
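# Editor's illustrative usage sketch (not part of the package source): compare Renyi
# diversity profiles among management classes at an equal number of accumulated sites
# (by default the size of the smallest group).
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    renyicomp(dune, y=dune.env, factor="Management", permutations=10, plotit=FALSE)
}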
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/renyicomp.R
`renyiplot` <- function(xr, addit=FALSE, pch=1, xlab="alpha", ylab="H-alpha", ylim=NULL,
    labelit=TRUE, legend=TRUE, legend.x="topleft", legend.ncol=8,
    col=1, cex=1, rainbow=TRUE, evenness=FALSE, ...)
{
    x <- xr
#    x <- as.matrix(x)
    x <- data.frame(x)
    names <- names(x) <- names(xr)
    p <- ncol(x)
    n <- nrow(x)
    m <- max(x, na.rm=T)
    if (is.null(ylim) == T) {ylim <- c(0, m)}
    pos <- -1000
    names <- as.factor(names)
#    ylab <- "H-alpha"
    if (evenness == T) {
        pos <- 1000
        if (ylab == "H-alpha") {ylab <- "E-alpha"}
        x[, ] <- x[, ] - x[, 1]
        m <- min(x, na.rm=T)
        if (is.null(ylim) == T) {ylim <- c(m, 0)}
    }
    if (addit == F) {
        graphics::plot(names, rep(pos, p), xlab=xlab, ylab=ylab, ylim=ylim, bty="l", xlim=c(0.5, p+1), ...)
    }
    if (n > 25) {
        warning("Symbol type was kept constant as there were more than 25 profiles (> number of symbols that are currently used in R)")
        rainbow <- T
    }
    if (rainbow == T && n > 1) {
        # was rainbow_hcl(m): the palette needs one colour per profile (n),
        # not the maximum diversity value (m)
        grDevices::palette(colorspace::rainbow_hcl(n, c=90, l=50))
        for (i in 1:n) {
            if (n < 26) {graphics::points(c(1:p), x[i, ], pch=i, col=i, cex=cex, type="o")}
            if (n > 25) {graphics::points(c(1:p), x[i, ], pch=19, col=i, cex=cex, type="o")}
            if (labelit == T) {
                graphics::text(1, x[i, 1], labels=rownames(x)[i], pos=2, col=i, cex=cex)
                graphics::text(p, x[i, p], labels=rownames(x)[i], pos=4, col=i, cex=cex)
            }
        }
        if (legend == T && n < 26) {legend(graphics::locator(1), legend=rownames(x), pch=c(1:n), col=c(1:n), ncol=legend.ncol)}
        if (legend == T && n > 25) {legend(graphics::locator(1), legend=rownames(x), pch=rep(19, n), col=c(1:n), ncol=legend.ncol)}
    }else{
        for (i in 1:n) {
            if (n < 26) {graphics::points(c(1:p), x[i, ], pch=i, col=col, cex=cex, type="o")}
            if (n > 25) {graphics::points(c(1:p), x[i, ], pch=19, col=col, cex=cex, type="o")}
            if (labelit == T) {
                graphics::text(1, x[i, 1], labels=rownames(x)[i], pos=2, col=col, cex=cex)
                graphics::text(p, x[i, p], labels=rownames(x)[i], pos=4, col=col, cex=cex)
            }
        }
        if (legend == T && n < 26) {legend(x=legend.x, legend=rownames(x), pch=c(1:n), col=col, ncol=legend.ncol)}
        if (legend == T && n > 25) {legend(x=legend.x, legend=rownames(x), pch=rep(19, n), col=col, ncol=legend.ncol)}
    }
    grDevices::palette("default")
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/renyiplot.R
`renyiresult` <- function(x, y=NULL, factor, level, method="all",
    scales=c(0, 0.25, 0.5, 1, 2, 4, 8, Inf), evenness=FALSE, ...)
{
    if (is.null(y) == F) {
        if ((factor %in% names(y)) == F) {stop("specified factor '", factor, "' is not a variable of the environmental data frame")}
        if (is.factor(y[, factor]) == F) {stop("specified factor '", factor, "' is not a factor")}
        levels1 <- as.character(levels(as.factor(as.character(y[, factor]))))
        if ((level %in% levels1) == F) {stop("specified level '", level, "' is not an available factor level")}
        subs <- y[, factor] == as.character(level)
        for (q in 1:length(subs)) {
            if (is.na(subs[q])) {subs[q] <- F}
        }
        x <- x[subs, , drop=F]
        freq <- apply(x, 2, sum)
        subs <- freq > 0
        x <- x[, subs, drop=F]
    }
    if (method == "all") {x <- t(as.matrix(apply(x, 2, sum)))}
    result <- renyi(x, scales=scales, ...)
    if (attributes(result)$class[2] == "numeric") {
        result <- data.frame(t(as.matrix(result)))
        rownames(result) <- "all"
        colnames(result) <- scales
    }
    if (evenness == T) {result[, ] <- result[, ] - renyi(x, scales=c(0))}
    return(result)
}
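# Editor's illustrative usage sketch (not part of the package source): Renyi diversity
# profile pooled over all sites, or over the sites of one factor level.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    renyiresult(dune)                                             # pooled over all sites
    renyiresult(dune, y=dune.env, factor="Management", level="NM")
}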
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/renyiresult.R
`replaceNAcomm` <- function(x) {
    for (j in 1:ncol(x)) {
        x[is.na(x[, j]), j] <- 0
    }
    return(x)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/replaceNAcomm.R
`same.sites` <- function(x, y) {
    n <- nrow(y)
    p <- ncol(x)
    result <- array(0, dim=c(n, p))
    for (i in 1:n) {
        index <- rownames(y)[i] == rownames(x)
        if (any(index) == T) {
            result[i, ] <- as.matrix(x[index, , drop=F])
        }
    }
    result <- data.frame(result)
    rownames(result) <- rownames(y)
    colnames(result) <- colnames(x)
    totals1 <- rowSums(result)
    index1 <- totals1 == 0
    zerosites1 <- rownames(result)[index1]
    totals2 <- rowSums(x)
    index2 <- totals2 == 0
    zerosites2 <- rownames(x)[index2]
    if (length(zerosites1) > 0 || length(zerosites2) > 0) {
        if (any(zerosites1 == zerosites2) == F) {
            cat("Warning: some sites without species are different in original and resulting data\n")
            cat("Original sites without species: ", zerosites2, "\n")
            cat("Resulting sites without species: ", zerosites1, "\n")
        }
    }
    return(result)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/same.sites.R
`sites.long` <- function(x, env.data=NULL) {
    if (is.null(env.data)) {
        result <- data.frame(axis1=x$sites[, 1], axis2=x$sites[, 2], labels=rownames(x$sites))
    }else{
        result <- data.frame(cbind(env.data, axis1=x$sites[, 1], axis2=x$sites[, 2], labels=rownames(x$sites)))
    }
    return(result)
}

`species.long` <- function(x, spec.data=NULL) {
    if (is.null(x$species)) {
        cat(paste("No species scores available", "\n"))
        return(NULL)
    }
    if (is.null(spec.data)) {
        result <- data.frame(axis1=x$species[, 1], axis2=x$species[, 2], labels=rownames(x$species))
    }else{
        result <- data.frame(cbind(spec.data, axis1=x$species[, 1], axis2=x$species[, 2], labels=rownames(x$species)))
    }
    return(result)  # previously missing, so the result was only returned invisibly
}

`centroids.long` <- function(y, grouping, FUN=mean, centroids.only=FALSE) {
    gr.name <- rlang::enexpr(grouping)
    cent.means <- stats::aggregate(cbind(axis1, axis2) ~ grouping, data=y, FUN=FUN)
    names(cent.means) <- c(gr.name, "axis1c", "axis2c")
    cent.means$Centroid <- cent.means[, 1]
    if (centroids.only == TRUE) {return(cent.means)}
    by.name <- names(y)[names(y) == gr.name]
    result <- dplyr::full_join(y, cent.means, by=by.name)
    return(result)
}

`vectorfit.long` <- function(z) {
    z1 <- data.frame(z$vectors$arrows)
    names(z1) <- c("axis1", "axis2")
    z1$r <- z$vectors$r
    z1$p <- z$vectors$pvals
    result <- data.frame(vector=rownames(z1), z1)
    return(result)
}

`ordisurfgrid.long` <- function(z) {
    zg <- z$grid
    lx <- length(zg$x)
    for (i in 1:lx) {
        result.i <- cbind(x=rep(zg$x[i], lx), y=zg$y, z=zg$z[i, ])
        if (i == 1) {
            result <- result.i
        }else{
            result <- rbind(result, result.i)
        }
    }
    return(data.frame(result))
}

`ordiellipse.long` <- function(z, grouping.name="Grouping") {
# copied from vegan 2.5-6 (a function that is not exported)
    `veganCovEllipse2` <- function(cov, center=c(0, 0), scale=1, npoints=100) {
        ## Basically taken from the 'car' package: The Circle
        theta <- (0:npoints) * 2 * pi/npoints
        Circle <- cbind(cos(theta), sin(theta))
        ## scale, center and cov must be calculated separately
        Q <- chol(cov, pivot=TRUE)
        ## pivot takes care of cases when points are on a line
        o <- attr(Q, "pivot")
        t(center + scale * t(Circle %*% Q[, o]))
    }
    grouping <- names(z)
    for (g in 1:length(grouping)) {
        g.arg <- z[[grouping[g]]]
        result.g <- data.frame(veganCovEllipse2(g.arg$cov, center=g.arg$center, scale=g.arg$scale, npoints=100))
        result.g <- data.frame(cbind(rep(grouping[g], nrow(result.g)), result.g))
        names(result.g) <- c(grouping.name, "axis1", "axis2")
        result.g[, 1] <- as.factor(result.g[, 1])
        if (g == 1) {
            result <- result.g
        }else{
            result <- rbind(result, result.g)
        }
    }
    return(result)
}

`pvclust.long` <- function(cl, cl.data) {
    if (inherits(cl, "pvclust") == FALSE) {stop("function requires a 'pvclust' object")}
    merge.data <- data.frame(cbind(cl$hclust$merge, cl$hclust$height))
    names(merge.data) <- c("m1", "m2", "height")
    merge.data$ID <- c(1:nrow(merge.data))
    nodes.scores <- data.frame(cl.data$scores)
    nodes.scores$ID <- c(1:nrow(nodes.scores))
    nodes.scores2 <- dplyr::left_join(nodes.scores, merge.data, by="ID")
    edges.pv <- data.frame(cl$edges)
    edges.pv$ID <- c(1:nrow(edges.pv))
    edges.pv$prune <- -edges.pv$ID + nrow(edges.pv)
    nodes.scores3 <- dplyr::left_join(nodes.scores2, edges.pv, by="ID")
    segments.scores <- data.frame(cl.data$segments, height=nodes.scores3$height,
        prune=nodes.scores3$prune, au=nodes.scores3$au, bp=nodes.scores3$bp)
    return(list(nodes=nodes.scores3, segments=segments.scores))
}

`axis.long` <- function(w, choices=c(1, 2), cmdscale.model=FALSE, CAPdiscrim.model=FALSE) {
    if (cmdscale.model == FALSE && CAPdiscrim.model == FALSE) {
        eigs <- NULL
        if ("cca" %in% class(w)) {
            if (is.null(w$CCA$eig)) {
                eigs.all <- w$CA$eig
            }else{
                eigs.all <- c(w$CCA$eig, w$CA$eig)
            }
            eigs <- round(100 * eigs.all / w$tot.chi, digits=1)[choices]
        }
        if ("monoMDS" %in% class(w)) {
            labels <- paste0("NMS", choices)
            return(data.frame(axis=c(1:2), ggplot=c("xlab.label", "ylab.label"), label=labels))
        }
        if ("wcmdscale" %in% class(w)) {
            eigs <- round(100 * w$eig / sum(w$eig), digits=1)[choices]
            names(eigs) <- paste0("WMDS", choices)
        }
        if ("decorana" %in% class(w)) {eigs <- round(100 * w$evals / sum(w$evals), digits=1)[choices]}
        if (is.null(eigs)) {
            labels <- paste0("DIM", choices)
            return(data.frame(axis=c(1:2), ggplot=c("xlab.label", "ylab.label"), label=labels))
        }
    }
    if (cmdscale.model == TRUE) {
        eigs <- round(100 * w$eig / sum(w$eig), digits=1)[choices]
        names(eigs) <- paste0("MDS", choices)
    }
    if (CAPdiscrim.model == TRUE) {
        eigs <- round(100 * w$F / sum(w$F), digits=1)[choices]
        names(eigs) <- names(data.frame(w$x))[choices]
    }
    xlab.label <- paste0(names(eigs)[1], " (", eigs[1], "%)")
    ylab.label <- paste0(names(eigs)[2], " (", eigs[2], "%)")
    result <- data.frame(axis=c(1:2), ggplot=c("xlab.label", "ylab.label"), label=c(xlab.label, ylab.label))
    return(result)
}

`accumcomp.long` <- function(x, ci=2, label.freq=1) {
    grouping <- rownames(data.frame(x[, 1, "Sites"]))
    for (g in 1:length(grouping)) {
        g.obs <- sum(is.na(x[grouping[g], , "Sites"]) == FALSE)
        g.data <- data.frame(Grouping = rep(grouping[g], times=g.obs),
            Obs = 1:g.obs,
            Sites = x[grouping[g], c(1:g.obs), "Sites"],
            Richness = x[grouping[g], c(1:g.obs), "Richness"],
            SD = x[grouping[g], c(1:g.obs), "sd"])
        if (is.na(ci)) {ci <- stats::qt(p=0.975, df=g.obs)}
        g.data$LWR <- g.data$Richness - ci*g.data$SD
        g.data$UPR <- g.data$Richness + ci*g.data$SD
        g.data$labelit <- rep(FALSE, g.obs)
        test1 <- (g.data$Obs-1)/label.freq
        test2 <- round((g.data$Obs-1)/label.freq)
        g.data[test1 == test2, "labelit"] <- as.logical(1)
        rownames(g.data) <- NULL
        if (g == 1) {
            g.all <- g.data
        }else{
            g.all <- rbind(g.data, g.all)
        }
    }
    return(g.all)
}

`renyicomp.long` <- function(x, label.freq=1) {
    grouping <- rownames(data.frame(x[, 1, "mean"]))
    for (g in 1:length(grouping)) {
        g.obs <- length(x[grouping[g], , "mean"])
        g.data <- data.frame(Grouping = rep(grouping[g], times=g.obs),
            Obs = 1:g.obs,
            Scales = names(x[1, , "mean"]),
            Diversity = x[grouping[g], c(1:g.obs), "mean"],
            Stdev = x[grouping[g], c(1:g.obs), "stdev"],
            Min = x[grouping[g], c(1:g.obs), "min"],
            Max = x[grouping[g], c(1:g.obs), "max"],  # the source repeated the 'min' column three times; 'max' was evidently intended
            LWR = x[grouping[g], c(1:g.obs), "Qnt 0.025"],
            UPR = x[grouping[g], c(1:g.obs), "Qnt 0.975"])
        g.data$labelit <- rep(FALSE, g.obs)
        test1 <- (g.data$Obs-1)/label.freq
        test2 <- round((g.data$Obs-1)/label.freq)
        g.data[test1 == test2, "labelit"] <- as.logical(1)
        rownames(g.data) <- NULL
        if (g == 1) {
            g.all <- g.data
        }else{
            g.all <- rbind(g.all, g.data)
        }
    }
    return(g.all)
}

`renyi.long` <- function(x, env.data=NULL, label.freq=1) {
    grouping <- rownames(data.frame(x))
    for (g in 1:length(grouping)) {
        g.obs <- length(x[grouping[g], ])
        if (is.null(env.data)) {
            g.data <- data.frame(Grouping = rep(grouping[g], times=g.obs),
                Obs = 1:g.obs,
                Scales = as.character(names(x)),
                Diversity = as.numeric(x[grouping[g], ]))
        }else{
            g.data <- data.frame(Grouping = rep(grouping[g], times=g.obs),
                Obs = 1:g.obs,
                Scales = as.character(names(x)),
                Diversity = as.numeric(x[grouping[g], ]),
                env.data[g, , drop=FALSE])
        }
        g.data$labelit <- rep(FALSE, g.obs)
        test1 <- (g.data$Obs-1)/label.freq
        test2 <- round((g.data$Obs-1)/label.freq)
        g.data[test1 == test2, "labelit"] <- as.logical(1)
        rownames(g.data) <- NULL
        if (g == 1) {
            g.all <- g.data
        }else{
            g.all <- rbind(g.all, g.data)
        }
    }
    return(g.all)
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/sites.long.R
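# Editor's illustrative usage sketch for the functions in sites.long.R above (not part
# of the package source): extract ordination results in the long format expected by
# ggplot2-style plotting.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    data(dune.env)
    ord <- vegan::rda(dune)
    plt <- vegan::ordiplot(ord, choices=c(1, 2))
    sites <- sites.long(plt, env.data=dune.env)
    species <- species.long(plt)
    axes <- axis.long(ord, choices=c(1, 2))   # axis labels with percentage of variation
    head(sites)
}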
`spatialsample` <- function(x, method="random", n=5, xwidth=0.5, ywidth=0.5,
    xleft=0, ylower=0, xdist=0, ydist=0, plotit=T, plothull=F)
{
#    if (!require(splancs)) {stop("Requires package splancs")}
    xpos <- x[, 1]
    ypos <- x[, 2]
    minx <- min(xpos)
    maxx <- max(xpos)
    miny <- min(ypos)
    maxy <- max(ypos)
    xwidth <- xwidth/2
    ywidth <- ywidth/2
    if (method == "random") {
        result <- array(dim=c(n, 2))
        for (i in 1:n) {
            result[i, 1] <- minx - 1
            result[i, 2] <- miny - 1
            while ((splancs::inout(cbind(result[i, 1]-xwidth, result[i, 2]-ywidth), x, bound=T) == F) ||
                (splancs::inout(cbind(result[i, 1]-xwidth, result[i, 2]+ywidth), x, bound=T) == F) ||
                (splancs::inout(cbind(result[i, 1]+xwidth, result[i, 2]-ywidth), x, bound=T) == F) ||
                (splancs::inout(cbind(result[i, 1]+xwidth, result[i, 2]+ywidth), x, bound=T) == F)) {
                result[i, 1] <- minx + (maxx-minx)*runif(1)
                result[i, 2] <- miny + (maxy-miny)*runif(1)
            }
        }
    }
    if (method == "grid" || method == "random grid") {
        if (xdist == 0) {xdist <- (maxx-minx)/n}
        if (ydist == 0) {ydist <- (maxy-miny)/n}
        if (xleft < minx) {xleft <- minx + xdist*runif(1)}
        if (ylower < miny) {ylower <- miny + ydist*runif(1)}
        a <- round((maxx-minx)/xdist)
        b <- round((maxy-miny)/ydist)
        result <- array(dim=c(a*b, 2))
        for (i in 1:a) {
            for (j in 1:b) {
                result[((i-1)*b+j), 1] <- xleft + (i-1)*xdist
                result[((i-1)*b+j), 2] <- ylower + (j-1)*ydist
            }
        }
        i <- 1
        while (i <= nrow(result)) {
            if (splancs::inout(cbind(result[i, 1]-xwidth, result[i, 2]-ywidth), x, bound=T) == F) {
                result <- result[-i, ]
            }else{
                i <- i+1
            }
        }
        i <- 1
        while (i <= nrow(result)) {
            if (splancs::inout(cbind(result[i, 1]-xwidth, result[i, 2]+ywidth), x, bound=T) == F) {
                result <- result[-i, ]
            }else{
                i <- i+1
            }
        }
        i <- 1
        while (i <= nrow(result)) {
            if (splancs::inout(cbind(result[i, 1]+xwidth, result[i, 2]-ywidth), x, bound=T) == F) {
                result <- result[-i, ]
            }else{
                i <- i+1
            }
        }
        i <- 1
        while (i <= nrow(result)) {
            if (splancs::inout(cbind(result[i, 1]+xwidth, result[i, 2]+ywidth), x, bound=T) == F) {
                result <- result[-i, ]
            }else{
                i <- i+1
            }
        }
        if (n < nrow(result) && method == "random grid") {result <- result[(sample(nrow(result), n)), ]}
    }
    if (plotit == T) {
        graphics::rect(result[, 1]-xwidth, result[, 2]-ywidth, result[, 1]+xwidth, result[, 2]+ywidth)
        if (plothull == T) {
            points2 <- grDevices::chull(result)
            points3 <- c(points2, points2[1])
            graphics::lines(result[points3, ])
        }
    }
    return(result)
}
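# Editor's illustrative usage sketch (not part of the package source): place five random
# 1 x 1 quadrats inside a square survey area defined by its corner coordinates.
# Requires the splancs package; a plot must be open so the quadrats can be drawn.
if (FALSE) {
    library(splancs)
    area <- cbind(c(0, 0, 10, 10), c(0, 10, 10, 0))   # polygon of the survey area
    plot(area, type="n", xlab="x", ylab="y")
    graphics::polygon(area)
    sample.points <- spatialsample(area, method="random", n=5, xwidth=1, ywidth=1)
}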
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/spatialsample.R
`stackcommunitydataset` <- function(comm, remove.zeroes=FALSE, order.sites=FALSE, order.species=FALSE) {
    x <- data.frame(comm)
    site.names <- rownames(x)
    site.names <- as.character(levels(factor(site.names, levels=unique(site.names))))
    if (order.sites == T) {site.names <- as.character(levels(factor(site.names)))}
    n.sites <- length(site.names)
    species.names <- names(x)
    species.names <- as.character(levels(factor(species.names, levels=unique(species.names))))
    if (order.species == T) {species.names <- as.character(levels(factor(species.names)))}
    n.species <- length(species.names)
    result <- data.frame(array(0, dim=c(n.sites*n.species, 3)))
    names(result) <- c("sites", "species", "abundance")
    result[, "sites"] <- rep(site.names, each=n.species)
    result[, "species"] <- rep(species.names, times=n.sites)
    for (r in 1:nrow(x)) {
        for (c in 1:ncol(x)) {
            i <- result[, "sites"] == site.names[r] & result[, "species"] == species.names[c]
            if (is.na(x[r, c]) == F) {result[i, "abundance"] <- result[i, "abundance"] + x[r, c]}
        }
    }
    if (remove.zeroes == T) {result <- result[which(result[, "abundance"] > 0), ]}
    return(result)
}
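# Editor's illustrative usage sketch (not part of the package source): turn a wide
# site-by-species matrix into the stacked (long) format, one row per site x species
# combination.
if (FALSE) {
    library(BiodiversityR)
    data(dune)
    stacked <- stackcommunitydataset(dune[1:3, 1:5], remove.zeroes=TRUE)
    stacked   # columns: sites, species, abundance
}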
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/stackcommunitydataset.R
`subsetcomm` <- function(x, y, factor, level, returncomm=TRUE) {
    subs <- y[, factor] == level
    for (q in 1:length(subs)) {
        if (is.na(subs[q])) {subs[q] <- F}
    }
    if (returncomm == T) {
        x <- x[subs, , drop=F]
        freq <- apply(x, 2, sum)
        subs <- freq > 0
        x <- x[, subs, drop=F]
        return(x)
    }else{
        y <- y[subs, , drop=F]
        for (i in 1:ncol(y)) {
            if (is.factor(y[, i])) {y[, i] <- factor(y[, i][drop=T])}
        }
        return(y)
    }
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/subsetcomm.R
`treegoer.widen` <- function(treegoer, species=unique(treegoer$species)[1:100],
    filter.vars=c("bio05", "bio14", "climaticMoistureIndex"))
{
    treegoer.subset <- treegoer$species %in% species
    treegoer <- treegoer[treegoer.subset, ]
    focal.treegoer <- filter.vars[1]
    focal.ranges <- treegoer[treegoer$var == focal.treegoer,
        c("species", "n", "MIN", "Q05", "QRT1", "QRT3", "Q95", "MAX")]
    names(focal.ranges)[2:8] <- paste0(focal.treegoer, "_", names(focal.ranges)[2:8])
    ranges.lookup <- focal.ranges
    for (i in 2:length(filter.vars)) {
        focal.treegoer <- filter.vars[i]
        focal.ranges <- treegoer[treegoer$var == focal.treegoer,
            c("species", "n", "MIN", "Q05", "QRT1", "QRT3", "Q95", "MAX")]
        names(focal.ranges)[2:8] <- paste0(focal.treegoer, "_", names(focal.ranges)[2:8])
# check - note that data was missing for some explanatory variables especially soil
        cat(paste(focal.treegoer, "- ranges for all species:",
            all.equal(focal.ranges$species, ranges.lookup$species), "\n"))
        ranges.lookup <- dplyr::left_join(ranges.lookup, focal.ranges, by="species")
#        ranges.lookup <- cbind(ranges.lookup, focal.ranges[, c(2:8)]) # used in the Rpub, can not handle
    }
    return(ranges.lookup)
}

`treegoer.filter` <- function(site.data, treegoer.wide,
    filter.vars=c("bio05", "bio14", "climaticMoistureIndex"),
    limit.vars=c("Q05", "Q95"))
{
    filtered.data <- treegoer.wide
    for (f in 1:length(filter.vars)) {
        focal.var <- filter.vars[f]
        LL <- paste0(focal.var, "_", limit.vars[1])
        filtered.data <- filtered.data[filtered.data[, LL] <= as.numeric(site.data[, focal.var]), ]
        UL <- paste0(focal.var, "_", limit.vars[2])
        filtered.data <- filtered.data[filtered.data[, UL] >= as.numeric(site.data[, focal.var]), ]
    }
    return(filtered.data)
# modify the function to return the list of the suitable species
#    return(nrow(filtered.data)) # return number of species as in TreeGOER manuscript
}

`treegoer.score` <- function(site.data, site.species=treegoer.wide$species, treegoer.wide,
    filter.vars=c("bio05", "bio14", "climaticMoistureIndex"))
{
    site.species <- data.frame(species=site.species)
    filteri <- data.frame(treegoer.wide)
    site.species$climate.score <- rep(-1, nrow(site.species))
    site.species[site.species$species %in% filteri$species, "climate.score"] <- 0
    first.n <- paste0(filter.vars[1], "_n")
    site.species <- dplyr::left_join(site.species, filteri[, c("species", first.n)], by="species")
    names(site.species)[which(names(site.species) == first.n)] <- "n.TreeGOER"
    filteri <- treegoer.filter(site.data=site.data, treegoer.wide=filteri,
        filter.vars=filter.vars, limit.vars=c("MIN", "MAX"))
    site.species[site.species$species %in% filteri$species, "climate.score"] <- 1
    filteri <- treegoer.filter(site.data=site.data, treegoer.wide=filteri,
        filter.vars=filter.vars, limit.vars=c("Q05", "Q95"))
    site.species[site.species$species %in% filteri$species, "climate.score"] <- 2
    filteri <- treegoer.filter(site.data=site.data, treegoer.wide=filteri,
        filter.vars=filter.vars, limit.vars=c("QRT1", "QRT3"))
    site.species[site.species$species %in% filteri$species, "climate.score"] <- 3
    return(site.species)
}
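# Editor's illustrative sketch (not part of the package source) with a hypothetical
# miniature of the TreeGOER long-format table; the real TreeGOER database supplies
# these range statistics per species x environmental variable. Requires dplyr.
if (FALSE) {
    treegoer.mini <- data.frame(
        species=rep(c("Species A", "Species B"), each=2),
        var=rep(c("bio05", "bio14"), times=2),
        n=100,
        MIN=c(20, 0, 25, 10), Q05=c(22, 5, 27, 15), QRT1=c(24, 10, 29, 20),
        QRT3=c(30, 30, 33, 40), Q95=c(32, 40, 35, 50), MAX=c(34, 60, 37, 70))
    wide <- treegoer.widen(treegoer.mini, species=unique(treegoer.mini$species),
        filter.vars=c("bio05", "bio14"))
    site <- data.frame(bio05=28, bio14=25)               # hypothetical site conditions
    treegoer.filter(site, wide, filter.vars=c("bio05", "bio14"))
    treegoer.score(site, treegoer.wide=wide, filter.vars=c("bio05", "bio14"))
}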
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/treegoer.R
.onAttach <- function(...) {
    packageStartupMessage("BiodiversityR ",
        utils::packageDescription("BiodiversityR", field="Version"),
        ": Use command BiodiversityRGUI() to launch the Graphical User Interface; \nto see changes use BiodiversityRGUI(changeLog=TRUE, backward.compatibility.messages=TRUE)\n")
}
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/R/zzz.R
# BiodiversityR
# Biodiversity Analysis Functions for R
# Developed by Roeland Kindt
#
# This software accompanies Kindt R and Coe R. 2005. Tree Diversity Analysis. A manual
# and software for some common statistical methods for biodiversity and ecological
# analysis. World Agroforestry Centre (ICRAF), Nairobi. vi+196 pp. This is also the suggested citation
# for this software.
#
# Many of the user interface functions were based on John Fox's R Commander (Rcmdr)
# Other functions are often based on the vegan package (Oksanen, Kindt, Legendre & O'Hara)
#
# To use most of the BiodiversityR functions, a community and an environmental dataset need to be identified first.
# Both datasets have the same number of rows (= number of sample units)
# Columns in the community dataset usually represent species
# Columns in the environmental dataset represent environmental characteristics of sample units
#
# Roeland Kindt takes no liability for any direct, special, indirect or consequential damages resulting
# from loss of use, data or profits arising in connection with the use or performance of this software.
#
# The software can be quoted or reproduced without charge, provided the source is
# acknowledged. You must adhere to conditions of copyright of R software documented in
# rw2011\COPYING. Use the citation() or loaded.citations() function for acknowledgments in publications for
# any package that you made use of.
#

library(Rcmdr, quietly=TRUE)

Rcmdr::putRcmdr(".communityDataSet", NULL)
Rcmdr::putRcmdr("operatorFont", tkfont.create(family="courier", size=Rcmdr::getRcmdr("log.font.size")))

# changed based on R Commander 1.9-6 (data-Menu.R)
selectCommunityDataSet <- function(){
    dataSets <- listDataSets()
    .communityDataSet <- CommunityDataSet()
    if ((length(dataSets) == 1) && !is.null(.communityDataSet)) {
        Message(message=gettextRcmdr("There is only one dataset in memory."), type="warning")
        tkfocus(CommanderWindow())
        return()
    }
    if (length(dataSets) == 0){
        Message(message=gettextRcmdr("There are no data sets from which to choose."), type="error")
        tkfocus(CommanderWindow())
        return()
    }
    initializeDialog(title=gettextRcmdr("Select Community Data Set"))
    dataSetsBox <- variableListBox(top, dataSets, title=gettextRcmdr("Data Sets (pick one)"),
        initialSelection=if (is.null(.communityDataSet)) NULL else which(.communityDataSet == dataSets) - 1)
    onOK <- function(){
        communityDataSet(getSelection(dataSetsBox))
        closeDialog()
        tkfocus(CommanderWindow())
    }
    OKCancelHelp()
    tkgrid(getFrame(dataSetsBox), sticky="nw")
    tkgrid(buttonsFrame, sticky="w")
    dialogSuffix(rows=2, columns=1)
}

# changed based on R Commander 1.9-6 (utilities.R)
communityDataSet <- function(dsname, flushModel=TRUE, flushDialogMemory=TRUE){
    .communityDataSet <- CommunityDataSet()
    if (missing(dsname)) {
        if (is.null(.communityDataSet)){
            Message(message=gettextRcmdr("There is no community data set."), type="error")
            return(FALSE)
        }
        else return(.communityDataSet)
    }
    if (!is.data.frame(ds <- get(dsname, envir=.GlobalEnv))){
        if (!exists.method("as.data.frame", ds, default=FALSE)){
            Message(message=paste(dsname, gettextRcmdr(" is not a data frame and cannot be attached."), sep=""), type="error")
            tkfocus(CommanderWindow())
            return()
        }
        command <- paste(dsname, " <- as.data.frame(", dsname, ")", sep="")
        justDoIt(command)
        logger(command)
        Message(message=paste(dsname, gettextRcmdr(" has been coerced to a data frame"), sep=""), type="warning")
    }
    varnames <- names(get(dsname, envir=.GlobalEnv))
    newnames <- make.names(varnames)
    badnames <- varnames != newnames
    if (any(badnames)){
        command <- paste("names(", dsname, ") <- make.names(names(", dsname, "))", sep="")
        doItAndPrint(command)
    }
    if (!is.null(.communityDataSet) && Rcmdr::getRcmdr("attach.data.set")
        && (length(grep(.communityDataSet, search())) != 0)) {
        detach(pos = match(.communityDataSet, search()))
        logger(paste("detach(", .communityDataSet, ")", sep=""))
    }
    if (flushModel) {
        Rcmdr::putRcmdr(".activeModel", NULL)
        RcmdrTclSet("modelName", gettextRcmdr("<No active model>"))
        if (!is.SciViews()) tkconfigure(Rcmdr::getRcmdr("modelLabel"), foreground="red") else refreshStatus()
    }
    if (flushDialogMemory) Rcmdr::putRcmdr("dialog.values", list())
    # -PhG tkconfigure(.modelLabel, foreground="red")
    CommunityDataSet(dsname)
    Message(sprintf(gettextRcmdr("The dataset %s has %d rows and %d columns."), dsname,
        nrow(get(dsname, envir=.GlobalEnv)), ncol(get(dsname, envir=.GlobalEnv))), type="note")
    if (any(badnames)) Message(message=paste(dsname, gettextRcmdr(" contains non-standard variable names:\n"),
        paste(varnames[badnames], collapse=", "),
        gettextRcmdr("\nThese have been changed to:\n"),
        paste(newnames[badnames], collapse=", "), sep=""), type="warning")
    CVariables(listCVariables())
#    Numeric(listNumeric())
#    Factors(listFactors())
#    TwoLevelFactors(listTwoLevelFactors())
# changed 2014 all to #
#    RcmdrTclSet("dataSetName", paste(" ", dsname, " "))
# -PhG tkconfigure(.dataSetLabel, foreground="blue")
#    if (!is.SciViews()) tkconfigure(Rcmdr::getRcmdr("dataSetLabel"), foreground="blue") else refreshStatus() # +PhG
#    if (Rcmdr::getRcmdr("attach.data.set")){
#        attach(get(dsname, envir=.GlobalEnv), name=dsname)
#        logger(paste("attach(", dsname, ")", sep=""))
#    }
#    if (is.SciViews()) refreshStatus() else if (flushModel) tkconfigure(Rcmdr::getRcmdr("modelLabel"), foreground="red") # +PhG (& J.Fox, 25Dec04)
    activateMenus()
    dsname
}

# changed based on R Commander 1.9-6 (utilities.R)
checkCommunityDataSet <- function(){
    if (communityDataSet() == FALSE) {
        tkfocus(CommanderWindow())
        FALSE
    }
    else TRUE
}

# changed based on R Commander 1.9-6 (utilities.R)
CommunityDataSet <- function(name){
    if (missing(name)) {
        temp <- Rcmdr::getRcmdr(".communityDataSet")
        if (is.null(temp)) return(NULL)
        else if (!exists(temp) || !is.data.frame(get(temp, envir=.GlobalEnv))) {
            Message(sprintf(gettextRcmdr("the dataset %s is no longer available"), temp), type="error")
            Rcmdr::putRcmdr(".communityDataSet", NULL)
            RcmdrTclSet("dataSetName", gettextRcmdr("<No active dataset>"))
            Rcmdr::putRcmdr(".activeModel", NULL)
            RcmdrTclSet("modelName", gettextRcmdr("<No active model>"))
            if (!is.SciViews()) {
                tkconfigure(Rcmdr::getRcmdr("dataSetLabel"), foreground="red")
                tkconfigure(Rcmdr::getRcmdr("modelLabel"), foreground="red")
            }
            else refreshStatus()
            activateMenus()
            if (Rcmdr::getRcmdr("suppress.menus") && RExcelSupported()) return(NULL)
        }
        return(temp)
    }
    else Rcmdr::putRcmdr(".communityDataSet", name)
}

CVariables <- function(cnames){
    if (missing(cnames)) Rcmdr::getRcmdr("cvariables")
    else Rcmdr::putRcmdr("cvariables", cnames)
}

listCVariables <- function(dataSet=CommunityDataSet()) {
    cvars <- eval(parse(text=paste("names(", dataSet, ")")), envir=.GlobalEnv)
    if (Rcmdr::getRcmdr("sort.names")) sort(cvars) else cvars
}

communityDataSetP <- function() !is.null(CommunityDataSet())

data(dune)
data(dune.env)
dune2 <- dune
dune.env2 <- dune.env
seq <- c(2, 13, 4, 16, 6, 1, 8, 5, 17, 15, 10, 11, 9, 18, 3, 20, 14, 19, 12, 7)
dune2[seq, ] <- dune[1:20, ]
dune.env2[seq, ] <- dune.env[1:20, ]
rownames(dune2)[seq] <- rownames(dune)[1:20]
rownames(dune.env2)[seq] <- rownames(dune)[1:20]
dune3 <- dune2
seq <- order(colnames(dune2))
dune3[, 1:30] <- dune2[, seq]
colnames(dune3)[1:30] <- colnames(dune2)[seq]
dune <- dune3
dune.env <- dune.env2
rownames(dune) <- rownames(dune.env) <- c("X01", "X02", "X03", "X04", "X05", "X06", "X07", "X08", "X09", "X10",
    "X11", "X12", "X13", "X14", "X15", "X16", "X17", "X18", "X19", "X20")
remove(dune2, dune3, dune.env2)

makecommunityGUI <- function(){
    top <- tktoplevel()
    tkwm.title(top, "Make community matrix")
    .activeDataSet <- ActiveDataSet()
    .fvariables <- Factors()
    fvariables <- paste(.fvariables, ifelse(is.element(.fvariables, Factors()), "[factor]", ""))
    .nvariables <- Numeric()
    nvariables <- paste(.nvariables)
    modelName <- tclVar("Community.1")
    modelFrame <- tkframe(top, relief="groove", borderwidth=2)
    model <- tkentry(modelFrame, width=40, textvariable=modelName)
    siteFrame <- tkframe(top, relief="groove", borderwidth=2)
    siteBox <- tklistbox(siteFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE")
    siteScroll <- tkscrollbar(siteFrame, repeatinterval=5, command=function(...) tkyview(siteBox, ...))
    tkconfigure(siteBox, yscrollcommand=function(...) tkset(siteScroll, ...))
    for (x in fvariables) tkinsert(siteBox, "end", x)
    specFrame <- tkframe(top, relief="groove", borderwidth=2)
    specBox <- tklistbox(specFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE")
    specScroll <- tkscrollbar(specFrame, repeatinterval=5, command=function(...) tkyview(specBox, ...))
    tkconfigure(specBox, yscrollcommand=function(...) tkset(specScroll, ...))
    for (x in fvariables) tkinsert(specBox, "end", x)
    valueFrame <- tkframe(top, relief="groove", borderwidth=2)
    valueBox <- tklistbox(valueFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE")
    valueScroll <- tkscrollbar(valueFrame, repeatinterval=5, command=function(...) tkyview(valueBox, ...))
    tkconfigure(valueBox, yscrollcommand=function(...) tkset(valueScroll, ...))
    for (x in nvariables) tkinsert(valueBox, "end", x)
    subsetFrame <- tkframe(top, relief="groove", borderwidth=2)
    subset1Frame <- tkframe(subsetFrame)
    subset2Frame <- tkframe(subsetFrame)
    subsetBox <- tklistbox(subset1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE")
    subsetScroll <- tkscrollbar(subset1Frame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...))
    tkconfigure(subsetBox, yscrollcommand=function(...) tkset(subsetScroll, ...))
    variables <- c("all", fvariables)
    for (x in variables) tkinsert(subsetBox, "end", x)
    subset <- tclVar("")
    subsetEntry <- tkentry(subset2Frame, width=10, textvariable=subset)
    onOK <- function(){
        modelValue <- tclvalue(modelName)
        site <- .fvariables[as.numeric(tkcurselection(siteBox))+1]
        spec <- .fvariables[as.numeric(tkcurselection(specBox))+1]
        value <- .nvariables[as.numeric(tkcurselection(valueBox))+1]
        var <- variables[as.numeric(tkcurselection(subsetBox))+1]
        sub <- tclvalue(subset)
        if (var == "all") {
            command <- paste("makecommunitydataset(", .activeDataSet, ", row='", site, "', column='", spec,
                "', value='", value, "')", sep="")
        }else{
            var <- .fvariables[as.numeric(tkcurselection(subsetBox))]
            command <- paste("makecommunitydataset(", .activeDataSet, ", row='", site, "', column='", spec,
                "', value='", value, "', factor='", var, "', level='", sub, "')", sep="")
        }
        logger(paste(modelValue, " <- ", command, sep=""))
        assign(modelValue, justDoIt(command), envir=.GlobalEnv)
        communityDataSet(modelValue)
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('makecommunitydataset', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w")
    tkgrid(modelFrame, sticky="w")
    tkgrid(tklabel(siteFrame, text="Site variable (rows)"), sticky="w")
    tkgrid(siteBox, siteScroll, sticky="w")
    tkgrid(siteFrame, sticky="w")
    tkgrid(tklabel(specFrame, text="Species variable (columns)"), sticky="w")
    tkgrid(specBox, specScroll, sticky="w")
    tkgrid(specFrame, sticky="w")
    tkgrid(tklabel(valueFrame, text="Abundance variable"), sticky="w")
    tkgrid(valueBox, valueScroll, sticky="w")
    tkgrid(valueFrame, sticky="w")
    tkgrid(tklabel(subsetFrame, text="Subset options"), sticky="w")
    tkgrid(subsetBox, subsetScroll, sticky="w")
    tkgrid(tklabel(subset2Frame, text="subset: ", width=15), subsetEntry, sticky="w")
    tkgrid(subset1Frame, sticky="w")
    tkgrid(subset2Frame, sticky="w")
    tkgrid(subsetFrame, sticky="w")
    tkgrid(OKbutton, cancelButton, helpButton)
    tkgrid(buttonsFrame, sticky="w")
    tkgrid.configure(subsetScroll, sticky="ns")
    tkgrid.configure(siteScroll, sticky="ns")
    tkgrid.configure(specScroll, sticky="ns")
    tkgrid.configure(valueScroll, sticky="ns")
    tkselection.set(subsetBox, 0)
    tkselection.set(siteBox, 0)
    tkselection.set(specBox, 0)
    tkselection.set(valueBox, 0)
    for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0)
    for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0)
    .Tcl("update idletasks")
    tkwm.resizable(top, 0, 0)
    tkwm.deiconify(top)
    tkgrab.set(top)
    tkfocus(siteBox)
    tkwait.window(top)
}

importfromExcelGUI2 <- function() {
    initializeDialog(title="Read Community and Environmental data From Excel")
    optionsFrame <- tkframe(top, relief="groove", borderwidth=2)
    comdsname <- tclVar("CommunityData")
    entrycomDsname <- tkentry(optionsFrame, width="20", textvariable=comdsname)
    envdsname <- tclVar("EnvironmentalData")
    entryenvDsname <- tkentry(optionsFrame, width="20", textvariable=envdsname)
    dsites <- tclVar("sites")
    entrysites <- tkentry(optionsFrame, width="20", textvariable=dsites)
    stackedFrame <- tkframe(top, relief="groove", borderwidth=2)
    stackedVariable <- tclVar("0")
    stackedCheckBox <- tkcheckbutton(stackedFrame, variable=stackedVariable)
    scolumn <- tclVar("species")
    entrycol <- tkentry(stackedFrame, width="20", textvariable=scolumn)
    sval <- tclVar("abundance")
    entryval <- tkentry(stackedFrame, width="20", textvariable=sval)
    sfactor <- tclVar("all")
    entryfactor <- tkentry(stackedFrame, width="20", textvariable=sfactor)
    slevel <- tclVar("all")
    entrylevel <- tkentry(stackedFrame, width="20", textvariable=slevel)
    onOK <- function(){
        closeDialog()
        comdsnameValue <- tclvalue(comdsname)
        envdsnameValue <- tclvalue(envdsname)
        sitesValue <- tclvalue(dsites)
        colValue <- tclvalue(scolumn)
        valValue <- tclvalue(sval)
        factorValue <- tclvalue(sfactor)
        levelValue <- tclvalue(slevel)
        file <- tclvalue(tkgetOpenFile(filetypes='{"Excel Files" {".xls" ".xlsx" ".XLS" ".XLSX"}} {"All Files" {"*"}}'))
        if (file == "") {
            if (Rcmdr::getRcmdr("grab.focus")) tkgrab.release(top)
            tkdestroy(top)
            return()
        }
        justDoIt(paste("library(RODBC)", sep=""))
        logger(paste("library(RODBC)", sep=""))
        stacked <- tclvalue(stackedVariable) == "1"
        if (stacked == F) {
            command <- paste("import.with.readxl('", file, "', data.type='community', sheet='community', sitenames='",
                sitesValue, "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
        }else{
            if (factorValue == "all") {
                command <- paste("import.with.readxl('", file, "', data.type='stacked', sheet='stacked', sitenames='",
                    sitesValue, "', column='", colValue, "', value='", valValue,
                    "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
            }else{
                command <- paste("import.with.readxl('", file, "', data.type='stacked', sheet='stacked', sitenames='",
                    sitesValue, "', column='", colValue, "', value='", valValue, "', factor='", factorValue,
                    "', level='", levelValue, "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
            }
        }
        logger(paste(comdsnameValue, " <- ", command, sep=""))
        assign(comdsnameValue, justDoIt(command), envir=.GlobalEnv)
        communityDataSet(comdsnameValue)
        command <- paste("import.with.readxl('", file, "', data.type='environmental', sheet='environmental', sitenames='",
            sitesValue, "', write.csv=F, csv.file='environmental.csv')", sep="")
        logger(paste(envdsnameValue, " <- ", command, sep=""))
        assign(envdsnameValue, justDoIt(command), envir=.GlobalEnv)
        activeDataSet(envdsnameValue)
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('import.from.Excel', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(optionsFrame, text="Names for new datasets"), sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for community data set:"), entrycomDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for environmental data set:"), entryenvDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for variable with sites:"), entrysites, sticky="w")
    tkgrid(optionsFrame, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Options for stacked data entry"), sticky="w")
    tkgrid(tklabel(stackedFrame, text="Import community dataset from stacked format:"), stackedCheckBox, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for species:"), entrycol, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for abundance:"), entryval, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter factor for subset:"), entryfactor, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter level for subset:"), entrylevel, sticky="w")
    tkgrid(stackedFrame, sticky="w")
    tkgrid(OKbutton, cancelButton, helpButton)
    tkgrid(buttonsFrame, sticky="w")
    dialogSuffix(rows=4, columns=1)
}

importfromExcelGUI <- function() {
    initializeDialog(title="Read Community and Environmental data From Excel")
    optionsFrame <- tkframe(top, relief="groove", borderwidth=2)
    comdsname <- tclVar("CommunityData")
    entrycomDsname <- tkentry(optionsFrame, width="20", textvariable=comdsname)
    envdsname <- tclVar("EnvironmentalData")
    entryenvDsname <- tkentry(optionsFrame, width="20", textvariable=envdsname)
    dsites <- tclVar("sites")
    entrysites <- tkentry(optionsFrame, width="20", textvariable=dsites)
    stackedFrame <- tkframe(top, relief="groove", borderwidth=2)
    stackedVariable <- tclVar("0")
    stackedCheckBox <- tkcheckbutton(stackedFrame, variable=stackedVariable)
    scolumn <- tclVar("species")
    entrycol <- tkentry(stackedFrame, width="20", textvariable=scolumn)
    sval <- tclVar("abundance")
    entryval <- tkentry(stackedFrame, width="20", textvariable=sval)
    sfactor <- tclVar("all")
    entryfactor <- tkentry(stackedFrame, width="20", textvariable=sfactor)
    slevel <- tclVar("all")
    entrylevel <- tkentry(stackedFrame, width="20", textvariable=slevel)
    onOK <- function(){
        closeDialog()
        comdsnameValue <- tclvalue(comdsname)
        envdsnameValue <- tclvalue(envdsname)
        sitesValue <- tclvalue(dsites)
        colValue <- tclvalue(scolumn)
        valValue <- tclvalue(sval)
        factorValue <- tclvalue(sfactor)
        levelValue <- tclvalue(slevel)
        file <- tclvalue(tkgetOpenFile(filetypes='{"Excel Files" {".xls" ".XLS"}} {"All Files" {"*"}}'))
        if (file == "") {
            if (Rcmdr::getRcmdr("grab.focus")) tkgrab.release(top)
            tkdestroy(top)
            return()
        }
        justDoIt(paste("library(RODBC)", sep=""))
        logger(paste("library(RODBC)", sep=""))
        stacked <- tclvalue(stackedVariable) == "1"
        if (stacked == F) {
            command <- paste("import.from.Excel('", file, "', data.type='community', sheet='community', sitenames='",
                sitesValue, "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
        }else{
            if (factorValue == "all") {
                command <- paste("import.from.Excel('", file, "', data.type='stacked', sheet='stacked', sitenames='",
                    sitesValue, "', column='", colValue, "', value='", valValue,
                    "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
            }else{
                command <- paste("import.from.Excel('", file, "', data.type='stacked', sheet='stacked', sitenames='",
                    sitesValue, "', column='", colValue, "', value='", valValue, "', factor='", factorValue,
                    "', level='", levelValue, "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
            }
        }
        logger(paste(comdsnameValue, " <- ", command, sep=""))
        assign(comdsnameValue, justDoIt(command), envir=.GlobalEnv)
        communityDataSet(comdsnameValue)
        command <- paste("import.from.Excel('", file, "', data.type='environmental', sheet='environmental', sitenames='",
            sitesValue, "', write.csv=F, csv.file='environmental.csv')", sep="")
        logger(paste(envdsnameValue, " <- ", command, sep=""))
        assign(envdsnameValue, justDoIt(command), envir=.GlobalEnv)
        activeDataSet(envdsnameValue)
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('import.from.Excel', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(optionsFrame, text="Names for new datasets"), sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for community data set:"), entrycomDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for environmental data set:"), entryenvDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for variable with sites:"), entrysites, sticky="w")
    tkgrid(optionsFrame, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Options for stacked data entry"), sticky="w")
    tkgrid(tklabel(stackedFrame, text="Import community dataset from stacked format:"), stackedCheckBox, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for species:"), entrycol, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for abundance:"), entryval, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter factor for subset:"), entryfactor, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter level for subset:"), entrylevel, sticky="w")
    tkgrid(stackedFrame, sticky="w")
    tkgrid(OKbutton, cancelButton, helpButton)
    tkgrid(buttonsFrame, sticky="w")
    dialogSuffix(rows=4, columns=1)
}

importfromExcel2007GUI <- function() {
    initializeDialog(title="Read Community and Environmental data From Excel 2007")
    optionsFrame <- tkframe(top, relief="groove", borderwidth=2)
    comdsname <- tclVar("CommunityData")
    entrycomDsname <- tkentry(optionsFrame, width="20", textvariable=comdsname)
    envdsname <- tclVar("EnvironmentalData")
    entryenvDsname <- tkentry(optionsFrame, width="20", textvariable=envdsname)
    dsites <- tclVar("sites")
    entrysites <- tkentry(optionsFrame, width="20", textvariable=dsites)
    stackedFrame <- tkframe(top, relief="groove", borderwidth=2)
    stackedVariable <- tclVar("0")
    stackedCheckBox <- tkcheckbutton(stackedFrame, variable=stackedVariable)
    scolumn <- tclVar("species")
    entrycol <- tkentry(stackedFrame, width="20", textvariable=scolumn)
    sval <- tclVar("abundance")
    entryval <- tkentry(stackedFrame, width="20", textvariable=sval)
    sfactor <- tclVar("all")
    entryfactor <- tkentry(stackedFrame, width="20", textvariable=sfactor)
    slevel <- tclVar("all")
    entrylevel <- tkentry(stackedFrame, width="20", textvariable=slevel)
    onOK <- function(){
        closeDialog()
        comdsnameValue <- tclvalue(comdsname)
        envdsnameValue <- tclvalue(envdsname)
        sitesValue <- tclvalue(dsites)
        colValue <- tclvalue(scolumn)
        valValue <- tclvalue(sval)
        factorValue <- tclvalue(sfactor)
        levelValue <- tclvalue(slevel)
        file <- tclvalue(tkgetOpenFile(filetypes='{"Excel Files" {".xlsx" ".XLSX"}} {"All Files" {"*"}}'))
        if (file == "") {
            if (Rcmdr::getRcmdr("grab.focus")) tkgrab.release(top)
            tkdestroy(top)
            return()
        }
        justDoIt(paste("library(RODBC)", sep=""))
        logger(paste("library(RODBC)", sep=""))
        stacked <- tclvalue(stackedVariable) == "1"
        if (stacked == F) {
            command <- paste("import.from.Excel2007('", file, "', data.type='community', sheet='community', sitenames='",
                sitesValue, "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
        }else{
            if (factorValue == "all") {
                command <- paste("import.from.Excel2007('", file, "', data.type='stacked', sheet='stacked', sitenames='",
                    sitesValue, "', column='", colValue, "', value='", valValue,
                    "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
            }else{
                command <- paste("import.from.Excel2007('", file, "', data.type='stacked', sheet='stacked', sitenames='",
                    sitesValue, "', column='", colValue, "', value='", valValue, "', factor='", factorValue,
                    "', level='", levelValue, "', cepnames=F, write.csv=F, csv.file='community.csv')", sep="")
            }
        }
        logger(paste(comdsnameValue, " <- ", command, sep=""))
        assign(comdsnameValue, justDoIt(command), envir=.GlobalEnv)
        communityDataSet(comdsnameValue)
        command <- paste("import.from.Excel2007('", file, "', data.type='environmental', sheet='environmental', sitenames='",
            sitesValue, "', write.csv=F, csv.file='environmental.csv')", sep="")
        logger(paste(envdsnameValue, " <- ", command, sep=""))
        assign(envdsnameValue, justDoIt(command), envir=.GlobalEnv)
        activeDataSet(envdsnameValue)
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('import.from.Excel', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(optionsFrame, text="Names for new datasets"), sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for community data set:"), entrycomDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for environmental data set:"), entryenvDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for variable with sites:"), entrysites, sticky="w")
    tkgrid(optionsFrame, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Options for stacked data entry"), sticky="w")
    tkgrid(tklabel(stackedFrame, text="Import community dataset from stacked format:"), stackedCheckBox, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for species:"), entrycol, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for abundance:"), entryval, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter factor for subset:"), entryfactor, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter level for subset:"), entrylevel, sticky="w")
    tkgrid(stackedFrame, sticky="w")
    tkgrid(OKbutton, cancelButton, helpButton)
    tkgrid(buttonsFrame, sticky="w")
    dialogSuffix(rows=4, columns=1)
}

importfromAccessGUI <- function() {
    initializeDialog(title="Read Community and Environmental data From Access")
    optionsFrame <- tkframe(top, relief="groove", borderwidth=2)
    comdsname <- tclVar("CommunityData")
    entrycomDsname <- tkentry(optionsFrame, width="20", textvariable=comdsname)
    envdsname <- tclVar("EnvironmentalDataset")
    entryenvDsname <- tkentry(optionsFrame, width="20", textvariable=envdsname)
    dsites <- tclVar("sites")
    entrysites <- tkentry(optionsFrame, width="20", textvariable=dsites)
    stackedFrame <- tkframe(top, relief="groove", borderwidth=2)
    stackedVariable <- tclVar("0")
    stackedCheckBox <- tkcheckbutton(stackedFrame, variable=stackedVariable)
    scolumn <- tclVar("species")
    entrycol <- tkentry(stackedFrame, width="20", textvariable=scolumn)
    sval <- tclVar("abundance")
    entryval <- tkentry(stackedFrame, width="20", textvariable=sval)
    sfactor <- tclVar("all")
    entryfactor <- tkentry(stackedFrame, width="20", textvariable=sfactor)
    slevel <- tclVar("all")
    entrylevel <- tkentry(stackedFrame, width="20", textvariable=slevel)
    onOK <- function(){
        closeDialog()
        comdsnameValue <- tclvalue(comdsname)
        envdsnameValue <- tclvalue(envdsname)
        sitesValue <- tclvalue(dsites)
        colValue <- tclvalue(scolumn)
        valValue <- tclvalue(sval)
        factorValue <- tclvalue(sfactor)
        levelValue <- tclvalue(slevel)
        file <- tclvalue(tkgetOpenFile(filetypes='{"Access Files" {".mdb" ".MDB"}} {"All Files" {"*"}}'))
        if (file == "") {
            if (Rcmdr::getRcmdr("grab.focus")) tkgrab.release(top)
            tkdestroy(top)
            return()
        }
        justDoIt(paste("library(RODBC)", sep=""))
        logger(paste("library(RODBC)", sep=""))
        stacked <- tclvalue(stackedVariable) == "1"
        if (stacked == F) {
            command <- paste("import.from.Access('", file, "', data.type='community', table='community',sitenames='",
                sitesValue, "')", sep="")
        }else{
            if (factorValue == "all") {
                command <- paste("import.from.Access('", file, "', data.type='stacked', table='stacked', sitenames='",
                    sitesValue, "',column='", colValue, "',value='", valValue, "')", sep="")
            }else{
                command <- paste("import.from.Access('", file, "', data.type='stacked', table='stacked', sitenames='",
                    sitesValue, "',column='", colValue, "',value='", valValue, "',factor='", factorValue,
                    "',level='", levelValue, "')", sep="")
            }
        }
        logger(paste(comdsnameValue, " <- ", command, sep=""))
        assign(comdsnameValue, justDoIt(command), envir=.GlobalEnv)
        communityDataSet(comdsnameValue)
        command <- paste("import.from.Access('", file, "', data.type='environmental', table='environmental',sitenames='",
            sitesValue, "')", sep="")
        logger(paste(envdsnameValue, " <- ", command, sep=""))
        assign(envdsnameValue, justDoIt(command), envir=.GlobalEnv)
        activeDataSet(envdsnameValue)
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('import.from.Access', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(optionsFrame, text="Names for new datasets"), sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for community data set:"), entrycomDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for environmental data set:"), entryenvDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for variable with sites:"), entrysites, sticky="w")
    tkgrid(optionsFrame, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Options for stacked data entry"), sticky="w")
    tkgrid(tklabel(stackedFrame, text="Import community dataset from stacked format:"), stackedCheckBox, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for species:"), entrycol, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for abundance:"), entryval, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter factor for subset:"), entryfactor, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter level for subset:"), entrylevel, sticky="w")
    tkgrid(stackedFrame, sticky="w")
    tkgrid(OKbutton, cancelButton, helpButton)
    tkgrid(buttonsFrame, sticky="w")
    dialogSuffix(rows=4, columns=1)
}

importfromAccess2007GUI <- function() {
    initializeDialog(title="Read Community and Environmental data From Access 2007")
    optionsFrame <- tkframe(top, relief="groove", borderwidth=2)
    comdsname <- tclVar("CommunityData")
    entrycomDsname <- tkentry(optionsFrame, width="20", textvariable=comdsname)
    envdsname <- tclVar("EnvironmentalData")
    entryenvDsname <- tkentry(optionsFrame, width="20",
textvariable=envdsname)
    dsites <- tclVar("sites")
    entrysites <- tkentry(optionsFrame, width="20", textvariable=dsites)
    stackedFrame <- tkframe(top, relief="groove", borderwidth=2)
    stackedVariable <- tclVar("0")
    stackedCheckBox <- tkcheckbutton(stackedFrame, variable=stackedVariable)
    scolumn <- tclVar("species")
    entrycol <- tkentry(stackedFrame, width="20", textvariable=scolumn)
    sval <- tclVar("abundance")
    entryval <- tkentry(stackedFrame, width="20", textvariable=sval)
    sfactor <- tclVar("all")
    entryfactor <- tkentry(stackedFrame, width="20", textvariable=sfactor)
    slevel <- tclVar("all")
    entrylevel <- tkentry(stackedFrame, width="20", textvariable=slevel)
    onOK <- function(){
        closeDialog()
        comdsnameValue <- tclvalue(comdsname)
        envdsnameValue <- tclvalue(envdsname)
        sitesValue <- tclvalue(dsites)
        colValue <- tclvalue(scolumn)
        valValue <- tclvalue(sval)
        factorValue <- tclvalue(sfactor)
        levelValue <- tclvalue(slevel)
# Access 2007 databases use the .accdb format (".mdbx" is not an Access extension)
        file <- tclvalue(tkgetOpenFile(filetypes='{"Access Files" {".accdb" ".ACCDB"}} {"All Files" {"*"}}'))
        if (file == "") {
            if (Rcmdr::getRcmdr("grab.focus")) tkgrab.release(top)
            tkdestroy(top)
            return()
        }
        justDoIt(paste("library(RODBC)", sep=""))
        logger(paste("library(RODBC)", sep=""))
        stacked <- tclvalue(stackedVariable) == "1"
        if (stacked==F) {
            command <- paste("import.from.Access2007('", file, "', data.type='community', table='community', sitenames='", sitesValue, "')", sep="")
        }else{
            if (factorValue=="all") {
                command <- paste("import.from.Access2007('", file, "', data.type='stacked', table='stacked', sitenames='", sitesValue, "', column='", colValue, "', value='", valValue, "')", sep="")
            }else{
                command <- paste("import.from.Access2007('", file, "', data.type='stacked', table='stacked', sitenames='", sitesValue, "', column='", colValue, "', value='", valValue, "', factor='", factorValue, "', level='", levelValue, "')", sep="")
            }
        }
        logger(paste(comdsnameValue, " <- ", command, sep=""))
        assign(comdsnameValue, justDoIt(command), envir=.GlobalEnv)
        communityDataSet(comdsnameValue)
        command <- paste("import.from.Access2007('", file, "', data.type='environmental', table='environmental', sitenames='", sitesValue, "')", sep="")
        logger(paste(envdsnameValue, " <- ", command, sep=""))
        assign(envdsnameValue, justDoIt(command), envir=.GlobalEnv)
        activeDataSet(envdsnameValue)
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('import.from.Access', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(optionsFrame, text="Names for new datasets"), sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for community data set:"), entrycomDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for environmental data set:"), entryenvDsname, sticky="w")
    tkgrid(tklabel(optionsFrame, text="Enter name for variable with sites:"), entrysites, sticky="w")
    tkgrid(optionsFrame, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Options for stacked data entry"), sticky="w")
    tkgrid(tklabel(stackedFrame, text="Import community dataset from stacked format:"), stackedCheckBox, sticky="w")
    tkgrid(tklabel(stackedFrame, text="Enter variable for species:"), entrycol, sticky="w")
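# A minimal sketch of the long-to-wide reshaping applied to stacked data,
# assuming (hypothetical names) a data frame 'long' with columns 'sites',
# 'species' and 'abundance':
#   wide <- as.data.frame.matrix(xtabs(abundance ~ sites + species, data=long))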
tkgrid(tklabel(stackedFrame, text="Enter variable for abundance:"), entryval, sticky="w") tkgrid(tklabel(stackedFrame, text="Enter factor for subset:"), entryfactor, sticky="w") tkgrid(tklabel(stackedFrame, text="Enter level for subset:"), entrylevel, sticky="w") tkgrid(stackedFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") dialogSuffix(rows=4, columns=1) } samesitesGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Same rows for community and environmental") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() saveFrame <- tkframe(top, relief="groove", borderwidth=2) saveVariable <- tclVar("1") saveCheckBox <- tkcheckbutton(saveFrame, variable=saveVariable) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) sav <- tclvalue(saveVariable) == "1" if (sav==T) { DataSet <- eval(parse(text=paste(.communityDataSet, sep="")), envir=.GlobalEnv) newname <- paste(.communityDataSet, ".orig", sep="") logger(paste(newname, " <- ", .communityDataSet, sep="")) assign(newname,DataSet, envir=.GlobalEnv) } logger(paste(.communityDataSet, " <- ", "same.sites(", .communityDataSet, ", " , .activeDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("same.sites(", .communityDataSet, ", ", .activeDataSet, ")", sep="")), envir=.GlobalEnv) communityDataSet(.communityDataSet) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('same.sites', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(saveCheckBox, tklabel(saveFrame, text="save original community matrix"), sticky="w") tkgrid(saveFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkwait.window(top) } removezeroes <- function(){ .communityDataSet <- CommunityDataSet() command <- paste("removezerospecies(", .communityDataSet, ")", sep="") logger(paste(.communityDataSet, " <- ", command, sep="")) assign(.communityDataSet, justDoIt(command), envir=.GlobalEnv) communityDataSet(.communityDataSet) invisible() } replaceNAs <- function(){ .communityDataSet <- CommunityDataSet() command <- paste("replaceNAcomm(", .communityDataSet, ")", sep="") logger(paste(.communityDataSet, " <- ", command, sep="")) assign(.communityDataSet, justDoIt(command), envir=.GlobalEnv) communityDataSet(.communityDataSet) invisible() } vegemite.table <- function(){ .communityDataSet <- CommunityDataSet() command <- paste("vegemite(", .communityDataSet, ", use=cca(", .communityDataSet, "), scale='Braun.Blanquet')", sep="") doItAndPrint(paste(command)) } tabasco.table <- function(){ .communityDataSet <- CommunityDataSet() command <- paste("tabasco(", .communityDataSet, ", use=cca(", .communityDataSet, "))", sep="") doItAndPrint(paste(command)) } beals.smoothing <- function(){ .communityDataSet <- CommunityDataSet() command <- paste("beals(", .communityDataSet, ", include=F)", sep="") 
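# Beals smoothing replaces each abundance by the probability that the
# species occurs at the site, predicted from the joint occurrences of the
# other species; a direct sketch (vegan 'dune' data assumed):
#   library(vegan)
#   data(dune)
#   beals(dune, include=FALSE)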
doItAndPrint(paste(command)) } ind.power <- function(){ .communityDataSet <- CommunityDataSet() command <- paste("indpower(", .communityDataSet, ", type=0)", sep="") doItAndPrint(paste(command)) } nested.checks <- function(){ .communityDataSet <- CommunityDataSet() doItAndPrint(paste("nestedchecker(", .communityDataSet, ")", sep="")) doItAndPrint(paste("nestedtemp(", .communityDataSet, ")", sep="")) doItAndPrint(paste("nestednodf(", .communityDataSet, ")", sep="")) doItAndPrint(paste("nestedbetasor(", .communityDataSet, ")", sep="")) doItAndPrint(paste("nestedbetajac(", .communityDataSet, ")", sep="")) } #viewcommunity <- function(){ # command <- justDoIt(paste("invisible(edit(", communityDataSet(), "))", sep="")) #} #editcommunity <- function(){ # .communityDataSet <- CommunityDataSet() # justDoIt(paste("fix(", .communityDataSet, ")", sep="")) # communityDataSet(.communityDataSet) # invisible() #} # view based on R-Commander View button viewcommunity <- function(){ # if (packageAvailable("relimp")) Library("relimp", rmd=FALSE) if (communityDataSet() == FALSE) { tkfocus(CommanderWindow()) return() } suppress <- if(getRcmdr("suppress.X11.warnings")) ", suppress.X11.warnings=FALSE" else "" view.height <- max(getRcmdr("output.height") + getRcmdr("log.height"), 10) dim <- dim(get(CommunityDataSet())) nrows <- dim[1] ncols <- dim[2] threshold <- getRcmdr("showData.threshold") command <- if (nrows <= threshold[1] && ncols <= threshold[2]){ paste("showData(", CommunityDataSet(), ", placement='-20+200', font=getRcmdr('logFont'), maxwidth=", getRcmdr("log.width"), ", maxheight=", view.height, suppress, ")", sep="") } else paste("View(", CommunityDataSet(), ")", sep="") window <- justDoIt(command) if (!is.null(window)){ open.showData.windows <- getRcmdr("open.showData.windows") open.window <- open.showData.windows[[CommunityDataSet()]] if (!is.null(open.window)) tkdestroy(open.window) open.showData.windows[[CommunityDataSet()]] <- window putRcmdr("open.showData.windows", open.showData.windows) } } # edit based on R-Commander Edit button editcommunity <- function(){ if (communityDataSet() == FALSE) { tkfocus(CommanderWindow()) return() } dsnameValue <- CommunityDataSet() size <- eval(parse(text=paste("prod(dim(", dsnameValue, "))", sep=""))) # prod(dim(save.dataset)) if (size < 1 || size > getRcmdr("editDataset.threshold")){ save.dataset <- get(dsnameValue, envir=.GlobalEnv) command <- paste("fix(", dsnameValue, ")", sep="") result <- justDoIt(command) if (class(result)[1] != "try-error"){ if (nrow(get(dsnameValue)) == 0){ errorCondition(window=NULL, message=gettextRcmdr("empty data set.")) justDoIt(paste(dsnameValue, "<- save.dataset")) return() } else{ logger(command, rmd=FALSE) communityDataSet(dsnameValue) } } else{ errorCondition(window=NULL, message=gettextRcmdr("data set edit error.")) return() } } else { command <- paste("editDataset(", dsnameValue, ")", sep="") result <- justDoIt(command) if (class(result)[1] != "try-error"){ logger(command, rmd=FALSE) } else{ errorCondition(window=NULL, message=gettextRcmdr("data set edit error.")) return() } } tkwm.deiconify(CommanderWindow()) tkfocus(CommanderWindow()) } #viewenvironmental <- function(){ # justDoIt(paste("invisible(edit(", ActiveDataSet(), "))", sep="")) #} #editenvironmental <- function(){ # .activeDataSet <- ActiveDataSet() # justDoIt(paste("fix(", .activeDataSet, ")", sep="")) # activeDataSet(.activeDataSet) # invisible() #} # environmental view same as for R-Commander View button viewenvironmental <- function(){ # if 
(packageAvailable("relimp")) Library("relimp", rmd=FALSE) if (activeDataSet() == FALSE) { tkfocus(CommanderWindow()) return() } suppress <- if(getRcmdr("suppress.X11.warnings")) ", suppress.X11.warnings=FALSE" else "" view.height <- max(getRcmdr("output.height") + getRcmdr("log.height"), 10) dim <- dim(get(ActiveDataSet())) nrows <- dim[1] ncols <- dim[2] threshold <- getRcmdr("showData.threshold") command <- if (nrows <= threshold[1] && ncols <= threshold[2]){ paste("showData(", ActiveDataSet(), ", placement='-20+200', font=getRcmdr('logFont'), maxwidth=", getRcmdr("log.width"), ", maxheight=", view.height, suppress, ")", sep="") } else paste("View(", ActiveDataSet(), ")", sep="") window <- justDoIt(command) if (!is.null(window)){ open.showData.windows <- getRcmdr("open.showData.windows") open.window <- open.showData.windows[[ActiveDataSet()]] if (!is.null(open.window)) tkdestroy(open.window) open.showData.windows[[ActiveDataSet()]] <- window putRcmdr("open.showData.windows", open.showData.windows) } } # environmental edit same for as for R-Commander Edit button editenvironmental <- function(){ if (activeDataSet() == FALSE) { tkfocus(CommanderWindow()) return() } dsnameValue <- ActiveDataSet() size <- eval(parse(text=paste("prod(dim(", dsnameValue, "))", sep=""))) # prod(dim(save.dataset)) if (size < 1 || size > getRcmdr("editDataset.threshold")){ save.dataset <- get(dsnameValue, envir=.GlobalEnv) command <- paste("fix(", dsnameValue, ")", sep="") result <- justDoIt(command) if (class(result)[1] != "try-error"){ if (nrow(get(dsnameValue)) == 0){ errorCondition(window=NULL, message=gettextRcmdr("empty data set.")) justDoIt(paste(dsnameValue, "<- save.dataset")) return() } else{ logger(command, rmd=FALSE) activeDataSet(dsnameValue) } } else{ errorCondition(window=NULL, message=gettextRcmdr("data set edit error.")) return() } } else { command <- paste("editDataset(", dsnameValue, ")", sep="") result <- justDoIt(command) if (class(result)[1] != "try-error"){ logger(command, rmd=FALSE) } else{ errorCondition(window=NULL, message=gettextRcmdr("data set edit error.")) return() } } tkwm.deiconify(CommanderWindow()) tkfocus(CommanderWindow()) } checkdatasets <- function(){ .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) } removeNAGUI <- function(){ top <- tktoplevel() tkwm.title(top, "remove NA cases") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) varFrame <- tkframe(top, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=27, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) for (x in variables) tkinsert(subsetBox, "end", x) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) var <- .variables[as.numeric(tkcurselection(subsetBox))+1] command <- paste("removeNAcomm(", .communityDataSet, ", ", .activeDataSet, ",'", var, "')", sep="") logger(paste(.communityDataSet, " <- ", command, sep="")) assign(.communityDataSet, justDoIt(command), envir=.GlobalEnv) command <- paste("removezerospecies(", .communityDataSet, ")", sep="") logger(paste(.communityDataSet, " <- ", command, sep="")) assign(.communityDataSet, justDoIt(command), envir=.GlobalEnv) command <- paste("removeNAenv(", .activeDataSet, ",'", var, "')", sep="") logger(paste(.activeDataSet, " <- ", command, sep="")) assign(.activeDataSet, justDoIt(command), envir=.GlobalEnv) activeDataSet(.activeDataSet) communityDataSet(.communityDataSet) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(varFrame, text="Select variable"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(varFrame, sticky="w") tkgrid(OKbutton, tklabel(buttonsFrame, text=" "), cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(subsetBox) tkwait.window(top) } disttransGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Community matrix transformation") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() methodFrame <- tkframe(top, relief="groove", borderwidth=2) methodBox <- tklistbox(methodFrame, width=50, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(methodFrame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) 
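# The method names inserted in the listbox below are passed as the
# 'method' argument of disttransform(); a direct sketch (assuming the
# vegan 'dune' data have been attached with data(dune)):
#   dune.Hellinger <- disttransform(dune, method='hellinger')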
tkset(methodScroll, ...)) methods <- c("hellinger", "chord", "profiles", "chi.square", "log", "square", "pa", "Braun.Blanquet", "Domin", "Hult", "Hill", "fix", "coverscale.log", "dispweight") for (x in methods) tkinsert(methodBox, "end", x) saveFrame <- tkframe(top, relief="groove", borderwidth=2) saveVariable <- tclVar("1") saveCheckBox <- tkcheckbutton(saveFrame, variable=saveVariable) onOK <- function(){ method <- methods[as.numeric(tkcurselection(methodBox))+1] sav <- tclvalue(saveVariable) == "1" if (sav==T) { DataSet <- eval(parse(text=paste(.communityDataSet, sep="")), envir=.GlobalEnv) newname <- paste(.communityDataSet, ".orig", sep="") logger(paste(newname, " <- ", .communityDataSet, sep="")) assign(newname,DataSet, envir=.GlobalEnv) } logger(paste(.communityDataSet, " <- ", "disttransform(", .communityDataSet, ", method='", method, "')", sep="")) assign(.communityDataSet, justDoIt(paste("disttransform(", .communityDataSet, ", method='", method, "')", sep="")), envir=.GlobalEnv) communityDataSet(.communityDataSet) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(methodFrame, text="Method"), sticky="w") tkgrid(methodBox, methodScroll,sticky="w") tkgrid(methodFrame, sticky="w") tkgrid(saveCheckBox, tklabel(saveFrame, text="save original community matrix"), sticky="w") tkgrid(saveFrame, sticky="w") tkgrid(OKbutton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(methodScroll, sticky="ns") tkselection.set(methodBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) tkwait.window(top) } # BiodiversityR 2.3 reload .activeDataSet envirosummaryGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Summary of environmental variables") .activeDataSet <- ActiveDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) varFrame <- tkframe(top, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=27, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) variables <- c("all",variables) for (x in variables) tkinsert(subsetBox, "end", x) onOK <- function(){ var <- variables[as.numeric(tkcurselection(subsetBox))+1] if (var == "all") { doItAndPrint(paste("summary(", .activeDataSet, ")", sep="")) doItAndPrint(paste("str(", .activeDataSet, ")", sep="")) }else{ var <- .variables[as.numeric(tkcurselection(subsetBox))] doItAndPrint(paste("summary(", .activeDataSet, "$", var, ")", sep="")) } } onPlot <- function(){ var <- variables[as.numeric(tkcurselection(subsetBox))+1] if (var == "all") { doItAndPrint(paste("pairs(", .activeDataSet, ")", sep="")) }else{ var <- .variables[as.numeric(tkcurselection(subsetBox))] varfactor <- eval(parse(text=paste("is.factor(", .activeDataSet, "$", var, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("plot(", .activeDataSet, "$", var,",xlab='", var, "',ylab='n')",sep="")) }else{ doItAndPrint(paste("boxplot(", .activeDataSet, "$", var,",xlab='", var, "')", sep="")) doItAndPrint(paste("points(mean(", .activeDataSet, "$", var,"), pch=19, cex=1.5)",sep="")) } } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(varFrame, text="Select variable"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(varFrame, sticky="w") tkgrid(OKbutton, plotButton, tklabel(buttonsFrame, text=" "), cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(subsetBox) tkwait.window(top) } boxcoxGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Box-Cox transformation") .activeDataSet <- ActiveDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) varFrame <- tkframe(top, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=27, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) for (x in variables) tkinsert(subsetBox, "end", x) onOK <- function(){ var <- .variables[as.numeric(tkcurselection(subsetBox))+1] doItAndPrint(paste("par(mfrow=c(1,2))", sep="")) doItAndPrint(paste("qqPlot(", .activeDataSet, "$", var, ")", sep="")) doItAndPrint(paste("shapiro.test(", .activeDataSet, "$", var, ")", sep="")) doItAndPrint(paste("ks.test(", .activeDataSet, "$", var, ",pnorm)", sep="")) doItAndPrint(paste("summary(powerTransform(na.omit(", .activeDataSet, ")$", var, "))", sep="")) justDoIt(paste(.activeDataSet, "$", var, ".boxcox <- ", .activeDataSet, "$", var, "^ powerTransform(na.omit(", .activeDataSet, ")$", var, ")$lambda", sep="")) logger(paste(.activeDataSet, "$", var, ".boxcox <- ", .activeDataSet, "$", var, "^ powerTransform(na.omit(", .activeDataSet, ")$", var, ")$lambda", sep="")) activeDataSet(.activeDataSet) doItAndPrint(paste("qqPlot(", .activeDataSet, "$", var, ".boxcox)", sep="")) doItAndPrint(paste("shapiro.test(", .activeDataSet, "$", var, ".boxcox)", sep="")) doItAndPrint(paste("ks.test(", .activeDataSet, "$", var, ".boxcox, pnorm)", sep="")) doItAndPrint(paste("par(mfrow=c(1,1))", sep="")) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(varFrame, text="Select variable"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(varFrame, sticky="w") tkgrid(OKbutton, tklabel(buttonsFrame, text=" "), cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(subsetBox) tkwait.window(top) } accumGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Species accumulation curves") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Factors() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) .svariables <- Numeric() svariables <- paste(.svariables) .cvariables <- CVariables() cvariables <- paste(.cvariables) modelName <- tclVar("Accum.1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) choicesFrame <- tkframe(top, relief="groove", borderwidth=2) methodFrame <- tkframe(choicesFrame) method1Frame <- tkframe(methodFrame) method2Frame <- tkframe(methodFrame) methodBox <- tklistbox(method1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(method1Frame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) 
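# The method names inserted in the listbox below are passed on to
# accumresult()/specaccum() or fitspecaccum(); a direct sketch
# (vegan 'dune' data assumed):
#   Accum.exact <- specaccum(dune, method='exact')
#   plot(Accum.exact)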
tkset(methodScroll, ...)) methods <- c("exact", "exact (unconditioned)", "random", "rarefaction", "coleman", "collector", "arrhenius", "gleason", "gitay", "lomolino", "asymp", "gompertz", "michaelis-menten", "logis", "weibull", "specslope", "poolaccum", "estaccumR", "rarefy", "drarefy", "rareslope", "rarecurve") permVariable <- tclVar("999") permutation <- tkentry(method2Frame, width=10, textvariable=permVariable) for (x in methods) tkinsert(methodBox, "end", x) optionFrame <- tkframe(choicesFrame) ggplotVariable <- tclVar("0") ggplotCheckBox <- tkcheckbutton(optionFrame, variable=ggplotVariable) addVariable <- tclVar("0") addCheckBox <- tkcheckbutton(optionFrame, variable=addVariable) xlist <- tclVar("") xEntry <- tkentry(optionFrame, width=10, textvariable=xlist) ylist <- tclVar("") yEntry <- tkentry(optionFrame, width=10, textvariable=ylist) symbol <- tclVar("1") symbolEntry <- tkentry(optionFrame, width=10, textvariable=symbol) cia <- tclVar("2") ciEntry <- tkentry(optionFrame, width=10, textvariable=cia) cexa <- tclVar("1") cexEntry <- tkentry(optionFrame, width=10, textvariable=cexa) colour <- tclVar("blue") colourEntry <- tkentry(optionFrame, width=10, textvariable=colour) option2Frame <- tkframe(choicesFrame) scaleBox <- tklistbox(option2Frame, width=27, height=6, selectmode="single", background="white", exportselection="FALSE") scaleScroll <- tkscrollbar(option2Frame, repeatinterval=5, command=function(...) tkyview(scaleBox, ...)) tkconfigure(scaleBox, yscrollcommand=function(...) tkset(scaleScroll, ...)) svariables <- c("sites",svariables) for (x in svariables) tkinsert(scaleBox, "end", x) subsetFrame <- tkframe(choicesFrame) subset1Frame <- tkframe(subsetFrame) subset2Frame <- tkframe(subsetFrame) subsetBox <- tklistbox(subset1Frame, width=27, height=8, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(subset1Frame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...))
    variables <- c("all", variables)
    for (x in variables) tkinsert(subsetBox, "end", x)
    subset <- tclVar(".")
    subsetEntry <- tkentry(subset2Frame, width=10, textvariable=subset)
    onOK <- function(){
        doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep=""))
        modelValue <- tclvalue(modelName)
        method <- methods[as.numeric(tkcurselection(methodBox))+1]
        var <- variables[as.numeric(tkcurselection(subsetBox))+1]
        sub <- tclvalue(subset)
        xlim <- tclvalue(xlist)
        if (xlim != "") {xlim <- paste(", xlim=c(", xlim, ")", sep="")}
        ylim <- tclvalue(ylist)
        if (ylim != "") {ylim <- paste(", ylim=c(", ylim, ")", sep="")}
        perm <- as.numeric(tclvalue(permVariable))
        ci <- tclvalue(cia)
        cex <- tclvalue(cexa)
        var2 <- svariables[as.numeric(tkcurselection(scaleBox))+1]
        col <- tclvalue(colour)
        if (var2 == "sites") {
            scale <- paste(", scale=''", sep="")
            xlab <- paste(", xlab='sites'", sep="")
        }else{
            var2 <- .svariables[as.numeric(tkcurselection(scaleBox))]
            scale <- paste(", scale='", var2, "'", sep="")
            xlab <- paste(", xlab='", var2, "'", sep="")
        }
        if (method %in% c("exact", "exact (unconditioned)", "random", "rarefaction", "coleman", "collector")) {
            if (var == "all") {
                if (method == "exact (unconditioned)") {
                    command <- paste("accumresult(", .communityDataSet, ", y=", .activeDataSet, ", method='exact', conditioned=F, gamma='boot', permutations=", perm, scale, ")", sep="")
                }else{
                    command <- paste("accumresult(", .communityDataSet, ", y=", .activeDataSet, ", method='", method, "', conditioned=T, gamma='boot', permutations=", perm, scale, ")", sep="")
                }
            }else{
                var <- .variables[as.numeric(tkcurselection(subsetBox))]
                if (sub == ".") {
                    if (method == "exact (unconditioned)") {
                        command <- paste("accumcomp(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', method='exact', conditioned=F, gamma='boot', permutations=", perm, ", legend=F, rainbow=T, ci=", ci, ", ci.type='bar', cex=", cex, xlab, xlim, ylim, scale, ", cex.lab=0.9, cex.axis=0.7)", sep="")
                    }else{
                        command <- paste("accumcomp(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', method='", method, "', conditioned=T, gamma='boot', permutations=", perm, ", legend=F, rainbow=T, ci=", ci, ", ci.type='bar', cex=", cex, xlab, xlim, ylim, scale, ", cex.lab=0.9, cex.axis=0.7)", sep="")
                    }
                }else{
                    if (method == "exact (unconditioned)") {
                        command <- paste("accumresult(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "', method='exact', conditioned=F, gamma='boot', permutations=", perm, scale, ")", sep="")
                    }else{
                        command <- paste("accumresult(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "', method='", method, "', conditioned=T, gamma='boot', permutations=", perm, scale, ")", sep="")
                    }
                }
            }
        }
        if (method == "arrhenius") {
            command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='arrhenius')", sep="")
        }
        if (method == "gleason") {
            command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='gleason')", sep="")
        }
        if (method == "gitay") {
# fixed copy-paste error: the 'gitay' method previously requested model='gleason'
            command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='gitay')", sep="")
        }
        if (method == "lomolino") {
            command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='lomolino')", sep="")
        }
        if (method == "asymp") {
            command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='asymp')", sep="")
        }
        if (method == "gompertz") {
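# fitspecaccum() fits a self-starting non-linear model to the species
# accumulation curve; sketch (vegan 'dune' data assumed):
#   fit <- fitspecaccum(specaccum(dune, method='exact'), model='gompertz')
#   coef(fit)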
command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='gompertz')", sep="") } if (method == "michaelis-menten") { command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='michaelis-menten')", sep="") } if (method == "logis") { command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='logis')", sep="") } if (method == "weibull") { command <- paste("fitspecaccum(specaccum(", .communityDataSet, ", method='exact'), model='weibull')", sep="") } if (method == "specslope") { command <- paste("specslope(specaccum(", .communityDataSet, ", method='exact'), at=5)", sep="") } if (method == "poolaccum") { command <- paste("poolaccum(", .communityDataSet, ", permutations=", perm, ", minsize=2)", sep="") } if (method == "estaccumR") { command <- paste("estaccumR(", .communityDataSet, ", permutations=", perm, ")", sep="") } if (method == "rarefy") { command <- paste("rarefy(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")))", sep="") } if (method == "drarefy") { command <- paste("drarefy(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")))", sep="") } if (method == "rareslope") { command <- paste("rareslope(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")))", sep="") } if (method == "rarecurve") { command <- paste("rarecurve(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), col='", col, "')", sep="") } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste(modelValue)) if (method %in% c("arrhenius", "gleason", "gitay", "lomolino", "asymp", "gompertz", "michaelis-menten", "logis", "weibull")){ doItAndPrint(paste("coef(", modelValue, ")", sep="")) doItAndPrint(paste("fitted(", modelValue, ")", sep="")) } } onPlot <- function(){ modelValue <- tclvalue(modelName) method <- methods[as.numeric(tkcurselection(methodBox))+1] ggplotit <- tclvalue(ggplotVariable) == "1" if (ggplotit==T) { logger(paste(" ")) logger(paste("Please note that plotting requires a result from function 'accumcomp'.")) logger(paste("Such results are obtained when selecting a factor from the Subset options.")) logger(paste(" ")) justDoIt(paste("library(ggplot2)", sep="")) logger(paste("library(ggplot2)", sep="")) doItAndPrint("BioR.theme <- theme(panel.background = element_blank(), panel.border = element_blank(), panel.grid = element_blank(), axis.line = element_line('gray25'), text = element_text(size = 12), axis.text = element_text(size = 10, colour = 'gray25'), axis.title = element_text(size = 14, colour = 'gray25'), legend.title = element_text(size = 14), legend.text = element_text(size = 14), legend.key = element_blank() )") doItAndPrint(paste("plotgg1 <- ggplot(data=accumcomp.long(", modelValue, "), aes(x = Sites, y = Richness, ymax = UPR, ymin= LWR)) + scale_x_continuous(expand=c(0, 1), sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_line(aes(colour=Grouping), size=2) + geom_point(aes(colour=Grouping, shape=Grouping), size=5) + geom_ribbon(aes(colour=Grouping), alpha=0.2, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(x = 'Sites', y = 'Species', colour = 'Factor', shape= 'Factor')", sep="")) doItAndPrint("plotgg1") }else{ addit <- tclvalue(addVariable) == "1" xlim <- tclvalue(xlist) if (xlim != "") {xlim <- paste(", xlim=c(", xlim, ")", sep="")} ylim <- tclvalue(ylist) if (ylim != "") {ylim 
<- paste(", ylim=c(", ylim, ")", sep="")}
        pch <- tclvalue(symbol)
        ci <- tclvalue(cia)
        cex <- tclvalue(cexa)
        col <- tclvalue(colour)
        sub <- tclvalue(subset)
        if (sub == ".") {sub <- ""}
        var2 <- svariables[as.numeric(tkcurselection(scaleBox))+1]
        if (var2 == "sites") {
            xlab <- paste(", xlab='sites'", sep="")
        }else{
            xlab <- paste(", xlab='", var2, "'", sep="")
        }
        if (method %in% c("exact", "exact (unconditioned)", "random", "rarefaction", "coleman", "collector")) {
# removed a stray empty argument (", , ci.col=") from the generated call
            doItAndPrint(paste("accumplot(", modelValue, ", addit=", addit, ", col='", col, "', ci=", ci, ", ci.col='black', ci.lty=3, ci.length=0.1, cex=", cex, xlab, ", ylab='species richness'", xlim, ylim, ", pch=", pch, ", labels='", sub, "', cex.lab=0.9, cex.axis=0.7)", sep=""))
        }
        if (method %in% c("poolaccum", "estaccumR")) {
            doItAndPrint(paste("plot(", modelValue, ")", sep=""))
        }
        if (method %in% c("arrhenius", "gleason", "gitay", "lomolino", "asymp", "gompertz", "michaelis-menten", "logis", "weibull")){
# use the saved result itself rather than the hard-coded default name 'Accum.1'
            if (addit == F) {
                doItAndPrint(paste("plot(", modelValue, "$richness ~ ", modelValue, "$sites, col='black', cex=", cex, xlab, ", ylab='species richness'", xlim, ylim, ", pch=1)", sep=""))
                doItAndPrint(paste("points(fitted(", modelValue, ") ~ ", modelValue, "$sites, col='", col, "', pch=", pch, ")", sep=""))
            }else{
                doItAndPrint(paste("points(fitted(", modelValue, ") ~ ", modelValue, "$sites, col='", col, "', pch=", pch, ")", sep=""))
            }
        }
        if (method %in% c("rarefy", "drarefy", "rareslope", "rarecurve")) {
            doItAndPrint(paste("rarecurve(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), col='", col, "')", sep=""))
        }
        } # ggplotit
        activateMenus()
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    onHelp <- function() {
        if (.Platform$OS.type != "windows") tkgrab.release(top)
        doItAndPrint(paste("help('accumresult', help_type='html')", sep=""))
    }
    helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot)
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w")
    tkgrid(modelFrame, sticky="w")
    tkgrid(tklabel(method1Frame, text="Accumulation method"), sticky="w")
    tkgrid(methodBox, methodScroll, sticky="w")
    tkgrid(tklabel(method2Frame, text="permutations", width=10), permutation, sticky="w")
    tkgrid(method1Frame, sticky="w")
    tkgrid(method2Frame, sticky="w")
    tkgrid(tklabel(option2Frame, text="scale of x axis"), sticky="w")
    tkgrid(scaleBox, scaleScroll, sticky="w")
    tkgrid(tklabel(subsetFrame, text="Subset options"), sticky="w")
    tkgrid(subsetBox, subsetScroll, sticky="w")
    tkgrid(tklabel(subset2Frame, text="subset: ", width=15), subsetEntry, sticky="w")
    tkgrid(subset1Frame, sticky="w")
    tkgrid(subset2Frame, sticky="w")
    tkgrid(tklabel(optionFrame, text="Plot options"), sticky="w")
    tkgrid(ggplotCheckBox, tklabel(optionFrame, text="ggplot (accumcomp) "), sticky="e")
    tkgrid(addCheckBox, tklabel(optionFrame, text="add plot "), sticky="e")
    tkgrid(tklabel(optionFrame, text="x limits: ", width=10), xEntry, sticky="w")
    tkgrid(tklabel(optionFrame, text="y limits: ", width=10), yEntry, sticky="w")
    tkgrid(tklabel(optionFrame, text="ci: ", width=10), ciEntry, sticky="w")
    tkgrid(tklabel(optionFrame, text="symbol: ", width=10), symbolEntry, sticky="w")
    tkgrid(tklabel(optionFrame, text="cex: ",
width=10), cexEntry, sticky="w") tkgrid(tklabel(optionFrame, text="colour: ", width=10), colourEntry, sticky="w") tkgrid(methodFrame, tklabel(choicesFrame, text="", width=1), option2Frame, sticky="w") tkgrid(subsetFrame, tklabel(choicesFrame, text="", width=1), optionFrame, sticky="w") tkgrid(choicesFrame, sticky="w") tkgrid(OKbutton, plotButton, tklabel(buttonsFrame, text=" "), cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(methodScroll, sticky="ns") tkgrid.configure(subsetScroll, sticky="ns") tkgrid.configure(scaleScroll, sticky="ns") tkselection.set(methodBox, 0) tkselection.set(subsetBox, 0) tkselection.set(scaleBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) tkwait.window(top) } diversityGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Diversity calculation") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Factors() variables <- paste(.variables) .cvariables <- CVariables() cvariables <- paste(.cvariables) modelName <- tclVar("Diversity.1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) choicesFrame <- tkframe(top, relief="groove", borderwidth=2) indexFrame <- tkframe(choicesFrame) indexBox <- tklistbox(indexFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") indexScroll <- tkscrollbar(indexFrame, repeatinterval=5, command=function(...) tkyview(indexBox, ...)) tkconfigure(indexBox, yscrollcommand=function(...) tkset(indexScroll, ...)) indices <- c("richness", "abundance", "Shannon", "Simpson", "inverseSimpson", "simpson.unb", "simpson.unb.inverse", "Logalpha", "Berger", "Jevenness", "Eevenness", "jack1", "jack2", "chao", "boot", "richness (contribdiv)", "simpson (contribdiv)", "eventstar") for (x in indices) tkinsert(indexBox, "end", x) methodFrame <- tkframe(choicesFrame) methodBox <- tklistbox(methodFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(methodFrame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) tkset(methodScroll, ...)) methods <- c("pooled", "each site", "mean", "sd", "jackknife") for (x in methods) tkinsert(methodBox, "end", x) optionFrame <- tkframe(choicesFrame) dataVariable <- tclVar("0") dataCheckBox <- tkcheckbutton(optionFrame, variable=dataVariable) sortVariable <- tclVar("0") sortCheckBox <- tkcheckbutton(optionFrame, variable=sortVariable) labelVariable <- tclVar("0") labelCheckBox <- tkcheckbutton(optionFrame, variable=labelVariable) addVariable <- tclVar("0") addCheckBox <- tkcheckbutton(optionFrame, variable=addVariable) ylist <- tclVar("0,5") yEntry <- tkentry(optionFrame, width=10, textvariable=ylist) symbol <- tclVar("1") symbolEntry <- tkentry(optionFrame, width=10, textvariable=symbol) subsetFrame <- tkframe(choicesFrame) subset1Frame <- tkframe(subsetFrame) subset2Frame <- tkframe(subsetFrame) subsetBox <- tklistbox(subset1Frame, width=27, height=7, selectmode="multiple", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(subset1Frame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) variables <- c("(none)", variables) for (x in variables) tkinsert(subsetBox, "end", x) subset <- tclVar(".") subsetEntry <- tkentry(subset2Frame, width=10, textvariable=subset) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) modelValue <- tclvalue(modelName) index <- indices[as.numeric(tkcurselection(indexBox))+1] method <- methods[as.numeric(tkcurselection(methodBox))+1] data1 <- tclvalue(dataVariable) == "1" sortit <- tclvalue(sortVariable) == "1" if (data1==T) {sortit <- F} var <- variables[as.numeric(tkcurselection(subsetBox))+1] if (length(var) > 2) { logger(paste("more than 2 factors selected, whereas only 1 or 2 allowed")) logger(paste("only first 2 will be used")) var <- var[c(1:2)] } if ("(none)" %in% var) {var <- "(none)"} sub <- tclvalue(subset) if (index %in% c("richness", "abundance", "Shannon", "Simpson", "inverseSimpson", "simpson.unb", "simpson.unb.inverse", "Logalpha", "Berger", "Jevenness", "Eevenness", "jack1", "jack2", "chao", "boot")) { if (var[1] == "(none)") { command <- paste("diversityresult(", .communityDataSet, ", index='", index, "' ,method='", method, "', sortit=", sortit, ", digits=6)", sep="") }else{ if (length(var) == 1) { if (sub == ".") { command <- paste("diversitycomp(", .communityDataSet, ", y=", .activeDataSet, ", factor1='", var, "', index='", index, "' , method='", method, "', sortit=", sortit, ", digits=6)", sep="") }else{ command <- paste("diversityresult(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "', index='", index, "' , method='", method, "', sortit=", sortit, ", digits=6)", sep="") } } if (length(var) == 2) { command <- paste("diversitycomp(", .communityDataSet, ", y=", .activeDataSet, ", factor1='", var[1], "', factor2='", var[2], "', index='", index, "' , method='", method, "', sortit=", sortit, ", digits=6)", sep="") } } } if (index == "richness (contribdiv)") { command <- paste("contribdiv(", .communityDataSet, ", index='richness')", sep="") } if (index == "simpson (contribdiv)") { command <- paste("contribdiv(", .communityDataSet, ", index='simpson')", sep="") } if (index == "eventstar") { command <- paste("eventstar(", .communityDataSet, ", qmax=5)", sep="") } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste(modelValue)) if (data1==T && method=="each site" && index %in% c("richness", "abundance", "Shannon", "Simpson", "inverseSimpson", "simpson.unb", "simpson.unb.inverse", "Logalpha", "Berger", "Jevenness", "Eevenness", "jack1", "jack2", "chao", "boot")) { justDoIt(paste(.activeDataSet, "$", index, " <- diversityresult(", .communityDataSet, ", index='", index,"', method='each site')[,1]", sep="")) logger(paste(.activeDataSet, "$", index, " <- diversityresult(", .communityDataSet, ", index='", index,"', method='each site')[,1]", sep="")) activeDataSet(.activeDataSet) } } onPlot <- function() { modelValue <- tclvalue(modelName) index <- indices[as.numeric(tkcurselection(indexBox))+1] method <- methods[as.numeric(tkcurselection(methodBox))+1] if (method == "all (pooled)") {method <- "all"} var <- variables[as.numeric(tkcurselection(subsetBox))+1] labelit <- tclvalue(labelVariable) == "1" addit <- tclvalue(addVariable) == "1" ylim <- tclvalue(ylist) if (ylim != "") {ylim <- paste(", ylim=c(", ylim, ")", sep="")} pch <- tclvalue(symbol) sub <- tclvalue(subset) if (index %in% c("richness", "abundance", "Shannon", "Simpson", 
"inverseSimpson", "simpson.unb", "simpson.unb.inverse", "Logalpha", "Berger", "Jevenness", "Eevenness")) { if (var!="all" && sub=="." && method!="separate per site") { if (addit==F) { justDoIt(paste("plot(rep(-90, nrow(", modelValue, ")) ~ as.factor(rownames(", modelValue, ")), xlab='", method, "', ylab=colnames(", modelValue, "), type='n'", ylim, ")", sep="")) logger(paste("plot(rep(-90, nrow(", modelValue, ")) ~ as.factor(rownames(", modelValue, ")), xlab='", method, "', ylab=colnames(", modelValue, "), type='n'", ylim, ")", sep="")) } doItAndPrint(paste("points(", modelValue, "[,2] ~ c(1:nrow(", modelValue, ")), pch=", pch, ")", sep="")) if (labelit==T) {doItAndPrint(paste("text(c(1:nrow(", modelValue, "))," , modelValue, "[,2], labels=rownames(", modelValue, "), pos=3)", sep="")) } if (labelit==T) {doItAndPrint(paste("text(c(1:nrow(", modelValue, "))," , modelValue, "[,2], labels=", modelValue, "[,1], pos=1)", sep="")) } } } if (index %in% c("richness (contribdiv)", "simpson (contribdiv)")) { doItAndPrint(paste("plot(", modelValue, ")", sep="")) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('diversityresult', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(indexFrame, text="Diversity index"), sticky="w") tkgrid(indexBox, indexScroll,sticky="w") tkgrid(tklabel(methodFrame, text="Calculation method"), sticky="w") tkgrid(methodBox, methodScroll,sticky="w") tkgrid(tklabel(subsetFrame, text="Subset options"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(tklabel(subset2Frame, text="subset: ", width=15), subsetEntry, sticky="w") tkgrid(subset1Frame, sticky="w") tkgrid(subset2Frame, sticky="w") tkgrid(tklabel(optionFrame, text="Output options"), sticky="w") tkgrid(dataCheckBox, tklabel(optionFrame, text="save results"), sticky="w") tkgrid(sortCheckBox, tklabel(optionFrame, text="sort results"), sticky="w") tkgrid(labelCheckBox, tklabel(optionFrame, text="label results"), sticky="w") tkgrid(addCheckBox, tklabel(optionFrame, text="add plot"), sticky="w") tkgrid(tklabel(optionFrame, text="y limits: ", width=20), yEntry, sticky="w") tkgrid(tklabel(optionFrame, text="symbol: ", width=20), symbolEntry, sticky="w") tkgrid(indexFrame, tklabel(choicesFrame, text="", width=1), methodFrame, sticky="w") tkgrid(subsetFrame, tklabel(choicesFrame, text="", width=1), optionFrame, sticky="w") tkgrid(choicesFrame, sticky="w") tkgrid(OKbutton, plotButton, tklabel(buttonsFrame, text=" "), cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(indexScroll, sticky="ns") tkgrid.configure(methodScroll, sticky="ns") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(methodBox, 0) tkselection.set(indexBox, 0) tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) 
tkwait.window(top) } diversityvars <- function(){ .communityDataSet <- CommunityDataSet() .activeDataSet <- ActiveDataSet() justDoIt(paste(.activeDataSet, " <- diversityvariables(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) logger(paste(.activeDataSet, " <- diversityvariables(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) activeDataSet(.activeDataSet) } rankabunGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Rank abundance curves") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Factors() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) .cvariables <- CVariables() cvariables <- paste(.cvariables) modelName <- tclVar("RankAbun.1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) choicesFrame <- tkframe(top, relief="groove", borderwidth=2) optionFrame <- tkframe(choicesFrame) option1Frame <- tkframe(optionFrame) option2Frame <- tkframe(optionFrame) scaleBox <- tklistbox(option1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") scaleScroll <- tkscrollbar(option1Frame, repeatinterval=5, command=function(...) tkyview(scaleBox, ...)) tkconfigure(scaleBox, yscrollcommand=function(...) tkset(scaleScroll, ...)) scales <- c("abundance", "proportion", "logabun", "accumfreq") for (x in scales) tkinsert(scaleBox, "end", x) ggplotVariable <- tclVar("0") ggplotCheckBox <- tkcheckbutton(option2Frame, variable=ggplotVariable) radVariable <- tclVar("0") radCheckBox <- tkcheckbutton(option2Frame, variable=radVariable) addVariable <- tclVar("0") addCheckBox <- tkcheckbutton(option2Frame, variable=addVariable) xlist <- tclVar("") xEntry <- tkentry(option2Frame, width=10, textvariable=xlist) ylist <- tclVar("") yEntry <- tkentry(option2Frame, width=10, textvariable=ylist) subsetFrame <- tkframe(choicesFrame) subset1Frame <- tkframe(subsetFrame) subset2Frame <- tkframe(subsetFrame) subsetBox <- tklistbox(subset1Frame, width=27, height=9, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(subset1Frame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) variables <- c("all",variables) for (x in variables) tkinsert(subsetBox, "end", x) subset <- tclVar(".") subsetEntry <- tkentry(subset2Frame, width=10, textvariable=subset) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) modelValue <- tclvalue(modelName) var <- variables[as.numeric(tkcurselection(subsetBox))+1] sub <- tclvalue(subset) scale <- scales[as.numeric(tkcurselection(scaleBox))+1] xlim <- tclvalue(xlist) if (xlim != "") {xlim <- paste(", xlim=c(", xlim, ")", sep="")} ylim <- tclvalue(ylist) if (ylim != "") {ylim <- paste(", ylim=c(", ylim, ")", sep="")} if (var == "all") { command <- paste("rankabundance(", .communityDataSet, ")", sep="") }else{ var <- .variables[as.numeric(tkcurselection(subsetBox))] if (sub == ".") { command <- paste("rankabuncomp(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', scale='", scale, "'", xlim, ylim, ", legend=F, rainbow=T)", sep="") }else{ command <- paste("rankabundance(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "')", sep="") } } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste(modelValue)) } onPlot <- function(){ modelValue <- tclvalue(modelName) var <- variables[as.numeric(tkcurselection(subsetBox))+1] ggplotit <- tclvalue(ggplotVariable) == "1" if (ggplotit==T) { logger(paste(" ")) logger(paste("Please note that plotting requires a result from function 'rankabuncomp'.")) logger(paste("Such results are obtained when selecting a factor from the Subset options.")) logger(paste(" ")) justDoIt(paste("library(ggplot2)", sep="")) logger(paste("library(ggplot2)", sep="")) doItAndPrint("BioR.theme <- theme(panel.background = element_blank(), panel.border = element_blank(), panel.grid = element_blank(), axis.line = element_line('gray25'), text = element_text(size = 12), axis.text = element_text(size = 10, colour = 'gray25'), axis.title = element_text(size = 14, colour = 'gray25'), legend.title = element_text(size = 14), legend.text = element_text(size = 14), legend.key = element_blank() )") doItAndPrint(paste("plotgg1 <- ggplot(data=", modelValue, ", aes(x = rank, y = abundance)) + scale_x_continuous(expand=c(0, 1), sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(expand=c(0, 1), sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_line(aes(colour=Grouping), size=1) + geom_point(aes(colour=Grouping, shape=Grouping), size=5, alpha=0.5) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(x = 'rank', y = 'abundance', colour = 'Factor', shape= 'Factor')", sep="")) doItAndPrint("plotgg1") }else{ radfit <- tclvalue(radVariable) == "1" addit <- tclvalue(addVariable) == "1" scale <- scales[as.numeric(tkcurselection(scaleBox))+1] xlim <- tclvalue(xlist) if (xlim != "") {xlim <- paste(", xlim=c(", xlim, ")", sep="")} ylim <- tclvalue(ylist) if (ylim != "") {ylim <- paste(", ylim=c(", ylim, ")", sep="")} sub <- tclvalue(subset) if (radfit==T) { if (var == "all") { doItAndPrint(paste("radfitresult(", .communityDataSet, ")", sep="")) }else{ var <- .variables[as.numeric(tkcurselection(subsetBox))] doItAndPrint(paste("radfitresult(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "')", sep="")) } }else{ if (sub == ".") { doItAndPrint(paste("rankabunplot(", modelValue, ",scale='", scale, "', addit=", addit, xlim, ylim, ", specnames=c(1,2,3))", sep="")) }else{ 
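# rankabunplot() draws a single curve from a rankabundance() result;
# sketch (vegan 'dune' data assumed):
#   RA.dune <- rankabundance(dune)
#   rankabunplot(RA.dune, scale='abundance', addit=FALSE, specnames=c(1:3))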
doItAndPrint(paste("rankabunplot(", modelValue, ",scale='", scale, "', addit=", addit, ", labels='", sub, "'", xlim, ylim, ", specnames=c(1,2,3))", sep="")) } } } # ggplotit } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('rankabundance', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(subsetFrame, text="Subset options"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(tklabel(subset2Frame, text="subset: ", width=15), subsetEntry, sticky="w") tkgrid(subset1Frame, sticky="w") tkgrid(subset2Frame, sticky="w") tkgrid(tklabel(option1Frame, text="Plot options"), sticky="w") tkgrid(scaleBox, scaleScroll,sticky="w") tkgrid(ggplotCheckBox, tklabel(option2Frame, text="ggplot (rankabuncomp) "), sticky="e") tkgrid(radCheckBox, tklabel(option2Frame, text="fit RAD"), sticky="e") tkgrid(addCheckBox, tklabel(option2Frame, text="add plot"), sticky="e") tkgrid(tklabel(option2Frame, text="x limits: ", width=10), xEntry, sticky="w") tkgrid(tklabel(option2Frame, text="y limits: ", width=10), yEntry, sticky="w") tkgrid(option1Frame, sticky="w") tkgrid(option2Frame, sticky="w") tkgrid(subsetFrame, tklabel(choicesFrame, text="", width=1), optionFrame, sticky="w") tkgrid(choicesFrame, sticky="w") tkgrid(OKbutton, plotButton, tklabel(buttonsFrame, text=" "), cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(scaleScroll, sticky="ns") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(scaleBox, 0) tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkwait.window(top) } renyiGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Renyi diversity profile") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Factors() variables <- paste(.variables) .cvariables <- CVariables() cvariables <- paste(.cvariables) modelName <- tclVar("Renyi.1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) choicesFrame <- tkframe(top, relief="groove", borderwidth=2) methodFrame <- tkframe(choicesFrame) methodBox <- tklistbox(methodFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(methodFrame, repeatinterval=3, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) 
tkset(methodScroll, ...)) methods <- c("renyiall", "renyi separate per site", "renyi accumulation", "tsallis", "tsallis accumulation") for (x in methods) tkinsert(methodBox, "end", x) scalelist <- tclVar("0, 0.25, 0.5, 1, 2, 4, 8, Inf") scaleFrame <- tkframe(choicesFrame) scaleEntry <- tkentry(scaleFrame, width=40, textvariable=scalelist) permVariable <- tclVar("999") permutation <- tkentry(scaleFrame, width=10, textvariable=permVariable) optionFrame <- tkframe(choicesFrame) evenVariable <- tclVar("0") evenCheckBox <- tkcheckbutton(optionFrame, variable=evenVariable) ggplotVariable <- tclVar("0") ggplotCheckBox <- tkcheckbutton(optionFrame, variable=ggplotVariable) addVariable <- tclVar("0") addCheckBox <- tkcheckbutton(optionFrame, variable=addVariable) ylist <- tclVar("") yEntry <- tkentry(optionFrame, width=40, textvariable=ylist) symbol <- tclVar("1") symbolEntry <- tkentry(optionFrame, width=40, textvariable=symbol) colour <- tclVar("1") colourEntry <- tkentry(optionFrame, width=40, textvariable=colour) cexa <- tclVar("1") cexEntry <- tkentry(optionFrame, width=40, textvariable=cexa) subsetFrame <- tkframe(choicesFrame) subset1Frame <- tkframe(subsetFrame) subset2Frame <- tkframe(subsetFrame) subsetBox <- tklistbox(subset1Frame, width=27, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(subset1Frame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) tkset(subsetScroll, ...)) variables <- c("(none)", variables) for (x in variables) tkinsert(subsetBox, "end", x) subset <- tclVar(".") subsetEntry <- tkentry(subset2Frame, width=10, textvariable=subset) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) modelValue <- tclvalue(modelName) method <- methods[as.numeric(tkcurselection(methodBox))+1] method1 <- method if (method1 == "renyiall") {method1 <- "all"} if (method1 == "renyi separate per site") {method1 <- "s"} scales <- tclvalue(scalelist) evenness <- tclvalue(evenVariable) == "1" ylim <- tclvalue(ylist) if (ylim != "") {ylim <- paste(", ylim=c(", ylim, ")", sep="")} var <- variables[as.numeric(tkcurselection(subsetBox))+1] sub <- tclvalue(subset) perm <- as.numeric(tclvalue(permVariable)) if (method %in% c("renyiall", "renyi separate per site", "renyi accumulation")) { if (var == "(none)") { if (method=="renyi accumulation") { command <- paste("renyiaccum(", .communityDataSet, ", scales=c(", scales, "), permutations=", perm, ")", sep="") }else{ command <- paste("renyiresult(", .communityDataSet, ", scales=c(", scales, "), method='", method1, "')", sep="") } }else{ var <- .variables[as.numeric(tkcurselection(subsetBox))] if (sub == ".") { command <- paste("renyicomp(", .communityDataSet, ", evenness=", evenness, ", y=", .activeDataSet, ", factor='", var, "', scales=c(", scales, "), permutations=", perm, ylim, ", legend=F)", sep="") }else{ if (method=="renyi accumulation") { command <- paste("renyiaccumresult(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "', scales=c(", scales, "), permutations=", perm, ")", sep="") }else{ command <- paste("renyiresult(", .communityDataSet, ", y=", .activeDataSet, ", factor='", var, "', level='", sub, "', scales=c(", scales, "), method='", method1, "')", sep="") } } } } if (method=="tsallis") { command <- paste("tsallis(", .communityDataSet, ", scales=c(0, 0.25, 0.5, 1, 2, 4))", sep="") } if (method=="tsallis accumulation") { command 
<- paste("tsallisaccum(", .communityDataSet, ", scales=c(0, 0.25, 0.5, 1, 2, 4), permutations=", perm, ")", sep="") } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste(modelValue)) } onPlot <- function(){ modelValue <- tclvalue(modelName) method <- methods[as.numeric(tkcurselection(methodBox))+1] evenness <- tclvalue(evenVariable) == "1" ggplotit <- tclvalue(ggplotVariable) == "1" addit <- tclvalue(addVariable) == "1" ylim <- tclvalue(ylist) if (ylim != "") {ylim <- paste(", ylim=c(", ylim, ")", sep="")} pch <- tclvalue(symbol) col <- tclvalue(colour) cex <- tclvalue(cexa) var <- variables[as.numeric(tkcurselection(subsetBox))+1] sub <- tclvalue(subset) if (ggplotit==T) { logger(paste(" ")) logger(paste("Please note that plotting requires a result from function 'renyicomp'.")) logger(paste("Such results are obtained when selecting a factor from the Subset options and the 'renyi accumulation' method from the Methods options.")) logger(paste(" ")) justDoIt(paste("library(ggplot2)", sep="")) logger(paste("library(ggplot2)", sep="")) doItAndPrint("BioR.theme <- theme(panel.background = element_blank(), panel.border = element_blank(), panel.grid = element_blank(), axis.line = element_line('gray25'), text = element_text(size = 12), axis.text = element_text(size = 10, colour = 'gray25'), axis.title = element_text(size = 14, colour = 'gray25'), legend.title = element_text(size = 14), legend.text = element_text(size = 14), legend.key = element_blank() )") doItAndPrint(paste("plotgg1 <- ggplot(data=renyicomp.long(", modelValue, "), aes(x = Scales, y = Diversity, ymax = UPR, ymin= LWR)) + scale_x_discrete() + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_line(data=renyicomp.long(", modelValue, "), aes(x=Obs, colour=Grouping), size=2) + geom_point(aes(colour=Grouping, shape=Grouping), size=5) + geom_ribbon(data=renyicomp.long(", modelValue, "), aes(x=Obs, colour=Grouping), alpha=0.2, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(x = expression(alpha), y = 'Diversity', colour = 'Factor', shape= 'Factor')", sep="")) doItAndPrint("plotgg1") }else{ if (method=="renyi accumulation" || method=="tsallis accumulation") { justDoIt(paste("persp(", modelValue, ")", sep="")) logger(paste("persp(", modelValue, ")", sep="")) logger(paste("for interactive 3d plot, use vegan3d::rgl.renyiaccum", sep="")) } if (method=="renyi" || method=="renyi separate per site") { if (var != "none" || sub != ".") { if (evenness == F) { justDoIt(paste("renyiplot(", modelValue, ", xlab='alpha', ylab='H-alpha', evenness=F, addit=", addit, ", rainbow=T, legend=F, pch=", pch, ",col='", col, "', cex=", cex, ylim, ")", sep="")) logger(paste("renyiplot(", modelValue, ", xlab='alpha', ylab='H-alpha', evenness=F, addit=", addit, ", rainbow=T, legend=F, pch=", pch, ",col='", col, "', cex=", cex, ylim, ")", sep="")) }else{ justDoIt(paste("renyiplot(", modelValue, ", xlab='alpha', ylab='E-alpha', evenness=T, addit=", addit, ", rainbow=T, legend=F, pch=", pch, ",col='", col, "', cex=", cex, ylim, ")", sep="")) logger(paste("renyiplot(", modelValue, ", xlab='alpha', ylab='H-alpha', evenness=T, addit=", addit, ", rainbow=T, legend=F, pch=", pch, ",col='", col, "', cex=", cex, ylim, ")", sep="")) } } } if (method=="tsallis") { doItAndPrint(paste("dev.new()", sep="")) justDoIt(paste("plot(", modelValue, ")", sep="")) logger(paste("plot(", modelValue, ")", sep="")) } } # ggplotit } onCancel <- function() { 
tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('renyiresult', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(methodFrame, text="Calculation method"), sticky="w") tkgrid(methodBox, methodScroll,sticky="w") tkgrid(tklabel(scaleFrame, text=" "), sticky="w") tkgrid(tklabel(scaleFrame, text="scale parameters: ", width=20), scaleEntry, sticky="w") tkgrid(tklabel(scaleFrame, text="permutations", width=10), permutation, sticky="w") tkgrid(tklabel(subsetFrame, text="Subset options"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(tklabel(subset2Frame, text="subset: ", width=15), subsetEntry, sticky="w") tkgrid(subset1Frame, sticky="w") tkgrid(subset2Frame, sticky="w") tkgrid(tklabel(optionFrame, text="Plot options"), sticky="w") tkgrid(ggplotCheckBox, tklabel(optionFrame, text="ggplot (renyicomp)"), sticky="w") tkgrid(evenCheckBox, tklabel(optionFrame, text="evenness profile"), sticky="w") tkgrid(addCheckBox, tklabel(optionFrame, text="add plot"), sticky="w") tkgrid(tklabel(optionFrame, text="y limits: ", width=10), yEntry, sticky="w") tkgrid(tklabel(optionFrame, text="symbol: ", width=10), symbolEntry, sticky="w") tkgrid(tklabel(optionFrame, text="colour: ", width=10), colourEntry, sticky="w") tkgrid(tklabel(optionFrame, text="cex: ", width=10), cexEntry, sticky="w") tkgrid(methodFrame, tklabel(choicesFrame, text="", width=1), scaleFrame, sticky="w") tkgrid(subsetFrame, tklabel(choicesFrame, text="", width=1), optionFrame, sticky="w") tkgrid(choicesFrame, sticky="w") tkgrid(OKbutton, plotButton, tklabel(buttonsFrame, text=" "), cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(methodScroll, sticky="ns") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(methodBox, 0) tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) tkwait.window(top) } countGUI <- function(){ contrasts <- c("contr.treatment", "contr.poly") checkAddOperator <- function(rhs){ rhs.chars <- rev(strsplit(rhs, "")[[1]]) if (length(rhs.chars) < 1) return(FALSE) check.char <- if ((rhs.chars[1] != " ") || (length(rhs.chars) == 1)) rhs.chars[1] else rhs.chars[2] !is.element(check.char, c("+", "*", ":", "/", "-", "^", "(", "%")) } top <- tktoplevel() tkwm.title(top, "Analysis of species abundance") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) allvars <- "" if (length(.variables) > 1) { for (i in 1:(length(.variables)-1)) { allvars <- paste(allvars, .variables[i], "+") } allvars <- paste(allvars, .variables[length(.variables)]) }else{ allvars <- paste(allvars, .variables[1]) } .cvariables <- CVariables() cvariables <- paste(.cvariables) xFrame <- tkframe(top, 
relief="groove", borderwidth=2) x1Frame <- tkframe(xFrame) x4Frame <- tkframe(xFrame) x2Frame <- tkframe(x4Frame) x3Frame <- tkframe(x4Frame) xBox <- tklistbox(x2Frame, width=28, height=5, selectmode="single", background="white", exportselection="FALSE") xScroll <- tkscrollbar(x2Frame, repeatinterval=5, command=function(...) tkyview(xBox, ...)) tkconfigure(xBox, yscrollcommand=function(...) tkset(xScroll, ...)) for (x in variables) tkinsert(xBox, "end", x) resFrame <- tkframe(top, relief="groove", borderwidth=2) yFrame <- tkframe(resFrame) yBox <- tklistbox(yFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") yScroll <- tkscrollbar(yFrame, repeatinterval=5, command=function(...) tkyview(yBox, ...), width=18) tkconfigure(yBox, yscrollcommand=function(...) tkset(yScroll, ...)) for (x in cvariables) tkinsert(yBox, "end", x) lhsVariable <- tclVar("") lhsFrame <- tkframe(resFrame) lhsEntry <- tkentry(lhsFrame, width=28, textvariable=lhsVariable) rhsVariable <- tclVar("") rhsEntry <- tkentry(x1Frame, width=60, textvariable=rhsVariable) modelName <- tclVar("Count.model1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) subsetVariable <- tclVar("") subsetFrame <- tkframe(top, relief="groove", borderwidth=2) subsetEntry <- tkentry(subsetFrame, width=40, textvariable=subsetVariable) plotFrame <- tkframe(top, relief="groove", borderwidth=2) plot1Frame <- tkframe(plotFrame) plot2Frame <- tkframe(plotFrame) typeBox <- tklistbox(plot1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") typeScroll <- tkscrollbar(plot1Frame, repeatinterval=5, command=function(...) tkyview(typeBox, ...)) tkconfigure(typeBox, yscrollcommand=function(...) tkset(typeScroll, ...)) types <- c("diagnostic plots", "levene test (factor)", "term plot", "effect plot", "qq plot", "result plot (new)", "result plot (add)", "result plot (interpolate)", "cr plot", "av plot", "influence plot", "multcomp (factor)", "rpart") for (x in types) tkinsert(typeBox, "end", x) axisBox <- tklistbox(plot2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") axisScroll <- tkscrollbar(plot2Frame, repeatinterval=5, command=function(...) tkyview(axisBox, ...)) tkconfigure(axisBox, yscrollcommand=function(...) tkset(axisScroll, ...)) for (x in variables) tkinsert(axisBox, "end", x) optionFrame <- tkframe(top, relief="groove", borderwidth=2) option1Frame <- tkframe(optionFrame) option2Frame <- tkframe(optionFrame) optionBox <- tklistbox(option1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") optionScroll <- tkscrollbar(option1Frame, repeatinterval=5, command=function(...) tkyview(optionBox, ...)) tkconfigure(optionBox, yscrollcommand=function(...) 
tkset(optionScroll, ...)) options <- c("summarySE", "linear model", "Poisson model", "quasi-Poisson model", "negative binomial model", "gam model", "gam negbinom model", "glmmPQL", "rpart") for (x in options) tkinsert(optionBox, "end", x) standardVariable <- tclVar("0") standardCheckBox <- tkcheckbutton(option2Frame, variable=standardVariable) summaryVariable <- tclVar("1") summaryCheckBox <- tkcheckbutton(option2Frame, variable=summaryVariable) anovaVariable <- tclVar("0") anovaCheckBox <- tkcheckbutton(option2Frame, variable=anovaVariable) dataVariable <- tclVar("0") dataCheckBox <- tkcheckbutton(option2Frame, variable=dataVariable) onDoubleClick <- function(){ var <- as.character(tkget(xBox, "active"))[1] tkfocus(rhsEntry) rhs <- tclvalue(rhsVariable) rhs.chars <- rev(strsplit(rhs, "")[[1]]) check.char <- if (length(rhs.chars) > 0){ if ((rhs.chars[1] != " ") || (length(rhs.chars) == 1)) rhs.chars[1] else rhs.chars[2] } else "" tclvalue(rhsVariable) <- if (rhs == "" || is.element(check.char, c("+", "*", ":", "/", "-", "^", "(", "%"))) paste(rhs, var, sep="") else paste(rhs, "+", var) tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onDoubleClick2 <- function(){ var <- as.character(tkget(yBox, "active"))[1] lhs <- tclvalue(lhsVariable) tclvalue(lhsVariable) <- var } onPlus <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "+ ") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onTimes <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "*", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onColon <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, ":", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onSlash <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "/", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onIn <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "%in% ") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onMinus <- function(){ rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "+I(") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onMinus2 <- function(){ rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "s(") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onPower <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "^", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onLeftParen <- function(){ tkfocus(rhsEntry) rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "(", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onRightParen <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, ")", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) check.empty <- gsub(" ", "", tclvalue(lhsVariable)) if ("" == check.empty) { tkmessageBox(message="Left-hand side of model empty.", icon="error", type="ok") } check.empty <- gsub(" ", "", tclvalue(rhsVariable)) if ("" == check.empty) { tkmessageBox(message="Right-hand side of model 
empty.", icon="error", type="ok") } modelValue <- tclvalue(modelName) if (!is.valid.name(modelValue)){ tkmessageBox(message=paste('"', modelValue, '" is not a valid name.', sep=""), icon="error", type="ok") } right <- tclvalue(rhsVariable) if (right == ".") right <- allvars formula <- paste(tclvalue(lhsVariable), right, sep=" ~ ") subsetval <- tclvalue(subsetVariable) if (subsetval != "") { DataSet1 <- eval(parse(text=paste(.activeDataSet, sep="")), envir=.GlobalEnv) DataSet2 <- eval(parse(text=paste(.communityDataSet, sep="")), envir=.GlobalEnv) list <- (rownames(DataSet1) != subsetval) DataSet1 <- DataSet1[list,] DataSet2 <- DataSet2[list,] name1 <- paste(.activeDataSet,".m.", subsetval,sep="") name2 <- paste(.communityDataSet,".m.", subsetval,sep="") assign(name1,DataSet1, envir=.GlobalEnv) assign(name2,DataSet2, envir=.GlobalEnv) activeDataSet(name1) communityDataSet(name2) } stan <- tclvalue(standardVariable) == "1" if (stan==T) { DataSet1 <- eval(parse(text=paste(.activeDataSet, sep="")), envir=.GlobalEnv) standard <- paste(.activeDataSet, ".standard",sep="") for (j in 1:ncol(DataSet1)) { if (is.factor(DataSet1[,j]) == F) {DataSet1[,j] <- scale(DataSet1[,j])} } assign(standard,DataSet1, envir=.GlobalEnv) activeDataSet(standard) } justDoIt(paste(.activeDataSet, "$", tclvalue(lhsVariable), "<- ", .communityDataSet, "$",tclvalue(lhsVariable), sep="")) logger(paste(.activeDataSet, "$", tclvalue(lhsVariable), "<- ", .communityDataSet, "$",tclvalue(lhsVariable), sep="")) option <- options[as.numeric(tkcurselection(optionBox))+1] if (option=="negative binomial model") { justDoIt(paste("library(MASS)")) logger(paste("library(MASS)")) } if (option=="gam model" || option=="gam negbinom model") { justDoIt(paste("library(mgcv)")) logger(paste("library(mgcv)")) } if (option=="rpart") { justDoIt(paste("library(rpart)")) logger(paste("library(rpart)")) } if (option == "summarySE"){ doItAndPrint(paste("Rmisc::summarySE(data=", .activeDataSet, ", measurevar='", tclvalue(lhsVariable), "', groupvars='", tclvalue(rhsVariable), "', na.rm=F, conf.interval=0.95)", sep="")) } if (option == "linear model"){ command <- paste("lm(", formula, ", data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "Poisson model"){ command <- paste("glm(", formula, ", family=poisson(link=log), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "quasi-Poisson model"){ command <- paste("glm(", formula, ", family=quasipoisson(link=log), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "negative binomial model"){ command <- paste("glm.nb(", formula, ", init.theta=1, data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "gam model"){ command <- paste("gam(", formula, ", family=poisson(link=log), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "gam negbinom model"){ command <- paste("gam(", formula, ", family=negbin(1), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "glmmPQL"){ command <- paste("glmmPQL(", formula, ", family=quasipoisson(link=log), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "rpart"){ command <- paste("rpart(", formula, ", data=",.activeDataSet, ", na.action=na.rpart, method='anova')", sep="") } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) sum <- tclvalue(summaryVariable) == "1" if (sum==T) { doItAndPrint(paste("summary(", modelValue, ")", sep="")) if (option != "linear model" && 
option!="gam model" && option!="gam negbinom model" && option!="glmmPQL" && option!="rpart") { doItAndPrint(paste("deviancepercentage(", modelValue, ", na.omit(", .activeDataSet, "), digits=2)", sep="")) } } anov <- tclvalue(anovaVariable) == "1" if (anov==T && option!="glmmPQL" && option!="rpart") { doItAndPrint(paste("anova(", modelValue, ",test='F')", sep="")) doItAndPrint(paste("car::vif(lm(", formula, ", data=na.omit(",.activeDataSet, ")))", sep="")) if (option=="linear model") { doItAndPrint(paste("drop1(", modelValue, ",test='F')", sep="")) doItAndPrint(paste("car::Anova(", modelValue, ",type='II')", sep="")) } if (option=="Poisson model" || option=="quasi-Poisson model" || option=="negative binomial model") { doItAndPrint(paste("drop1(", modelValue, ",test='F')", sep="")) doItAndPrint(paste("car::Anova(", modelValue, ",type='II', test='F', error.estimate='deviance')", sep="")) doItAndPrint(paste("car::Anova(", modelValue, ",type='II', test='Wald')", sep="")) } } data <- tclvalue(dataVariable) =="1" if (data==T) { if (option=="rpart") { justDoIt(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='vector', na.action=na.fail)[,2]", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='vector', na.action=na.fail)[,2]", sep="")) }else{ justDoIt(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='response')", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='response')", sep="")) } activeDataSet(.activeDataSet) } } onPlot <- function(){ modelValue <- tclvalue(modelName) y <- tclvalue(lhsVariable) right <- tclvalue(rhsVariable) if (right == ".") right <- allvars formula <- paste(tclvalue(lhsVariable), right, sep=" ~ ") axisvar <- .variables[as.numeric(tkcurselection(axisBox))+1] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) option <- options[as.numeric(tkcurselection(optionBox))+1] plottype <- types[as.numeric(tkcurselection(typeBox))+1] if (plottype == "diagnostic plots"){ if (option=="gam model" || option=="gam negbinom model") { doItAndPrint(paste("gam.check(", modelValue, ")", sep="")) } if (option=="rpart"){ doItAndPrint(paste("plot(predict(", modelValue, ",type='vector'),residuals(", modelValue, "), xlab='predictions', ylab='residuals')", sep="")) doItAndPrint(paste("abline(h=0,lty=3)")) } if (option=="linear model" || option=="Poisson model" || option=="quasi-Poisson model" || option=="negative binomial model") { doItAndPrint(paste("par(mfrow=c(2,2))")) doItAndPrint(paste("plot(", modelValue, ")", sep="")) doItAndPrint(paste("par(mfrow=c(1,1))")) } } if (plottype == "levene test (factor)" && option !="rpart" && varfactor==T) { doItAndPrint(paste("leveneTest(residuals(", modelValue, "), ", .activeDataSet ,"$", axisvar, ")", sep="")) justDoIt(paste("plot(residuals(", modelValue, ") ~ ", .activeDataSet ,"$", axisvar, ")", sep="")) logger(paste("plot(residuals(", modelValue, ") ~ ", .activeDataSet ,"$", axisvar, ")", sep="")) doItAndPrint(paste("points(", .activeDataSet ,"$", axisvar, ",residuals(", modelValue, "))", sep="")) } if (plottype == "term plot" && option !="rpart"){ if (option == "gam model" || option == "gam negbinom model"){ doItAndPrint(paste("plot(", modelValue, ", se=T, rug=T)", sep="")) }else{ doItAndPrint(paste("termplot(", modelValue, ", se=T, partial.resid=T, rug=T, terms='", axisvar, "')", sep="")) } } if (plottype == "qq plot" && option !="rpart") { 
doItAndPrint(paste("qqPlot(residuals(", modelValue, "))", sep="")) doItAndPrint(paste("shapiro.test(residuals(", modelValue, "))", sep="")) doItAndPrint(paste("ks.test(residuals(", modelValue, "), pnorm)", sep="")) } if (plottype == "effect plot" && option !="rpart") { justDoIt(paste("library(effects)", sep="")) logger(paste("library(effects)", sep="")) doItAndPrint(paste("as.data.frame(effect('", axisvar, "', ", modelValue, "))", sep="")) doItAndPrint(paste("plot(effect('", axisvar, "', ", modelValue, ", xlevels=500))", sep="")) } if (plottype == "result plot (new)" || plottype =="result plot (add)" || plottype == "result plot (interpolate)"){ if (plottype == "result plot (new)"){ justDoIt(paste("plot(", .activeDataSet, "$", y, "~ ", .activeDataSet, "$", axisvar, ", xlab='", axisvar, "', ylab='", y, "')", sep="")) logger(paste("plot(", .activeDataSet, "$", y, "~ ", .activeDataSet, "$", axisvar, ", xlab='", axisvar, "', ylab='", y, "')", sep="")) } if (plottype=="result plot (interpolate)" && varfactor==F) { varmin <- eval(parse(text=paste("min(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) varmax <- eval(parse(text=paste("max(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) prdata <- paste(.activeDataSet, ".pred", sep="") prdatacont <- data.frame(seq(varmin,varmax,length=1000)) colnames(prdatacont) <- axisvar assign(prdata, prdatacont, envir=.GlobalEnv) } if (option=="rpart") { if (plottype=="result plot (interpolate)" && varfactor==F) { doItAndPrint(paste("points(predict(", modelValue, ", newdata=", prdata, ", type='vector') ~ ", prdata, "$", axisvar, ", type='l', lwd=2, col='red')", sep="")) }else{ doItAndPrint(paste("points(predict(", modelValue, ", newdata=", .activeDataSet, ", type='vector') ~ ", .activeDataSet, "$", axisvar, ", col='red', cex=1.5)", sep="")) } } if (option=="linear model" && plottype!="result plot (interpolate)") { prmodel <- paste(modelValue, ".pred", sep="") logger(paste(prmodel, " <- data.frame(predict(", modelValue, ", newdata=", .activeDataSet, ", interval='confidence'))", sep="")) assign(prmodel, justDoIt(paste("data.frame(predict(", modelValue, ", newdata=", .activeDataSet, ", interval='confidence'))", sep="")), envir=.GlobalEnv) doItAndPrint(paste("points(", prmodel, "$fit ~ ", .activeDataSet, "$", axisvar, ", col='red', cex=1.5)", sep="")) doItAndPrint(paste("segments(as.numeric(", .activeDataSet, "$", axisvar, "),", prmodel, "$upr, as.numeric(", .activeDataSet, "$", axisvar, "),", prmodel, "$lwr, lty=2, col='red')", sep="")) } if (option=="linear model" && plottype=="result plot (interpolate)" && varfactor==F) { prmodel <- paste(modelValue, ".pred", sep="") logger(paste(prmodel, " <- data.frame(predict(", modelValue, ", newdata=", prdata, ", interval='confidence'))", sep="")) assign(prmodel, justDoIt(paste("data.frame(predict(", modelValue, ", newdata=", prdata, ", interval='confidence'))", sep="")), envir=.GlobalEnv) doItAndPrint(paste("points(", prmodel, "$fit ~ ", prdata, "$", axisvar, ", type='l', lwd=2, col='red')", sep="")) doItAndPrint(paste("points(", prmodel, "$upr ~ ", prdata, "$", axisvar, ", type='l', lty=2, col='red')", sep="")) doItAndPrint(paste("points(", prmodel, "$lwr ~ ", prdata, "$", axisvar, ", type='l', lty=2, col='red')", sep="")) } if (option!="rpart" && option!="linear model" && plottype!="result plot (interpolate)") { prmodel <- paste(modelValue, ".pred", sep="") logger(paste(prmodel, " <- predict(", modelValue, ", newdata=", .activeDataSet, ", type='response', se.fit=T)", sep="")) assign(prmodel, 
justDoIt(paste("predict(", modelValue, ", newdata=", .activeDataSet, ", type='response', se.fit=T)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("points(", prmodel, "$fit ~ ", .activeDataSet, "$", axisvar, ", col='red', cex=1.5)", sep="")) doItAndPrint(paste("segments(as.numeric(", .activeDataSet, "$", axisvar, "),", prmodel, "$fit + 2*", prmodel, "$se.fit, as.numeric(", .activeDataSet, "$", axisvar, "),", prmodel, "$fit - 2*", prmodel, "$se.fit, lty=2, col='red')", sep="")) } if (option!="rpart" && option!="linear model" && plottype=="result plot (interpolate)" && varfactor==F) { prmodel <- paste(modelValue, ".pred", sep="") logger(paste(prmodel, " <- predict(", modelValue, ", newdata=", prdata, ", type='response', se.fit=T)", sep="")) assign(prmodel, justDoIt(paste("predict(", modelValue, ", newdata=", prdata, ", type='response', se.fit=T)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("points(", prmodel, "$fit ~ ", prdata, "$", axisvar, ", type='l', lwd=2, col='red')", sep="")) doItAndPrint(paste("points((", prmodel, "$fit + 2*", prmodel, "$se.fit) ~ ", prdata, "$", axisvar, ", type='l', lty=2, col='red')", sep="")) doItAndPrint(paste("points((", prmodel, "$fit - 2*", prmodel, "$se.fit) ~ ", prdata, "$", axisvar, ", type='l', lty=2, col='red')", sep="")) } } if (plottype == "cr plot" && option !="rpart") { doItAndPrint(paste("crPlots(", modelValue, ",'", axisvar, "')", sep="")) } if (plottype == "av plot" && option !="rpart") { doItAndPrint(paste("avPlots(", modelValue, ", ask=F, identify.points=F)", sep="")) } if (plottype == "influence plot" && option !="rpart") { doItAndPrint(paste("influencePlot(", modelValue, ", labels=F)", sep="")) doItAndPrint(paste("influence.measures(", modelValue, ")", sep="")) } if (plottype == "multcomp (factor)" && option !="rpart" && varfactor==T) { justDoIt(paste("library(multcomp)", sep="")) logger(paste("library(multcomp)", sep="")) doItAndPrint(paste("plot(print(confint(glht(", modelValue, ", linfct = mcp(", axisvar, "= 'Tukey')))))", sep="")) } if (plottype == "rpart" && option =="rpart") { justDoIt(paste("par(xpd=NA)")) logger(paste("par(xpd=NA)")) justDoIt(paste("plot(", modelValue, ", compress=T, uniform=F, branch=0.7)", sep="")) logger(paste("plot(", modelValue, ", compress=T, uniform=F, branch=0.7)", sep="")) doItAndPrint(paste("text(", modelValue, ", use.n=T, all=T, col='blue', cex=1, pretty=0, fancy=T, fwidth=0.99, fheight=0.99)", sep="")) justDoIt(paste("par(xpd=F)")) logger(paste("par(xpd=F)")) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } .operatorFont <- Rcmdr::getRcmdr("operatorFont") plusButton <- tkbutton(x3Frame, text="+", width="3", command=onPlus, font=.operatorFont) timesButton <- tkbutton(x3Frame, text="*", width="3", command=onTimes, font=.operatorFont) colonButton <- tkbutton(x3Frame, text=":", width="3", command=onColon, font=.operatorFont) slashButton <- tkbutton(x3Frame, text="/", width="3", command=onSlash, font=.operatorFont) inButton <- tkbutton(xFrame, text="%in%", width="3", command=onIn, font=.operatorFont) minusButton <- tkbutton(x3Frame, text="I(", width="3", command=onMinus, font=.operatorFont) minus2Button <- tkbutton(x3Frame, text="s(", width="3", command=onMinus2, font=.operatorFont) powerButton <- tkbutton(x3Frame, text="^", width="3", command=onPower, font=.operatorFont) leftParenButton <- tkbutton(x3Frame, text="(", width="3", command=onLeftParen, font=.operatorFont) rightParenButton <- tkbutton(x3Frame, text=")", width="3", command=onRightParen, 
font=.operatorFont) buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) help(generalizedLinearModel) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) tkgrid(tklabel(modelFrame, text="Save model as: ", width=20), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(option1Frame, text="Model options"), sticky="w") tkgrid(optionBox, optionScroll,sticky="w") tkgrid(standardCheckBox, tklabel(option2Frame, text="standardise"), sticky="w") tkgrid(summaryCheckBox, tklabel(option2Frame, text="print summary"), sticky="w") tkgrid(anovaCheckBox, tklabel(option2Frame, text="print anova"), sticky="w") tkgrid(dataCheckBox, tklabel(option2Frame, text="add predictions to dataframe"), sticky="w") tkgrid(option1Frame, tklabel(optionFrame, text="", width=1), option2Frame, sticky="w") tkgrid(optionFrame, sticky="w") tkgrid(tklabel(lhsFrame, text="Response"), sticky="w") tkgrid(lhsEntry, sticky="nw") tkgrid(yBox, yScroll, sticky="nw") tkgrid(lhsFrame,tklabel(resFrame, text="", width=1), yFrame) tkgrid(resFrame, sticky="w") tkgrid(rhsEntry, sticky="w") tkgrid(xBox, xScroll,sticky="w") tkgrid(plusButton, timesButton, colonButton, slashButton, inButton, sticky="w") tkgrid(minusButton,powerButton, leftParenButton, rightParenButton, minus2Button, sticky="w") tkgrid(tklabel(xFrame, text="Explanatory"), sticky="w") tkgrid(x1Frame, sticky="w") tkgrid(x2Frame, tklabel(xFrame, text="", width=1), x3Frame, sticky="w") tkgrid(x4Frame, sticky="w") tkgrid(xFrame, sticky="w") tkgrid(tklabel(subsetFrame, text="Remove site with name", width=20), subsetEntry, sticky="w") tkgrid(subsetFrame, sticky="w") tkgrid(tklabel(plot1Frame, text="Plot options"), sticky="w") tkgrid(typeBox, typeScroll, sticky="nw") tkgrid(tklabel(plot2Frame, text="Plot variable"), sticky="w") tkgrid(axisBox, axisScroll, sticky="nw") tkgrid(plot1Frame, tklabel(plotFrame, text="", width=1), plot2Frame, sticky="w") tkgrid(plotFrame, sticky="w") tkgrid(OKbutton, plotButton, cancelButton, tklabel(buttonsFrame, text=" "), helpButton, sticky="w") tkgrid(buttonsFrame, sticky="w") tkgrid.configure(xScroll, sticky="ns") tkgrid.configure(yScroll, sticky="ns") tkgrid.configure(typeScroll, sticky="ns") tkgrid.configure(axisScroll, sticky="ns") tkgrid.configure(optionScroll, sticky="ns") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkselection.set(typeBox, 0) tkselection.set(optionBox, 0) tkselection.set(axisBox, 0) tkbind(top, "<Return>", onOK) tkbind(xBox, "<Double-ButtonPress-1>", onDoubleClick) tkbind(yBox, "<Double-ButtonPress-1>", onDoubleClick2) tkwm.deiconify(top) tkgrab.set(top) tkfocus(lhsEntry) tkwait.window(top) } presabsGUI <- function(){ contrasts <- c("contr.treatment", "contr.poly") checkAddOperator <- function(rhs){ rhs.chars <- rev(strsplit(rhs, "")[[1]]) if (length(rhs.chars) < 1) return(FALSE) check.char <- if ((rhs.chars[1] != " ") || (length(rhs.chars) == 1)) rhs.chars[1] else rhs.chars[2] !is.element(check.char, c("+", "*", ":", "/", "-", "^", "(", "%")) } top <- tktoplevel() tkwm.title(top, "Analysis of presence/absence") .activeDataSet <- ActiveDataSet() .communityDataSet <- 
CommunityDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) allvars <- "" if (length(.variables) > 1) { for (i in 1:(length(.variables)-1)) { allvars <- paste(allvars, .variables[i], "+") } allvars <- paste(allvars, .variables[length(.variables)]) }else{ allvars <- paste(allvars, .variables[1]) } .cvariables <- CVariables() cvariables <- paste(.cvariables) xFrame <- tkframe(top, relief="groove", borderwidth=2) x1Frame <- tkframe(xFrame) x4Frame <- tkframe(xFrame) x2Frame <- tkframe(x4Frame) x3Frame <- tkframe(x4Frame) xBox <- tklistbox(x2Frame, width=28, height=5, selectmode="single", background="white", exportselection="FALSE") xScroll <- tkscrollbar(x2Frame, repeatinterval=5, command=function(...) tkyview(xBox, ...)) tkconfigure(xBox, yscrollcommand=function(...) tkset(xScroll, ...)) for (x in variables) tkinsert(xBox, "end", x) resFrame <- tkframe(top, relief="groove", borderwidth=2) yFrame <- tkframe(resFrame) yBox <- tklistbox(yFrame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") yScroll <- tkscrollbar(yFrame, repeatinterval=5, command=function(...) tkyview(yBox, ...), width=18) tkconfigure(yBox, yscrollcommand=function(...) tkset(yScroll, ...)) for (x in cvariables) tkinsert(yBox, "end", x) lhsVariable <- tclVar("") lhsFrame <- tkframe(resFrame) lhsEntry <- tkentry(lhsFrame, width=28, textvariable=lhsVariable) rhsVariable <- tclVar("") rhsEntry <- tkentry(x1Frame, width=60, textvariable=rhsVariable) modelName <- tclVar("Presabs.model1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) subsetVariable <- tclVar("") subsetFrame <- tkframe(top, relief="groove", borderwidth=2) subsetEntry <- tkentry(subsetFrame, width=40, textvariable=subsetVariable) plotFrame <- tkframe(top, relief="groove", borderwidth=2) plot1Frame <- tkframe(plotFrame) plot2Frame <- tkframe(plotFrame) typeBox <- tklistbox(plot1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") typeScroll <- tkscrollbar(plot1Frame, repeatinterval=5, command=function(...) tkyview(typeBox, ...)) tkconfigure(typeBox, yscrollcommand=function(...) tkset(typeScroll, ...)) types <- c("tabular", "diagnostic plots", "levene test (factor)", "term plot", "effect plot", "qq plot", "result plot (new)", "result plot (add)", "result plot (interpolate)", "cr plot", "av plot", "influence plot", "multcomp (factor)", "rpart") for (x in types) tkinsert(typeBox, "end", x) axisBox <- tklistbox(plot2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") axisScroll <- tkscrollbar(plot2Frame, repeatinterval=5, command=function(...) tkyview(axisBox, ...)) tkconfigure(axisBox, yscrollcommand=function(...) tkset(axisScroll, ...)) for (x in variables) tkinsert(axisBox, "end", x) optionFrame <- tkframe(top, relief="groove", borderwidth=2) option1Frame <- tkframe(optionFrame) option2Frame <- tkframe(optionFrame) optionBox <- tklistbox(option1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") optionScroll <- tkscrollbar(option1Frame, repeatinterval=5, command=function(...) tkyview(optionBox, ...)) tkconfigure(optionBox, yscrollcommand=function(...) 
tkset(optionScroll, ...)) options <- c("crosstab", "binomial model", "quasi-binomial model", "gam model", "gam quasi-binomial model", "rpart", "nnetrandom", "GBM (gbm)", "RF (randomForest)", "CF (cforest)", "EARTH (earth)", "RPART (rpart)", "NNET (nnet)", "FDA (fda)", "SVM (ksvm)", "SVME (svm)") for (x in options) tkinsert(optionBox, "end", x) standardVariable <- tclVar("0") standardCheckBox <- tkcheckbutton(option2Frame, variable=standardVariable) summaryVariable <- tclVar("1") summaryCheckBox <- tkcheckbutton(option2Frame, variable=summaryVariable) anovaVariable <- tclVar("0") anovaCheckBox <- tkcheckbutton(option2Frame, variable=anovaVariable) dataVariable <- tclVar("0") dataCheckBox <- tkcheckbutton(option2Frame, variable=dataVariable) onDoubleClick <- function(){ var <- as.character(tkget(xBox, "active"))[1] tkfocus(rhsEntry) rhs <- tclvalue(rhsVariable) rhs.chars <- rev(strsplit(rhs, "")[[1]]) check.char <- if (length(rhs.chars) > 0){ if ((rhs.chars[1] != " ") || (length(rhs.chars) == 1)) rhs.chars[1] else rhs.chars[2] } else "" tclvalue(rhsVariable) <- if (rhs == "" || is.element(check.char, c("+", "*", ":", "/", "-", "^", "(", "%"))) paste(rhs, var, sep="") else paste(rhs, "+", var) tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onDoubleClick2 <- function(){ var <- as.character(tkget(yBox, "active"))[1] lhs <- tclvalue(lhsVariable) tclvalue(lhsVariable) <- var } onPlus <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "+ ") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onTimes <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "*", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onColon <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, ":", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onSlash <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "/", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onIn <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "%in% ") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onMinus <- function(){ rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "+I(") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onMinus2 <- function(){ rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "s(") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onPower <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "^", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onLeftParen <- function(){ tkfocus(rhsEntry) rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "(", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onRightParen <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, ")", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) check.empty <- gsub(" ", "", tclvalue(lhsVariable)) if ("" == check.empty) { tkmessageBox(message="Left-hand side of model empty.", icon="error", type="ok") tkgrab.release(top) 
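# A minimal offline sketch of the presence/absence model assembled below.
# Assumptions (not part of the GUI): vegan's dune data and Poatriv as the
# example species. Wrapped in if (FALSE) so it is never executed here.
if (FALSE) {
    library(vegan)
    data(dune)
    data(dune.env)
    dune.env$Poatriv <- dune$Poatriv
    Presabs.model1 <- glm(Poatriv > 0 ~ Management + A1,
        family=binomial(link=logit), data=dune.env, na.action=na.exclude)
    summary(Presabs.model1)
    anova(Presabs.model1, test='Chi')
}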
tkdestroy(top) generalizedLinearModel() return() } check.empty <- gsub(" ", "", tclvalue(rhsVariable)) if ("" == check.empty) { tkmessageBox(message="Right-hand side of model empty.", icon="error", type="ok") } modelValue <- tclvalue(modelName) if (!is.valid.name(modelValue)){ tkmessageBox(message=paste('"', modelValue, '" is not a valid name.', sep=""), icon="error", type="ok") } y <- paste(tclvalue(lhsVariable), ">0", sep="") right <- tclvalue(rhsVariable) if (right == ".") right <- allvars option <- options[as.numeric(tkcurselection(optionBox))+1] if (option %in% c("GBM (gbm)", "RF (randomForest)", "CF (cforest)", "EARTH (earth)", "RPART (rpart)", "NNET (nnet)", "FDA (fda)", "SVM (ksvm)", "SVME (svm)")){ justDoIt(paste(.activeDataSet, "$pb <- as.numeric(", .communityDataSet, "$",tclvalue(lhsVariable), ">0)", sep="")) logger(paste(.activeDataSet, "$pb <- as.numeric(", .communityDataSet, "$",tclvalue(lhsVariable), ">0)", sep="")) justDoIt(paste("attach(", .activeDataSet, ", pos=2)",sep="")) logger(paste("attach(", .activeDataSet, ", pos=2)",sep="")) activeDataSet(.activeDataSet) } formula <- paste(y, right, sep=" ~ ") subsetval <- tclvalue(subsetVariable) if (subsetval != "") { DataSet1 <- eval(parse(text=paste(.activeDataSet, sep="")), envir=.GlobalEnv) DataSet2 <- eval(parse(text=paste(.communityDataSet, sep="")), envir=.GlobalEnv) list <- (rownames(DataSet1) != subsetval) DataSet1 <- DataSet1[list,] DataSet2 <- DataSet2[list,] name1 <- paste(.activeDataSet,".m.", subsetval,sep="") name2 <- paste(.communityDataSet,".m.", subsetval,sep="") assign(name1,DataSet1, envir=.GlobalEnv) assign(name2,DataSet2, envir=.GlobalEnv) activeDataSet(name1) communityDataSet(name2) } stan <- tclvalue(standardVariable) == "1" if (stan==T) { DataSet1 <- eval(parse(text=paste(.activeDataSet, sep="")), envir=.GlobalEnv) standard <- paste(.activeDataSet, ".standard",sep="") for (j in 1:ncol(DataSet1)) { if (is.factor(DataSet1[,j]) == F) {DataSet1[,j] <- scale(DataSet1[,j])} } assign(standard,DataSet1, envir=.GlobalEnv) activeDataSet(standard) } justDoIt(paste(.activeDataSet, "$", tclvalue(lhsVariable), "<- ", .communityDataSet, "$",tclvalue(lhsVariable), sep="")) logger(paste(.activeDataSet, "$", tclvalue(lhsVariable), "<- ", .communityDataSet, "$",tclvalue(lhsVariable), sep="")) # option <- options[as.numeric(tkcurselection(optionBox))+1] if (option=="gam model" || option=="gam quasi-binomial model") { justDoIt(paste("library(mgcv)")) logger(paste("library(mgcv)")) } if (option=="rpart") { justDoIt(paste("library(rpart)")) logger(paste("library(rpart)")) } if (option=="nnetrandom") { justDoIt(paste("library(nnet)")) logger(paste("library(nnet)")) justDoIt(paste(.activeDataSet, "$presence <- as.numeric(", .communityDataSet, "$",tclvalue(lhsVariable), ">0)", sep="")) logger(paste(.activeDataSet, "$presence <- as.numeric(", .communityDataSet, "$",tclvalue(lhsVariable), ">0)", sep="")) justDoIt(paste("attach(", .activeDataSet, ", pos=2)",sep="")) logger(paste("attach(", .activeDataSet, ", pos=2)",sep="")) activeDataSet(.activeDataSet) formula <- paste("presence", right, sep=" ~ ") } if (option == "crosstab"){ y <- tclvalue(lhsVariable) command <- paste("crosstabanalysis(na.omit(", .activeDataSet, "),'", y, "','", right, "')", sep="") } if (option == "binomial model"){ command <- paste("glm(", formula, ", family=binomial(link=logit), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "quasi-binomial model"){ command <- paste("glm(", formula, ", family=quasibinomial(link=logit), 
data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "gam model"){ command <- paste("gam(", formula, ", family=binomial(link=logit), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "gam quasi-binomial model"){ command <- paste("gam(", formula, ", family=quasibinomial(link=logit), data=",.activeDataSet, ", na.action=na.exclude)", sep="") } if (option == "rpart"){ command <- paste("rpart(", formula, ", data=",.activeDataSet, ", method='class', na.action=na.rpart)", sep="") } if (option == "nnetrandom"){ command <- paste("nnetrandom(", formula, ", data=",.activeDataSet, ", size=2, skip=T, entropy=T, trace=F, maxit=1000, tries=500, leave.one.out=F)", sep="") } if (option == "GBM (gbm)"){ justDoIt(paste("library(gbm)")) logger(paste("library(gbm)")) formula <- paste("pb", right, sep=" ~ ") command <- paste("gbm::gbm(", formula, ", data=", .activeDataSet, ", distribution='bernoulli', interaction.depth=7, shrinkage=0.001, bag.fraction=0.5, train.fraction=1, n.trees=2001, verbose=F, cv.folds=5)", sep="") } if (option == "RF (randomForest)"){ justDoIt(paste("library(randomForest)")) logger(paste("library(randomForest)")) formula <- paste("pb", right, sep=" ~ ") command <- paste("randomForest::randomForest(", formula, ", data=", .activeDataSet, ", ntree=751, mtry=floor(sqrt(ncol(", .activeDataSet, "))), na.action=na.omit)", sep="") } if (option == "CF (cforest)"){ justDoIt(paste("library(party)")) logger(paste("library(party)")) formula <- paste("as.factor(pb)", right, sep=" ~ ") command <- paste("party::cforest(", formula, ", data=", .activeDataSet, ", control=party::cforest_unbiased(ntree=751, mtry=floor(sqrt(ncol(", .activeDataSet, ")))))", sep="") } if (option == "EARTH (earth)"){ justDoIt(paste("library(earth)")) logger(paste("library(earth)")) formula <- paste("as.factor(pb)", right, sep=" ~ ") command <- paste("earth::earth(", formula, ", data=", .activeDataSet, ", glm=list(family=binomial(link='logit'), maxit=100), degree=2)", sep="") } if (option == "RPART (rpart)"){ justDoIt(paste("library(rpart)")) logger(paste("library(rpart)")) formula <- paste("as.factor(pb)", right, sep=" ~ ") command <- paste("rpart::rpart(", formula, ", data=", .activeDataSet, ", control=rpart::rpart.control(xval=50, minbucket=5, minsplit=5, cp=0.001, maxdepth=25))", sep="") } if (option == "NNET (nnet)"){ justDoIt(paste("library(nnet)")) logger(paste("library(nnet)")) formula <- paste("as.factor(pb)", right, sep=" ~ ") command <- paste("nnet::nnet(", formula, ", data=", .activeDataSet, ", size=8, decay=0.01, rang=0.1, maxit=100, trace=F)", sep="") } if (option == "FDA (fda)"){ justDoIt(paste("library(mda)")) logger(paste("library(mda)")) formula <- paste("pb", right, sep=" ~ ") command <- paste("mda::fda(", formula, ", data=", .activeDataSet, ", method=mda::mars)", sep="") } if (option == "SVM (ksvm)"){ justDoIt(paste("library(kernlab)")) logger(paste("library(kernlab)")) formula <- paste("pb", right, sep=" ~ ") command <- paste("kernlab::ksvm(", formula, ", data=", .activeDataSet, ", type='C-svc', prob.model=T)", sep="") } if (option == "SVME (svm)"){ justDoIt(paste("library(e1071)")) logger(paste("library(e1071)")) formula <- paste("as.factor(pb)", right, sep=" ~ ") command <- paste("e1071::svm(", formula, ", data=", .activeDataSet, ", type='C-classification', kernel='polynomial', degree=3, probability=TRUE)", sep="") } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) sum <- tclvalue(summaryVariable) == "1" if 
(sum==T && option!="crosstab" && option !="RF (randomForest)" && option!="GBM (gbm)" && option!="CF (cforest)" && option!="FDA (fda)" && option!="SVM (ksvm)") {
            doItAndPrint(paste("summary(", modelValue, ")", sep=""))
            if (option=="binomial model") {
                doItAndPrint(paste("deviancepercentage(", modelValue, ", na.omit(", .activeDataSet, "), test='Chi', digits=2)", sep=""))
            }
            if (option=="quasi-binomial model") {
                doItAndPrint(paste("deviancepercentage(", modelValue, ", na.omit(", .activeDataSet, "), test='F', digits=2)", sep=""))
            }
        }
        if (sum==T && option=="crosstab") {
            doItAndPrint(paste(modelValue))
            doItAndPrint(paste(modelValue, "$observed", sep=""))
            doItAndPrint(paste(modelValue, "$expected", sep=""))
        }
        if (sum==T && option=="GBM (gbm)") {
            doItAndPrint(paste(modelValue))
            doItAndPrint(paste("summary(", modelValue, ")", sep=""))
            doItAndPrint(paste("gbm.perf(", modelValue, ", oobag.curve=TRUE, method='OOB')", sep=""))
        }
        if (sum==T && option=="RF (randomForest)") {
            doItAndPrint(paste(modelValue))
            doItAndPrint(paste("importance(", modelValue, ")", sep=""))
        }
        if (sum==T && option=="CF (cforest)") {
            doItAndPrint(paste(modelValue))
            doItAndPrint(paste("varimp(", modelValue, ", conditional=FALSE)", sep=""))
        }
        if (sum==T && option=="EARTH (earth)") {
            doItAndPrint(paste("evimp(", modelValue, ")", sep=""))
        }
        if (sum==T && option=="FDA (fda)") {
            doItAndPrint(paste(modelValue))
        }
        if (sum==T && option=="SVM (ksvm)") {
            doItAndPrint(paste(modelValue))
        }
        anov <- tclvalue(anovaVariable) == "1"
        if (anov==T && (option=="binomial model" || option=="gam model")) {
            doItAndPrint(paste("anova(", modelValue, ",test='Chi')", sep=""))
            doItAndPrint(paste("car::vif(lm(", formula, ", data=na.omit(",.activeDataSet, ")))", sep=""))
            doItAndPrint(paste("drop1(", modelValue, ",test='Chi')", sep=""))
            doItAndPrint(paste("car::Anova(", modelValue, ",type='II', test='F', error.estimate='deviance')", sep=""))
            doItAndPrint(paste("car::Anova(", modelValue, ",type='II', test='Wald')", sep=""))
        }
        if (anov==T && (option=="quasi-binomial model" || option=="gam quasi-binomial model")) {
            doItAndPrint(paste("anova(", modelValue, ",test='F')", sep=""))
            doItAndPrint(paste("car::vif(lm(", formula, ", data=na.omit(",.activeDataSet, ")))", sep=""))
            doItAndPrint(paste("drop1(", modelValue, ",test='F')", sep=""))
            doItAndPrint(paste("car::Anova(", modelValue, ",type='II', test='F', error.estimate='deviance')", sep=""))
            doItAndPrint(paste("car::Anova(", modelValue, ",type='II', test='Wald')", sep=""))
        }
        data <- tclvalue(dataVariable) =="1"
        if (data==T) {
            if (option %in% c("GBM (gbm)", "RF (randomForest)", "CF (cforest)", "EARTH (earth)", "RPART (rpart)", "NNET (nnet)", "FDA (fda)", "SVM (ksvm)", "SVME (svm)")){
                if (option=="GBM (gbm)") {
                    justDoIt(paste(.activeDataSet, "$", modelValue, ".GBM.pred <- as.numeric(predict(", modelValue, ", n.trees=2001, type='response'))", sep=""))
                    logger(paste(.activeDataSet, "$", modelValue, ".GBM.pred <- as.numeric(predict(", modelValue, ", n.trees=2001, type='response'))", sep=""))
                }
                if (option=="RF (randomForest)") {
                    justDoIt(paste(.activeDataSet, "$", modelValue, ".RF.pred <- as.numeric(predict(", modelValue, ", type='response'))", sep=""))
                    logger(paste(.activeDataSet, "$", modelValue, ".RF.pred <- as.numeric(predict(", modelValue, ", type='response'))", sep=""))
                }
                if (option=="CF (cforest)") {
                    justDoIt(paste(.activeDataSet, "$", modelValue, ".CF.pred <- as.numeric(predict(", modelValue, ", type='prob')[2])", sep=""))
                    logger(paste(.activeDataSet, "$", modelValue, ".CF.pred <- as.numeric(predict(", modelValue, ",
type='prob')[2])", sep="")) } if (option=="EARTH (earth)") { justDoIt(paste(.activeDataSet, "$", modelValue, ".EARTH.pred <- as.numeric(predict(", modelValue, ", type='response'))", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".EARTH.pred <- as.numeric(predict(", modelValue, ", type='response'))", sep="")) } if (option=="RPART (rpart)") { justDoIt(paste(.activeDataSet, "$", modelValue, ".RPART.pred <- as.numeric(predict(", modelValue, ", type='prob')[, 2])", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".RPART.pred <- as.numeric(predict(", modelValue, ", type='prob')[, 2])", sep="")) } if (option=="NNET (nnet)") { justDoIt(paste(.activeDataSet, "$", modelValue, ".NNET.pred <- as.numeric(predict(", modelValue, ", type='raw'))", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".NNET.pred <- as.numeric(predict(", modelValue, ", type='raw'))", sep="")) } if (option=="FDA (fda)") { justDoIt(paste(.activeDataSet, "$", modelValue, ".FDA.pred <- as.numeric(predict(", modelValue, ", type='posterior')[, 2])", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".FDA.pred <- as.numeric(predict(", modelValue, ", type='posterior')[, 2])", sep="")) } if (option=="SVM (ksvm)") { justDoIt(paste(.activeDataSet, "$", modelValue, ".SVM.pred <- as.numeric(predict(", modelValue, ", newdata=", .activeDataSet, ", type='probabilities')[, 2])", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".SVM.pred <- as.numeric(predict(", modelValue, ", newdata=", .activeDataSet, ", type='probabilities')[,2])", sep="")) } if (option=="SVME (svm)") { justDoIt(paste(.activeDataSet, "$", modelValue, ".SVME.pred <- as.numeric(attr(predict(", modelValue, ", newdata=", .activeDataSet, ", probability=TRUE), 'probabilities')[, 1])", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".SVME.pred <- as.numeric(attr(predict(", modelValue, ", newdata=", .activeDataSet, ", probability=TRUE), 'probabilities')[, 1])", sep="")) } }else{ if (option=="rpart") { justDoIt(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='prob', na.action=na.fail)[,2]", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='prob', na.action=na.fail)[,2]", sep="")) } if (option=="nnetrandom") { justDoIt(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", newdata=", .activeDataSet, ", type='raw', na.action=na.fail)", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", newdata=", .activeDataSet, ", type='raw', na.action=na.fail)", sep="")) } if (option!="rpart" && option!="nnetrandom" && option!="crosstab") { justDoIt(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='response')", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".fit <- predict(", modelValue, ", type='response')", sep="")) } } activeDataSet(.activeDataSet) } } onPlot <- function(){ modelValue <- tclvalue(modelName) axisvar <- .variables[as.numeric(tkcurselection(axisBox))+1] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) option <- options[as.numeric(tkcurselection(optionBox))+1] plottype <- types[as.numeric(tkcurselection(typeBox))+1] y <- paste(tclvalue(lhsVariable), ">0", sep="") if (plottype == "tabular") { doItAndPrint(paste("plot(as.factor(", y, ") ~", axisvar, ", na.omit(", .activeDataSet, "))", sep="")) } if (plottype == "diagnostic plots"){ if (option=="gam model" || option=="gam quasi-binomial model") { 
doItAndPrint(paste("gam.check(", modelValue, ")", sep="")) } if (option=="binomial model" || option=="quasi-binomial model"){ doItAndPrint(paste("par(mfrow=c(2,2))")) doItAndPrint(paste("plot(", modelValue, ")", sep="")) doItAndPrint(paste("par(mfrow=c(1,1))")) } if (option=="rpart" || option=="nnetrandom"){ doItAndPrint(paste("plot(as.factor(predict(", modelValue, ",newdata=na.omit(", .activeDataSet, "), type='class')) ~ as.factor(na.omit(", .activeDataSet, ")$", y, "), xlab='observed',ylab='predicted')", sep="")) } } if (plottype == "levene test (factor)" && option !="crosstab" && option !="rpart" && option !="nnetrandom" && varfactor==T) { doItAndPrint(paste("leveneTest(residuals(", modelValue, "), ", .activeDataSet ,"$", axisvar, ")", sep="")) justDoIt(paste("plot(residuals(", modelValue, ") ~ ", .activeDataSet ,"$", axisvar, ")", sep="")) logger(paste("plot(residuals(", modelValue, ") ~ ", .activeDataSet ,"$", axisvar, ")", sep="")) doItAndPrint(paste("points(", .activeDataSet ,"$", axisvar, ",residuals(", modelValue, "))", sep="")) } if (plottype == "term plot" && option !="crosstab" && option !="rpart" && option !="nnetrandom"){ if (option == "gam model" || option=="gam quasi-binomial model"){ doItAndPrint(paste("plot(", modelValue, ", se=T, rug=T)", sep="")) }else{ doItAndPrint(paste("termplot(", modelValue, ", se=T, partial.resid=T, rug=T, terms='", axisvar, "')", sep="")) } } if (plottype == "effect plot" && option !="crosstab" && option !="rpart" && option !="nnetrandom") { justDoIt(paste("library(effects)", sep="")) logger(paste("library(effects)", sep="")) doItAndPrint(paste("as.data.frame(effect('", axisvar, "', ", modelValue, "))", sep="")) doItAndPrint(paste("plot(effect('", axisvar, "', ", modelValue, ", xlevels=500))", sep="")) } if (plottype == "qq plot" && option !="crosstab" && option !="rpart" && option !="nnetrandom") { doItAndPrint(paste("qqPlot(residuals(", modelValue, "))", sep="")) doItAndPrint(paste("shapiro.test(residuals(", modelValue, "))", sep="")) doItAndPrint(paste("ks.test(residuals(", modelValue, "), pnorm)", sep="")) } if (plottype == "result plot (new)" || plottype =="result plot (add)" || plottype == "result plot (interpolate)"){ if (plottype == "result plot (new)"){ if (varfactor==T){ justDoIt(paste("plot(rep(-9, nrow(", .activeDataSet, ")) ~ ", .activeDataSet, "$", axisvar, ", xlab='", axisvar, "', ylab='", tclvalue(lhsVariable), " (presence-absence)', type='n', ylim=c(0,1))", sep="")) logger(paste("plot(rep(-9, nrow(", .activeDataSet, ")) ~ ", .activeDataSet, "$", axisvar, ", xlab='", axisvar, "', ylab='", tclvalue(lhsVariable), " (presence-absence)', type='n', ylim=c(0,1))", sep="")) }else{ justDoIt(paste("plot(", .activeDataSet, "$", y, "~ ", .activeDataSet, "$", axisvar, ", xlab='", axisvar, "', ylab='", tclvalue(lhsVariable), " (presence-absence)', ylim=c(0,1))", sep="")) logger(paste("plot(", .activeDataSet, "$", y, "~ ", .activeDataSet, "$", axisvar, ", xlab='", axisvar, "', ylab='", tclvalue(lhsVariable), " (presence-absence)', ylim=c(0,1))", sep="")) } doItAndPrint(paste("abline(h=0,lty=3)")) doItAndPrint(paste("abline(h=0.5,lty=3)")) doItAndPrint(paste("abline(h=1,lty=3)")) } if (plottype=="result plot (interpolate)" && varfactor==F) { varmin <- eval(parse(text=paste("min(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) varmax <- eval(parse(text=paste("max(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) prdata <- paste(.activeDataSet, ".pred", sep="") prdatacont <- data.frame(seq(varmin,varmax,length=1000)) 
colnames(prdatacont) <- axisvar assign(prdata, prdatacont, envir=.GlobalEnv) } if (option=="rpart") { if (plottype=="result plot (interpolate)" && varfactor==F) { doItAndPrint(paste("points(predict(", modelValue, ", newdata=", prdata, ", type='prob')[,2] ~ ", prdata, "$", axisvar, ", type='l', lwd=2, col='red')", sep="")) }else{ doItAndPrint(paste("points(predict(", modelValue, ", newdata=", .activeDataSet, ", type='prob')[,2] ~ ", .activeDataSet, "$", axisvar, ", col='red', cex=1.5)", sep="")) } } if (option=="nnetrandom") { if (plottype=="result plot (interpolate)" && varfactor==F) { doItAndPrint(paste("points(predict(", modelValue, ",newdata=", prdata, ", type='raw') ~ ", prdata, "$", axisvar, ", col='red', type='l', lwd=2)", sep="")) }else{ doItAndPrint(paste("points(predict(", modelValue, ",newdata=", .activeDataSet, ", type='raw') ~ ", .activeDataSet, "$", axisvar, ", col='red', cex=1.5)", sep="")) } } if (option!="nnetrandom" && option!="rpart" && option!="crosstab" && plottype!="result plot (interpolate)") { prmodel <- paste(modelValue, ".pred", sep="") logger(paste(prmodel, " <- predict(", modelValue, ", newdata=", .activeDataSet, ", type='response', se.fit=T)", sep="")) assign(prmodel, justDoIt(paste("predict(", modelValue, ", newdata=", .activeDataSet, ", type='response', se.fit=T)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("points(", prmodel, "$fit ~ ", .activeDataSet, "$", axisvar, ", col='red', cex=1.5)", sep="")) doItAndPrint(paste("segments(as.numeric(", .activeDataSet, "$", axisvar, "),", prmodel, "$fit + 2*", prmodel, "$se.fit, as.numeric(", .activeDataSet, "$", axisvar, "),", prmodel, "$fit - 2*", prmodel, "$se.fit, lty=2, col='red')", sep="")) } if (option!="nnetrandom" && option!="rpart" && option!="crosstab" && plottype=="result plot (interpolate)" && varfactor==F) { prmodel <- paste(modelValue, ".pred", sep="") logger(paste(prmodel, " <- predict(", modelValue, ", newdata=", prdata, ", type='response', se.fit=T)", sep="")) assign(prmodel, justDoIt(paste("predict(", modelValue, ", newdata=", prdata, ", type='response', se.fit=T)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("points(", prmodel, "$fit ~ ", prdata, "$", axisvar, ", type='l', lwd=2, col='red')", sep="")) doItAndPrint(paste("points((", prmodel, "$fit + 2*", prmodel, "$se.fit) ~ ", prdata, "$", axisvar, ", type='l', lty=2, col='red')", sep="")) doItAndPrint(paste("points((", prmodel, "$fit - 2*", prmodel, "$se.fit) ~ ", prdata, "$", axisvar, ", type='l', lty=2, col='red')", sep="")) } } if (plottype == "cr plot" && option !="crosstab" && option !="rpart" && option !="nnetrandom") { doItAndPrint(paste("crPlots(", modelValue, ",'", axisvar, "')", sep="")) } if (plottype == "av plot" && option !="crosstab" && option !="rpart" && option !="nnetrandom") { doItAndPrint(paste("avPlots(", modelValue, ", ask=F, identify.points=F)", sep="")) } if (plottype == "influence plot" && option !="crosstab" && option !="rpart" && option !="nnetrandom") { doItAndPrint(paste("influencePlot(", modelValue, ", labels=F)", sep="")) doItAndPrint(paste("influence.measures(", modelValue, ")", sep="")) } if (plottype == "multcomp (factor)" && option !="crosstab" && option !="rpart" && option !="nnetrandom" && varfactor==T) { justDoIt(paste("library(multcomp)", sep="")) logger(paste("library(multcomp)", sep="")) doItAndPrint(paste("plot(print(confint(glht(", modelValue, ", linfct = mcp(", axisvar, "= 'Tukey')))))", sep="")) } if (plottype == "rpart" && option=="rpart") { justDoIt(paste("par(xpd=NA)")) 
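        # par(xpd=NA) lets the node labels added by text() below extend beyond the
        # plot region, so the rpart tree annotation is not clipped at the margins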
logger(paste("par(xpd=NA)")) justDoIt(paste("plot(", modelValue, ", compress=T, uniform=F, branch=0.7)", sep="")) logger(paste("plot(", modelValue, ", compress=T, uniform=F, branch=0.7)", sep="")) doItAndPrint(paste("text(", modelValue, ", use.n=T, all=T, col='blue', cex=1, pretty=0, fancy=T, fwidth=0.99, fheight=0.99)", sep="")) justDoIt(paste("par(xpd=F)")) logger(paste("par(xpd=F)")) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } .operatorFont <- Rcmdr::getRcmdr("operatorFont") plusButton <- tkbutton(x3Frame, text="+", width="3", command=onPlus, font=.operatorFont) timesButton <- tkbutton(x3Frame, text="*", width="3", command=onTimes, font=.operatorFont) colonButton <- tkbutton(x3Frame, text=":", width="3", command=onColon, font=.operatorFont) slashButton <- tkbutton(x3Frame, text="/", width="3", command=onSlash, font=.operatorFont) inButton <- tkbutton(xFrame, text="%in%", width="3", command=onIn, font=.operatorFont) minusButton <- tkbutton(x3Frame, text="I(", width="3", command=onMinus, font=.operatorFont) minus2Button <- tkbutton(x3Frame, text="s(", width="3", command=onMinus2, font=.operatorFont) powerButton <- tkbutton(x3Frame, text="^", width="3", command=onPower, font=.operatorFont) leftParenButton <- tkbutton(x3Frame, text="(", width="3", command=onLeftParen, font=.operatorFont) rightParenButton <- tkbutton(x3Frame, text=")", width="3", command=onRightParen, font=.operatorFont) buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) help(generalizedLinearModel) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) tkgrid(tklabel(modelFrame, text="Save model as: ", width=20), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(option1Frame, text="Model options"), sticky="w") tkgrid(optionBox, optionScroll,sticky="w") tkgrid(standardCheckBox, tklabel(option2Frame, text="standardise"), sticky="w") tkgrid(summaryCheckBox, tklabel(option2Frame, text="print summary"), sticky="w") tkgrid(anovaCheckBox, tklabel(option2Frame, text="print anova"), sticky="w") tkgrid(dataCheckBox, tklabel(option2Frame, text="add predictions to dataframe"), sticky="w") tkgrid(option1Frame, tklabel(optionFrame, text="", width=1), option2Frame, sticky="w") tkgrid(optionFrame, sticky="w") tkgrid(tklabel(lhsFrame, text="Response"), sticky="w") tkgrid(lhsEntry, sticky="nw") tkgrid(yBox, yScroll, sticky="nw") tkgrid(lhsFrame,tklabel(resFrame, text="", width=1), yFrame) tkgrid(resFrame, sticky="w") tkgrid(rhsEntry, sticky="w") tkgrid(xBox, xScroll,sticky="w") tkgrid(plusButton, timesButton, colonButton, slashButton, inButton, sticky="w") tkgrid(minusButton,powerButton, leftParenButton, rightParenButton, minus2Button, sticky="w") tkgrid(tklabel(xFrame, text="Explanatory"), sticky="w") tkgrid(x1Frame, sticky="w") tkgrid(x2Frame, tklabel(xFrame, text="", width=1), x3Frame, sticky="w") tkgrid(x4Frame, sticky="w") tkgrid(xFrame, sticky="w") tkgrid(tklabel(subsetFrame, text="Remove sites with name: ", width=20), subsetEntry, sticky="w") tkgrid(subsetFrame, sticky="w") tkgrid(tklabel(plot1Frame, text="Plot options"), sticky="w") tkgrid(typeBox, typeScroll, sticky="nw") tkgrid(tklabel(plot2Frame, text="Plot variable"), sticky="w") 
tkgrid(axisBox, axisScroll, sticky="nw") tkgrid(plot1Frame, tklabel(plotFrame, text="", width=1), plot2Frame, sticky="w") tkgrid(plotFrame, sticky="w") tkgrid(OKbutton, plotButton, cancelButton, tklabel(buttonsFrame, text=" "), helpButton, sticky="w") tkgrid(buttonsFrame, sticky="w") tkgrid.configure(xScroll, sticky="ns") tkgrid.configure(yScroll, sticky="ns") tkgrid.configure(typeScroll, sticky="ns") tkgrid.configure(axisScroll, sticky="ns") tkgrid.configure(optionScroll, sticky="ns") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkselection.set(typeBox, 0) tkselection.set(optionBox, 0) tkselection.set(axisBox, 0) tkbind(top, "<Return>", onOK) tkbind(xBox, "<Double-ButtonPress-1>", onDoubleClick) tkbind(yBox, "<Double-ButtonPress-1>", onDoubleClick2) tkwm.deiconify(top) tkgrab.set(top) tkfocus(lhsEntry) tkwait.window(top) } diversityresponse <- function(){ .communityDataSet <- CommunityDataSet() .activeDataSet <- ActiveDataSet() justDoIt(paste(.activeDataSet, " <- diversityvariables(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) logger(paste(.activeDataSet, " <- diversityvariables(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) activeDataSet(.activeDataSet) communityDataSet(.activeDataSet) logger(paste("environmental data set (with added diversity variables) is now also the community data set", sep="")) logger(paste("use menu option of 'Species abundance as response...' now to analyze diversity as response", sep="")) } distmatrixGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Distance matrix calculation") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() modelName <- tclVar("Distmatrix.1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=50, textvariable=modelName) method2Frame <- tkframe(top, relief="groove", borderwidth=2) distBox <- tklistbox(method2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") distScroll <- tkscrollbar(method2Frame, repeatinterval=5, command=function(...) tkyview(distBox, ...)) printVariable <- tclVar("0") printCheckBox <- tkcheckbutton(method2Frame, variable=printVariable) treatasdistVariable <- tclVar("0") treatasdistCheckBox <- tkcheckbutton(method2Frame, variable=treatasdistVariable) tkconfigure(distBox, yscrollcommand=function(...) 
tkset(distScroll, ...))
    distances <- c("euclidean", "manhattan", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower",
        "morisita", "horn", "mountford", "raup", "binomial", "chao", "cao", "mahalanobis", "hellinger", "aitchison",
        "robust.aitchison", "Hellinger", "scaled Hellinger", "chord", "scaled chord",
        "w", "-1", "c", "wb", "r", "I", "e", "t", "me", "j", "sor", "m", "-2", "co", "cc", "g", "-3", "l", "19", "hk", "rlb", "sim", "gl", "z",
        "designdist", "chaodist",
        "averaged euclidean", "averaged manhattan", "averaged canberra", "averaged clark", "averaged bray",
        "averaged kulczynski", "averaged jaccard", "averaged gower", "averaged altGower", "averaged morisita",
        "averaged horn", "averaged mountford", "averaged raup", "averaged binomial", "averaged chao", "averaged cao",
        "averaged mahalanobis")
    for (x in distances) tkinsert(distBox, "end", x)
    onOK <- function(){
        dist <- distances[as.numeric(tkcurselection(distBox))+1]
        modelValue <- tclvalue(modelName)
        if (dist %in% c("euclidean", "manhattan", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower", "morisita", "horn", "mountford", "raup", "binomial", "chao", "cao", "mahalanobis", "hellinger")) {
            logger(paste(modelValue, " <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T)", sep=""))
            assign(modelValue, justDoIt(paste("vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T)", sep="")), envir=.GlobalEnv)
            doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep=""))
        }
        if (dist %in% c("aitchison", "robust.aitchison")) {
            logger(paste(modelValue, " <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep=""))
            assign(modelValue, justDoIt(paste("vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv)
        }
        if (dist == "Hellinger") {
            logger(paste(modelValue, " <- vegdist(disttransform(", .communityDataSet, ", method='hellinger'), method='euclidean', na.rm=T)", sep=""))
            assign(modelValue, justDoIt(paste("vegdist(disttransform(", .communityDataSet, ", method='hellinger'), method='euclidean', na.rm=T)", sep="")), envir=.GlobalEnv)
        }
        if (dist == "scaled Hellinger") {
            logger(paste(modelValue, " <- vegdist(disttransform(", .communityDataSet, ", method='hellinger'), method='euclidean', na.rm=T)/sqrt(2)", sep=""))
            assign(modelValue, justDoIt(paste("vegdist(disttransform(", .communityDataSet, ", method='hellinger'), method='euclidean', na.rm=T)/sqrt(2)", sep="")), envir=.GlobalEnv)
        }
        if (dist == "chord") {
            logger(paste(modelValue, " <- vegdist(disttransform(", .communityDataSet, ", method='chord'), method='euclidean', na.rm=T)", sep=""))
            assign(modelValue, justDoIt(paste("vegdist(disttransform(", .communityDataSet, ", method='chord'), method='euclidean', na.rm=T)", sep="")), envir=.GlobalEnv)
        }
        if (dist == "scaled chord") {
            logger(paste(modelValue, " <- vegdist(disttransform(", .communityDataSet, ", method='chord'), method='euclidean', na.rm=T)/sqrt(2)", sep=""))
            assign(modelValue, justDoIt(paste("vegdist(disttransform(", .communityDataSet, ", method='chord'), method='euclidean', na.rm=T)/sqrt(2)", sep="")), envir=.GlobalEnv)
        }
        if (dist %in% c("w", "-1", "c", "wb", "r", "I", "e", "t", "me", "j", "sor", "m", "-2", "co", "cc", "g", "-3", "l", "19", "hk", "rlb", "sim", "gl", "z")) {
            logger(paste(modelValue, " <- betadiver(", .communityDataSet, ", method='", dist, "')", sep=""))
            assign(modelValue, justDoIt(paste("betadiver(", .communityDataSet, ", method='", dist, "')", sep="")), envir=.GlobalEnv)
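            # illustrative example of the command logged above (assuming a community
            # data set named 'dune' and the default matrix name 'Distmatrix.1'):
            #   Distmatrix.1 <- betadiver(dune, method='sor')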
doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } if (dist == "designdist") { logger(paste(modelValue, " <- designdist(", .communityDataSet, ", method='(A+B-2*J)/(A+B)', terms='minimum')", sep="")) assign(modelValue, justDoIt(paste("designdist(", .communityDataSet, ", method='(A+B-2*J)/(A+B)', terms='minimum')", sep="")), envir=.GlobalEnv) } if (dist == "chaodist") { logger(paste(modelValue, " <- designdist(", .communityDataSet, ", method='1 - 2*U*V/(U+V)')", sep="")) assign(modelValue, justDoIt(paste("designdist(", .communityDataSet, ", method='1 - 2*U*V/(U+V)')", sep="")), envir=.GlobalEnv) } if (dist == "averaged manhattan") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='manhattan', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='manhattan', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged euclidean") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='euclidean', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='euclidean', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged canberra") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='canberra', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='canberra', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged clark") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='clark', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='clark', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged bray") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='bray', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='bray', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged kulczynski") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='kulczynski', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='kulczynski', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged jaccard") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='jaccard', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", 
sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='jaccard', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged gower") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='gower', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='gower', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged altGower") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='altGower', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='altGower', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged morisita") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='morisita', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='morisita', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged horn") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='horn', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='horn', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged mountford") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='mountford', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='mountford', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged raup") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='raup', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='raup', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged binomial") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='binomial', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='binomial', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged chao") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='chao', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='chao', 
iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged cao") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='cao', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='cao', iterations=1000)", sep="")), envir=.GlobalEnv) } if (dist == "averaged mahalanobis") { logger(paste(modelValue, " <- avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='mahalanobis', iterations=1000)", sep="")) assign(modelValue, justDoIt(paste("avgdist(", .communityDataSet, ", sample=min(rowSums(", .communityDataSet, ")), meanfun=mean, distfun=vegdist, dmethod='mahalanobis', iterations=1000)", sep="")), envir=.GlobalEnv) } printdist <- tclvalue(printVariable)==1 if (printdist==T) {doItAndPrint(paste(modelValue))} treatasdist <- tclvalue(treatasdistVariable)==1 if (treatasdist==T) { logger(paste(modelValue, " <- data.frame(as.matrix(", modelValue, "))", sep="")) assign(modelValue, justDoIt(paste("data.frame(as.matrix(", modelValue, "))", sep="")), envir=.GlobalEnv) communityDataSet(modelValue) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save data as:", width=10), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(method2Frame, text="Distance"), sticky="w") tkgrid(distBox, distScroll,sticky="w") tkgrid(tklabel(method2Frame, text="Print distance matrix", width=25), printCheckBox, sticky="w") tkgrid(tklabel(method2Frame, text="Make community dataset", width=25), treatasdistCheckBox, sticky="w") tkgrid(method2Frame, sticky="w") tkgrid(OKbutton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(distScroll, sticky="ns") tkselection.set(distBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(distBox) tkwait.window(top) } unconordiGUI <- function(){ contrasts <- c("contr.treatment", "contr.poly") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) .cvariables <- CVariables() cvariables <- paste(.cvariables) top <- tktoplevel() tkwm.title(top, "Unconstrained ordination") modelName <- tclVar("Ordination.model1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) methodFrame <- tkframe(top, relief="groove", borderwidth=2) method1Frame <- tkframe(methodFrame) method2Frame <- tkframe(methodFrame) method3Frame <- tkframe(methodFrame) method4Frame <- tkframe(methodFrame) methodBox <- tklistbox(method1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(method1Frame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) 
tkset(methodScroll, ...)) methods <- c("PCA", "PCA (prcomp)", "PCoA", "PCoA (Cailliez)", "CA", "DCA", "metaMDS", "monoMDS", "wcmdscale", "wcmdscale (lingoes)", "wcmdscale (cailliez)", "pcnm", "NMS (standard)", "isomap") for (x in methods) tkinsert(methodBox, "end", x) distBox <- tklistbox(method2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") distScroll <- tkscrollbar(method2Frame, repeatinterval=5, command=function(...) tkyview(distBox, ...)) tkconfigure(distBox, yscrollcommand=function(...) tkset(distScroll, ...)) distances <- c("euclidean", "manhattan", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower", "morisita", "horn", "mountford", "raup" , "binomial", "chao", "cao", "mahalanobis", "hellinger") for (x in distances) tkinsert(distBox, "end", x) summaryVariable <- tclVar("1") summaryCheckBox <- tkcheckbutton(method4Frame, variable=summaryVariable) scalingVariable <- tclVar("species") scale <- tkentry(method4Frame, width=10, textvariable=scalingVariable) NMSVariable <- tclVar("2") NMSa <- tkentry(method3Frame, width=10, textvariable=NMSVariable) NMSpermVariable <- tclVar("1") NMSperm <- tkentry(method3Frame, width=10, textvariable=NMSpermVariable) addspecVariable <- tclVar("0") addspecCheckBox <- tkcheckbutton(method3Frame, variable=addspecVariable) treatasdistVariable <- tclVar("0") treatasdistCheckBox <- tkcheckbutton(method4Frame, variable=treatasdistVariable) plotFrame <- tkframe(top, relief="groove", borderwidth=2) plot1Frame <- tkframe(plotFrame) plot2Frame <- tkframe(plotFrame) plot3Frame <- tkframe(plotFrame) plot4Frame <- tkframe(plotFrame) typeBox <- tklistbox(plot1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") typeScroll <- tkscrollbar(plot1Frame, repeatinterval=5, command=function(...) tkyview(typeBox, ...)) tkconfigure(typeBox, yscrollcommand=function(...) 
tkset(typeScroll, ...))
    types <- c("plot", "ordiplot", "ordiplot3d", "ordirgl", "ordiplot empty", "origin axes",
        "identify sites", "identify species", "text sites", "text species", "points sites", "points species",
        "label sites", "label species", "orditorp sites", "orditorp species", "envfit",
        "ordihull (factor)", "ordihull (factor, rainbow)", "ordihull (factor, polygon)",
        "ordiarrows (factor)", "ordiarrows (factor, rainbow)", "ordisegments (factor)", "ordisegments (factor, rainbow)",
        "ordispider (factor)", "ordispider (factor, rainbow)", "ordibar (factor)", "ordibar (factor, rainbow)",
        "ordiellipse (factor)", "ordiellipse (factor, rainbow)", "ordiellipse (factor, ehull)", "ordiellipse (factor, polygon)",
        "ordisurf (continuous)", "ordibubble (continuous)",
        "ordisymbol (factor)", "ordisymbol (factor, legend)", "ordisymbol (factor, large)",
        "ordivector (species)", "ordivector interpretation",
        "ordicluster", "ordicluster2", "ordispantree", "ordinearest", "ordiequilibriumcircle",
        "screeplot", "distance displayed", "coenocline", "stressplot",
        "orditkplot sites", "orditkplot species", "orditkplot pointlabel",
        "orglspider (factor)", "orglellipse (factor)",
        "ggplot (ordisymbol1)", "ggplot (ordisymbol2)", "ggplot (ordispider1)", "ggplot (ordispider2)",
        "ggplot (ordisurf1)", "ggplot (ordisurf2)", "ggplot (ordiellipse)", "ggplot (add species)", "ggplot (add vector)")
    for (x in types) tkinsert(typeBox, "end", x)
    choicesVariable <- tclVar("1,2")
    choice <- tkentry(plot3Frame, width=10, textvariable=choicesVariable)
    dataVariable <- tclVar("0")
    dataCheckBox <- tkcheckbutton(plot3Frame, variable=dataVariable)
    axisBox <- tklistbox(plot2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE")
    axisScroll <- tkscrollbar(plot2Frame, repeatinterval=5, command=function(...) tkyview(axisBox, ...))
    tkconfigure(axisBox, yscrollcommand=function(...) 
tkset(axisScroll, ...)) for (x in variables) tkinsert(axisBox, "end", x) cexVariable <- tclVar("1") cexa <- tkentry(plot4Frame, width=10, textvariable=cexVariable) colVariable <- tclVar("blue") cola <- tkentry(plot4Frame, width=10, textvariable=colVariable) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) method <- methods[as.numeric(tkcurselection(methodBox))+1] dist <- distances[as.numeric(tkcurselection(distBox))+1] k <- tclvalue(NMSVariable) perm <- tclvalue(NMSpermVariable) treatasdist <- tclvalue(treatasdistVariable)==1 addspec <- tclvalue(addspecVariable) == "1" if (method=="PCA") { command <- paste("rda(", .communityDataSet, ")", sep="") doItAndPrint(paste("dist.eval(", .communityDataSet, ",'euclidean')", sep="")) } if (method=="PCA (prcomp)") { command <- paste("prcomp(", .communityDataSet, ")", sep="") doItAndPrint(paste("dist.eval(", .communityDataSet, ",'euclidean')", sep="")) } if (method=="CA") { command <- paste("cca(", .communityDataSet, ")", sep="") } if (method=="DCA") { command <- paste("decorana(", .communityDataSet, ")", sep="") } if (method=="PCoA") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ",method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("cmdscale(distmatrix, k=", k, ", eig=T, add=F)", sep="") } if (method=="PCoA (Cailliez)") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("cmdscale(distmatrix, k=", k, ", eig=T, add=T)", sep="") } if (method=="metaMDS") { addspec <- F command <- paste("metaMDS(", .communityDataSet, ", distance='", dist, "', k=", k, ", trymax=", perm, ", autotransform=T, noshare=0.1, expand=T, trace=1, plot=F)", sep="") doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } if (method=="monoMDS") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("monoMDS(distmatrix, k=", k, ", model='local')", sep="") } if (method=="wcmdscale") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", 
justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("wcmdscale(distmatrix, k=", k, ", eig=T, add=F)", sep="") } if (method=="wcmdscale (lingoes)") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("wcmdscale(distmatrix, k=", k, ", eig=T, add='lingoes')", sep="") } if (method=="wcmdscale (cailliez)") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("wcmdscale(distmatrix, k=", k, ", eig=T, add='cailliez')", sep="") } if (method=="pcnm") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("pcnm(distmatrix)", sep="") } if (method=="NMS (standard)") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("NMSrandom(distmatrix, perm=", perm,", k=", k, ")", sep="") } if (method=="isomap") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", 
justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("isomap(distmatrix, k=4)", sep="") } modelValue <- tclvalue(modelName) # if (!is.valid.name(modelValue)){ # tkmessageBox(message=paste('"', modelValue, '" is not a valid name.', # sep=""), icon="error", type="ok") # } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) sum <- tclvalue(summaryVariable) == "1" scaling <- tclvalue(scalingVariable) if (method == "PCoA" || method == "PCoA (Cailliez)") { doItAndPrint(paste("rownames(", modelValue, "$points) <- rownames(", .communityDataSet, ")", sep="")) } if (addspec==T) { if (method=="PCoA") { doItAndPrint(paste(modelValue, "<- add.spec.scores(", modelValue, ", ", .communityDataSet, ", method='pcoa.scores', Rscale=T, scaling='sites', multi=1)", sep="")) } if (method=="PCoA (Cailliez)") { doItAndPrint(paste(modelValue, "<- add.spec.scores(", modelValue, ", ", .communityDataSet, ", method='pcoa.scores', Rscale=T, scaling='sites', multi=1)", sep="")) } if (method=="NMS (standard)") { doItAndPrint(paste(modelValue, "<- add.spec.scores(", modelValue, ", ", .communityDataSet, ", method='wa.scores')", sep="")) } } if (method == "PCA" || method == "PCA (prcomp)" || method == "CA" || method == "DCA" || addspec == T) { doItAndPrint(paste("check.ordiscores(", .communityDataSet, ", ", modelValue, ", check.species=T)", sep="")) }else{ doItAndPrint(paste("check.ordiscores(", .communityDataSet, ", ", modelValue, ", check.species=F)", sep="")) } if (sum==T) { if (method %in% c("PCA", "CA")) { doItAndPrint(paste("summary(", modelValue, ", scaling='", scaling, "')", sep="")) doItAndPrint(paste("eigenvals(", modelValue, ")", sep="")) if (method=="PCA") {doItAndPrint(paste("PCAsignificance(", modelValue, ")", sep=""))} doItAndPrint(paste("goodness(", modelValue, ", display='sites', model='CA'", sep="")) doItAndPrint(paste("inertcomp(", modelValue, ", display='sites', unity=T)", sep="")) } if (method %in% c("PCA (prcomp)", "PCoA", "PCoA (Cailliez)", "metaMDS", "monoMDS", "wcmdscale", "wcmdscale (lingoes)", "wcmdscale (cailliez)", "pcnm", "NMS (standard)")) { doItAndPrint(paste(modelValue, sep="")) if (method=="metaMDS" || method=="monoMDS") {doItAndPrint(paste("goodness(", modelValue, ")", sep=""))} } if (method %in% c("DCA", "isomap")) {doItAndPrint(paste("summary(", modelValue, ")", sep=""))} } } onPlot <- function(){ method <- methods[as.numeric(tkcurselection(methodBox))+1] modelValue <- tclvalue(modelName) plottype <- types[as.numeric(tkcurselection(typeBox))+1] scaling <- tclvalue(scalingVariable) perm <- tclvalue(NMSpermVariable) choices <- tclvalue(choicesVariable) dist <- distances[as.numeric(tkcurselection(distBox))+1] col <- tclvalue(colVariable) cex <- tclvalue(cexVariable) treatasdist <- tclvalue(treatasdistVariable)==1 addspec <- tclvalue(addspecVariable) == "1" if (plottype == "plot"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- plot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- plot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) } if (method=="metaMDS" || method=='monoMDS') { justDoIt(paste("plot1 <- plot(", modelValue, ", choices=c(", choices, "))", sep="")) logger(paste("plot1 <- plot(", modelValue, ", choices=c(", choices, "))", sep="")) } if (method=="PCoA" || method=="PCoA (Cailliez)" || 
method=="NMS (standard)" || method=="wcmdscale" || method=="wcmdscale (lingoes)"|| method=="wcmdscale (cailliez)"|| method=="pcnm") { justDoIt(paste("plot1 <- plot(scores(", modelValue, ", display='sites', choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- plot(scores(", modelValue, ", display='sites', choices=c(", choices, ")))", sep="")) if (addspec==T) { justDoIt(paste("plot1 <- plot(scores(", modelValue, ", display='species', choices=c(", choices, ")), pch='+', col='red')", sep="")) logger(paste("plot1 <- plot(scores(", modelValue, ", display='species', choices=c(", choices, ")), pch='+', col='red')", sep="")) } justDoIt(paste("text(scores(", modelValue, ", display='sites', choices=c(", choices, ")), rownames(", .communityDataSet, "), pos=3)", sep="")) logger(paste("text(scores(", modelValue, ", display='sites', choices=c(", choices, ")), rownames(", .communityDataSet, "), pos=3)", sep="")) } } if (plottype == "ordiplot"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) }else{ justDoIt(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "))", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "))", sep="")) } } if (plottype == "ordiplot3d"){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) }else{ justDoIt(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3))", sep="")) logger(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3))", sep="")) } } if (plottype == "ordirgl"){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) }else{ justDoIt(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3))", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3))", sep="")) } } if (plottype == "ordiplot empty"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- ordiplot(", modelValue, ", type='none', choices=c(", choices, "), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", type='none', choices=c(", choices, "), scaling='", scaling, "')", sep="")) }else{ justDoIt(paste("plot1 <- ordiplot(", modelValue, ", type='none', choices=c(", choices, "))", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", type='none', choices=c(", choices, "))", sep="")) } } if (plottype == "identify sites"){ doItAndPrint(paste("identify(plot1, 'sites', col='", col,"', cex=", cex, ")", 
sep="")) } if (plottype == "identify species"){ doItAndPrint(paste("identify(plot1, 'species', col='", col,"', cex=", cex, ")", sep="")) } if (plottype == "text sites"){ doItAndPrint(paste("text(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "text species"){ doItAndPrint(paste("text(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "points sites"){ doItAndPrint(paste("points(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "points species"){ doItAndPrint(paste("points(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "label sites"){ doItAndPrint(paste("ordilabel(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "label species"){ doItAndPrint(paste("ordilabel(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "orditorp sites"){ doItAndPrint(paste("orditorp(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "orditorp species"){ doItAndPrint(paste("orditorp(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "origin axes"){ doItAndPrint(paste("abline(h = 0, lty = 3)", sep="")) doItAndPrint(paste("abline(v = 0, lty = 3)", sep="")) } if (plottype == "screeplot"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) # if (method=="PCA" || method=="PCA (prcomp)") { doItAndPrint(paste("plot1 <- screeplot(", modelValue, ", bstick=T)", sep="")) # } } axisvar <- .variables[as.numeric(tkcurselection(axisBox))+1] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) if (plottype %in% c("envfit", "ordihull (factor)", "ordihull (factor, rainbow)", "ordihull (factor, polygon)", "ordiarrows (factor)", "ordiarrows (factor, rainbow)", "ordisegments (factor)", "ordisegments (factor, rainbow)", "ordispider (factor)", "ordispider (factor, rainbow)", "ordibar (factor)", "ordibar (factor, rainbow)", "ordiellipse (factor)", "ordiellipse (factor, rainbow)", "ordiellipse (factor, ehull)", "ordiellipse (factor, polygon)", "ordisurf (continuous)", "ordibubble (continuous)", "ordisymbol (factor)", "ordisymbol (factor, legend)", "ordisymbol (factor, large)", "ordivector (species)", "ordivector interpretation", "ggplot (ordisymbol1)", "ggplot (ordisymbol2)", "ggplot (ordispider1)", "ggplot (ordispider2)", "ggplot (ordisurf1)", "ggplot (ordisurf2)", "ggplot (add species)", "ggplot (ordiellipse)", "ggplot (add vector)")){ justDoIt(paste("attach(", .activeDataSet, ", warn.conflicts=F)", sep="")) logger(paste("attach(", .activeDataSet, ", warn.conflicts=F)",sep="")) } if (plottype == "envfit"){ doItAndPrint(paste("fitted <- envfit(plot1, data.frame(", axisvar, "), permutations=", perm, ")", sep="")) doItAndPrint(paste("fitted", sep="")) doItAndPrint(paste("plot(fitted, col='", col,"', cex=", cex, ")", sep="")) } if (plottype == "ordihull (factor)" && varfactor==T){ doItAndPrint(paste("ordihull(plot1, groups=", axisvar, ", draw='lines', col='", col, "')", sep="")) doItAndPrint(paste("summary(ordihull(plot1, groups=", axisvar, "))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='hull', permutations=", perm, ")", sep="")) } if (plottype == "ordihull (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordihull(plot1, groups=", axisvar, ", draw='lines', label=F, lwd=3, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50), border=colorspace::rainbow_hcl(length(levels(", axisvar, ")), 
c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordihull(plot1, groups=", axisvar, "))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='hull', permutations=", perm, ")", sep="")) } if (plottype == "ordihull (factor, polygon)" && varfactor==T){ doItAndPrint(paste("ordihull(plot1, groups=", axisvar, ", draw='polygon', alpha=127, label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordihull(plot1, groups=", axisvar, "))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='hull', permutations=", perm, ")", sep="")) } if (plottype == "ordiarrows (factor)" && varfactor==T){ doItAndPrint(paste("ordiarrows(plot1, groups=", axisvar, ", col='", col, "')", sep="")) } if (plottype == "ordiarrows (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordiarrows(plot1, groups=", axisvar, ", col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordisegments (factor)" && varfactor==T){ doItAndPrint(paste("ordisegments(plot1, groups=", axisvar, ", col='", col, "')", sep="")) } if (plottype == "ordisegments (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordisegments(plot1, groups=", axisvar, ", col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordispider (factor)" && varfactor==T){ doItAndPrint(paste("ordispider(plot1, groups=", axisvar, ", spiders='centroid', col='", col, "')", sep="")) } if (plottype == "ordispider (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordispider(plot1, groups=", axisvar, ", spiders='centroid', label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordibar (factor)" && varfactor==T){ doItAndPrint(paste("ordibar(plot1, groups=", axisvar, ", col='", col, "', conf=0.9, kind='se')", sep="")) } if (plottype == "ordibar (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordibar(plot1, groups=", axisvar, ", label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordiellipse (factor)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se', draw='lines', col='", col, "')", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='se', permutations=", perm, ")", sep="")) } if (plottype == "ordiellipse (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se', draw='lines', label=T, lwd=3, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50), border=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='se', permutations=", perm, ")", sep="")) } if (plottype == "ordiellipse (factor, ehull)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull', draw='lines', label=T, lwd=3, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50), border=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull'))", 
sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='ehull', permutations=", perm, ")", sep="")) } if (plottype == "ordiellipse (factor, polygon)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull', draw='polygon', alpha=127, label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='ehull', permutations=", perm, ")", sep="")) } if (plottype == "ordisurf (continuous)" && varfactor==F){ doItAndPrint(paste("ordisurf(plot1, y=", axisvar, ", add=T, col='", col, "')", sep="")) } if (plottype == "ordibubble (continuous)" && varfactor==F){ doItAndPrint(paste("ordibubble(plot1, var=", axisvar, ", fg='", col, "')", sep="")) } if (plottype == "ordisymbol (factor)" && varfactor==T){ justDoIt(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=F, rainbow_hcl=T, cex=", cex, ")", sep="")) logger(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=F, rainbow_hcl=T, cex=", cex, ")", sep="")) } if (plottype == "ordisymbol (factor, legend)" && varfactor==T){ justDoIt(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=T, legend.x='topleft', legend.ncol=1, rainbow_hcl=T, cex=", cex, ")", sep="")) logger(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=T, legend.x='topleft', legend.ncol=1, rainbow_hcl=T, cex=", cex, ")", sep="")) } if (plottype == "ordisymbol (factor, large)" && varfactor==T){ justDoIt(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=F, legend.x='topleft', legend.ncol=1, rainbow_hcl=T, cex=4, lwd=2)", sep="")) logger(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=F, legend.x='topleft', legend.ncol=1, rainbow_hcl=T, cex=4, lwd=2)", sep="")) } if (plottype == "ordivector (species)"){ realspecies <- eval(parse(text=paste("any(colnames(", .communityDataSet, ")=='", axisvar, "')", sep="")), envir=.GlobalEnv) if (realspecies == T) { doItAndPrint(paste("ordivector(plot1,'", axisvar, "',lty=0, angle=5, length=0.5)", sep="")) } } if (plottype == "ordivector interpretation"){ realspecies <- eval(parse(text=paste("any(colnames(", .communityDataSet, ")=='", axisvar, "')", sep="")), envir=.GlobalEnv) if (realspecies == T) { doItAndPrint(paste("ordivector(plot1,'", axisvar, "',lty=2)", sep="")) } } if (plottype == "ordiequilibriumcircle" && method == "PCA"){ doItAndPrint(paste("ordiequilibriumcircle(", modelValue, ", plot1, col='", col, "')", sep="")) } if (plottype == "ordicluster"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ",method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ",method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } logger(paste("cluster <- hclust(distmatrix, method='single')", sep="")) assign("cluster", justDoIt(paste("hclust(distmatrix, method='single')", sep="")), envir=.GlobalEnv) doItAndPrint(paste("ordicluster(plot1, cluster, prune=1, col='", col, "')", sep="")) } if (plottype == "ordicluster2"){ 
if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ",method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ",method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } logger(paste("cluster <- hclust(distmatrix, method='single')", sep="")) assign("cluster", justDoIt(paste("hclust(distmatrix, method='single')", sep="")), envir=.GlobalEnv) doItAndPrint(paste("ordicluster2(plot1, cluster, mingroups=1, col='", col, "')", sep="")) } if (plottype == "ordinearest"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("ordinearest(plot1, distmatrix, col='", col, "')", sep="")) } if (plottype == "ordispantree"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("lines(spantree(distmatrix,toolong=0), plot1, col='", col, "')", sep="")) } if (plottype == "distance displayed"){ if(treatasdist==F){ doItAndPrint(paste("distdisplayed(", .communityDataSet ,", plot1, distx='", dist, "', plotit=T)", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(", .communityDataSet, ")", sep="")), envir=.GlobalEnv) doItAndPrint(paste("distdisplayed(distmatrix, plot1, plotit=T)", sep="")) } } if (plottype == "coenocline"){ doItAndPrint(paste("ordicoeno(", .communityDataSet ,", ordiplot=plot1, axis=1, legend=T, cex=0.8, ncol=4)", sep="")) } if (plottype == "stressplot"){ doItAndPrint(paste("stressplot(", modelValue ,")", sep="")) } if (plottype == "orditkplot sites"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) }else{ justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, ")))", sep="")) } } if (plottype == "orditkplot species"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', 
choices=c(", choices, "), scaling='", scaling, "'))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) }else{ justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, ")))", sep="")) } } if (plottype == "orditkplot pointlabel"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method=="PCA" || method=="CA" || method=="DCA") { justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "'))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "'))", sep="")) }else{ justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, ")))", sep="")) } } if (plottype == "orglspider (factor)" && varfactor==T){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) logger(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglspider(", modelValue, ", groups=", axisvar, ", col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) logger(paste("with(", .activeDataSet, ", orglspider(", modelValue, ", groups=", axisvar, ", col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) } if (plottype == "orglellipse (factor)" && varfactor==T){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) logger(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglellipse(", modelValue, ", groups=", axisvar, ", kind='ehull', col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) logger(paste("with(", .activeDataSet, ", orglellipse(", modelValue, ", groups=", axisvar, ", kind='ehull', col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) } if (plottype %in% c("ggplot (ordisymbol1)", "ggplot (ordisymbol2)", "ggplot (ordispider1)", "ggplot (ordispider2)", "ggplot (ordisurf1)", "ggplot (ordisurf2)", "ggplot (ordiellipse)")){ logger(paste(" ")) logger(paste("Note that ggplot options use the 'ordiplot' plot named 'plot1'")) logger(paste("More examples are available from the documentation for 'sites.long'")) logger(paste(" ")) justDoIt(paste("library(ggplot2)", sep="")) logger(paste("library(ggplot2)", sep="")) 
doItAndPrint("BioR.theme <- theme(panel.background = element_blank(), panel.border = element_blank(), panel.grid = element_blank(), axis.line = element_line('gray25'), text = element_text(size = 12), axis.text = element_text(size = 10, colour = 'gray25'), axis.title = element_text(size = 14, colour = 'gray25'), legend.title = element_text(size = 14), legend.text = element_text(size = 14), legend.key = element_blank() )") logger(paste(" ")) logger(paste("sites1 <- sites.long(plot1, env.data=", .activeDataSet, ")", sep="")) assign("sites1", justDoIt(paste("sites.long(plot1, env.data=", .activeDataSet, ")", sep="")), envir=.GlobalEnv) if (method %in% c("PCoA", "PCoA (Cailliez)")) { logger(paste("axislabs <- axis.long(", modelValue, ", choices=c(", choices, "), cmdscale.model=T)", sep="")) assign("axislabs", justDoIt(paste("axis.long(", modelValue, ", choices=c(", choices, "), cmdscale.model=T)", sep="")), envir=.GlobalEnv) }else{ logger(paste("axislabs <- axis.long(", modelValue, ", choices=c(", choices, "))", sep="")) assign("axislabs", justDoIt(paste("axis.long(", modelValue, ", choices=c(", choices, "))", sep="")), envir=.GlobalEnv) } } if (plottype == "ggplot (ordisymbol1)"){ doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, shape=", axisvar, ", colour=", axisvar, "), size=5) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(colour='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordisymbol2)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, shape=", axisvar, ", colour=", axisvar, "), size=5) + geom_label_repel(data=sites1, aes(x=axis1, y=axis2, label=labels, colour=", axisvar, "), size=4, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(colour='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordispider1)"){ doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, colour=", axisvar, ", shape=", axisvar, "), size=5) + geom_point(data=centroids.long(sites1, grouping=", axisvar, ", centroids.only=TRUE), aes(x=axis1c, y=axis2c, colour=Centroid, shape=Centroid), size=10, show.legend=FALSE) + geom_segment(data=centroids.long(sites1, grouping=", axisvar, "), aes(x=axis1c, y=axis2c, xend=axis1, yend=axis2, colour=", axisvar, "), size=1, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + 
coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordispider2)"){ doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, colour=", axisvar, ", shape=", axisvar, "), size=5) + geom_segment(data=centroids.long(sites1, grouping=", axisvar, "), aes(x=axis1c, y=axis2c, xend=axis1, yend=axis2, colour=", axisvar, "), size=1, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordisurf1)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("axis.grid <- ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")) assign("axis.grid", justDoIt(paste("ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg1 <- ggplot() + geom_contour_filled(data=axis.grid, aes(x=x, y=y, z=z)) + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, size=", axisvar, "), shape=21, colour='black', fill='red') + geom_label_repel(data=sites1, aes(x=axis1, y=axis2, label=labels), colour='red', size=4) + BioR.theme + scale_fill_viridis_d() + scale_size(range=c(1, 10)) + labs(fill='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordisurf2)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("axis.grid <- ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")) assign("axis.grid", justDoIt(paste("ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg1 <- ggplot() + geom_contour(data=axis.grid, aes(x=x, y=y, z=z, colour=factor(after_stat(level))), size=2) + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, size=", axisvar, "), shape=21, colour='black', fill='red') + geom_label_repel(data=sites1, aes(x=axis1, y=axis2, label=labels), colour='black', size=4) + BioR.theme + scale_colour_viridis_d() + scale_size(range=c(1, 10)) + labs(colour='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordiellipse)"){ logger(paste("factor.ellipses <- ordiellipse(plot1, groups=", axisvar, ", display='sites', kind='sd')", sep="")) assign("factor.ellipses", justDoIt(paste("ordiellipse(plot1, groups=", axisvar, ", display='sites', kind='sd')", sep="")), envir=.GlobalEnv) logger(paste("factor.ellipses.data <- ordiellipse.long(factor.ellipses, grouping.name='", axisvar, "')", sep="")) assign("factor.ellipses.data", 
justDoIt(paste("ordiellipse.long(factor.ellipses, grouping.name='", axisvar, "')", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_polygon(data=factor.ellipses.data, aes(x=axis1, y=axis2, colour=", axisvar, ", fill=after_scale(alpha(colour, 0.2))), size=0.2, show.legend=FALSE) + geom_point(data=sites1, aes(x=axis1, y=axis2, colour=", axisvar, ", shape=", axisvar, "), size=5) + BioR.theme + geom_segment(data=centroids.long(sites1, grouping=", axisvar, "), aes(x=axis1c, y=axis2c, xend=axis1, yend=axis2, colour=", axisvar, "), size=1, show.legend=FALSE) + scale_color_brewer(palette = 'Set1') + coord_fixed(ratio=1) + labs(colour='", axisvar, "')", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (add species)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("spec.envfit <- envfit(plot1, env=", .communityDataSet, ")", sep="")) assign("spec.envfit", justDoIt(paste("envfit(plot1, env=", .communityDataSet, ")", sep="")), envir=.GlobalEnv) logger(paste("spec.data1 <- data.frame(r=spec.envfit$vectors$r, p=spec.envfit$vectors$pvals)", sep="")) assign("spec.data1", justDoIt(paste("data.frame(r=spec.envfit$vectors$r, p=spec.envfit$vectors$pvals)", sep="")), envir=.GlobalEnv) logger(paste("species1 <- species.long(plot1, spec.data=spec.data1)", sep="")) assign("species1", justDoIt(paste("species.long(plot1, spec.data=spec.data1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg2 <- plotgg1 + geom_segment(data=subset(species1, r > 0.6), aes(x=0, y=0, xend=axis1*2, yend=axis2*2), colour='black', size=1.2, arrow=arrow()) + geom_label_repel(data=subset(species1, r > 0.6), aes(x=axis1*2, y=axis2*2, label=labels), colour='black')", sep="")) doItAndPrint(paste("plotgg2")) } if (plottype == "ggplot (add vector)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("env.envfit <- envfit(plot1, env=", .activeDataSet, ")", sep="")) assign("env.envfit", justDoIt(paste("envfit(plot1, env=", .activeDataSet, ")", sep="")), envir=.GlobalEnv) logger(paste("vectors1 <- vectorfit.long(env.envfit)", sep="")) assign("vectors1", justDoIt(paste("vectorfit.long(env.envfit)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg2 <- plotgg1 + geom_segment(data=subset(vectors1, vector = ", axisvar, "), aes(x=0, y=0, xend=axis1*1.1, yend=axis2*1.1), colour='black', size=1.2, arrow=arrow()) + geom_label_repel(data=subset(vectors1, vector = ", axisvar, "), aes(x=axis1*1.1, y=axis2*1.1, label=vector), colour='black')", sep="")) doItAndPrint(paste("plotgg2")) } data <- tclvalue(dataVariable) =="1" if (data==T) { justDoIt(paste(.activeDataSet, "$", modelValue, ".ax1 <- scores(plot1, display='sites')[,1]", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".ax1 <- scores(plot1, display='sites')[,1]", sep="")) justDoIt(paste(.activeDataSet, "$", modelValue, ".ax2 <- scores(plot1, display='sites')[,2]", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".ax2 <- scores(plot1, display='sites')[,2]", sep="")) activeDataSet(.activeDataSet) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton 
OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot)
cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
tkgrid(tklabel(modelFrame, text="Save model as: ", width=20), model, sticky="w")
tkgrid(modelFrame, sticky="w")
tkgrid(tklabel(method1Frame, text="Ordination method"), sticky="w")
tkgrid(methodBox, methodScroll, sticky="w")
tkgrid(tklabel(method2Frame, text="Distance"), sticky="w")
tkgrid(distBox, distScroll, sticky="w")
tkgrid(tklabel(method3Frame, text="PCoA/NMS axes", width=15), NMSa, sticky="w")
tkgrid(tklabel(method3Frame, text="NMS permutations", width=15), NMSperm, sticky="w")
tkgrid(addspecCheckBox, tklabel(method3Frame, text="PCoA/NMS species", width=15), sticky="w")
tkgrid(summaryCheckBox, tklabel(method4Frame, text="model summary"), sticky="w")
tkgrid(tklabel(method4Frame, text="scaling", width=10), scale, sticky="w")
tkgrid(treatasdistCheckBox, tklabel(method4Frame, text="as.dist(Community)", width=15), sticky="w")
tkgrid(method1Frame, tklabel(methodFrame, text="", width=1), method2Frame, sticky="w")
tkgrid(method3Frame, tklabel(methodFrame, text="", width=1), method4Frame, sticky="w")
tkgrid(methodFrame, sticky="w")
tkgrid(tklabel(plot1Frame, text="Plot method"), sticky="w")
tkgrid(typeBox, typeScroll, sticky="nw")
tkgrid(tklabel(plot2Frame, text="Plot variable"), sticky="w")
tkgrid(axisBox, axisScroll, sticky="nw")
tkgrid(tklabel(plot3Frame, text="axes", width=10), choice, sticky="w")
tkgrid(dataCheckBox, tklabel(plot3Frame, text="add scores to dataframe"), sticky="w")
tkgrid(tklabel(plot4Frame, text="cex", width=10), cexa, sticky="w")
tkgrid(tklabel(plot4Frame, text="colour", width=10), cola, sticky="w")
tkgrid(plot1Frame, tklabel(plotFrame, text="", width=1), plot2Frame, sticky="w")
tkgrid(plot3Frame, tklabel(plotFrame, text="", width=1), plot4Frame, sticky="w")
tkgrid(plotFrame, sticky="w")
tkgrid(OKbutton, plotButton, cancelButton)
tkgrid(buttonsFrame, sticky="w")
tkgrid.configure(typeScroll, sticky="ns")
tkgrid.configure(axisScroll, sticky="ns")
tkgrid.configure(methodScroll, sticky="ns")
tkgrid.configure(distScroll, sticky="ns")
tkselection.set(typeBox, 0)
tkselection.set(methodBox, 0)
tkselection.set(axisBox, 0)
tkselection.set(distBox, 0)
for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0)
for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0)
.Tcl("update idletasks")
tkwm.resizable(top, 0, 0)
tkwm.deiconify(top)
tkgrab.set(top)
tkfocus(methodBox)
tkwait.window(top)
}

conordiGUI <- function(){
contrasts <- c("contr.treatment", "contr.poly")
checkAddOperator <- function(rhs){
    rhs.chars <- rev(strsplit(rhs, "")[[1]])
    if (length(rhs.chars) < 1) return(FALSE)
    check.char <- if ((rhs.chars[1] != " ") || (length(rhs.chars) == 1)) rhs.chars[1] else rhs.chars[2]
    !is.element(check.char, c("+", "*", ":", "/", "-", "^", "(", "%"))
}
top <- tktoplevel()
tkwm.title(top, "Constrained ordination")
.activeDataSet <- ActiveDataSet()
.communityDataSet <- CommunityDataSet()
.variables <- Variables()
variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", ""))
.cvariables <- CVariables()
cvariables <- paste(.cvariables)
modelName <- tclVar("Ordination.model1")
modelFrame <- tkframe(top, relief="groove", borderwidth=2)
model <- tkentry(modelFrame, width=40, textvariable=modelName)
methodFrame <- tkframe(top, relief="groove", borderwidth=2)
method1Frame <- tkframe(methodFrame)
method2Frame <- tkframe(methodFrame)
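# widgets of the constrained ordination dialog: ordination method, distance measure,
# summary/scaling options and the permutation count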
method3Frame <- tkframe(methodFrame) method4Frame <- tkframe(methodFrame) methodBox <- tklistbox(method1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(method1Frame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) tkset(methodScroll, ...)) methods <- c("RDA", "CCA", "capscale", "capscale (lingoes)", "capscale (cailliez)", "dbrda", "dbrda (lingoes)", "dbrda (cailliez)", "CAPdiscrim", "prc", "multiconstrained (RDA)", "multiconstrained (CCA)", "multiconstrained (capscale)", "multiconstrained (capscale add)", "multiconstrained (dbrda)") for (x in methods) tkinsert(methodBox, "end", x) distBox <- tklistbox(method2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") distScroll <- tkscrollbar(method2Frame, repeatinterval=5, command=function(...) tkyview(distBox, ...)) tkconfigure(distBox, yscrollcommand=function(...) tkset(distScroll, ...)) distances <- c("euclidean", "manhattan", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower", "morisita", "horn", "mountford", "raup" , "binomial", "chao", "cao", "mahalanobis", "hellinger", "aitchison", "robust.aitchison") for (x in distances) tkinsert(distBox, "end", x) summaryVariable <- tclVar("1") summaryCheckBox <- tkcheckbutton(method3Frame, variable=summaryVariable) treatasdistVariable <- tclVar("0") treatasdistCheckBox <- tkcheckbutton(method3Frame, variable=treatasdistVariable) scalingVariable <- tclVar("species") scale <- tkentry(method4Frame, width=10, textvariable=scalingVariable) permVariable <- tclVar("999") permutation <- tkentry(method4Frame, width=10, textvariable=permVariable) xFrame <- tkframe(top, relief="groove", borderwidth=2) x1Frame <- tkframe(xFrame) x4Frame <- tkframe(xFrame) x2Frame <- tkframe(x4Frame) x3Frame <- tkframe(x4Frame) xBox <- tklistbox(x2Frame, width=28, height=5, selectmode="single", background="white", exportselection="FALSE") xScroll <- tkscrollbar(x2Frame, repeatinterval=5, command=function(...) tkyview(xBox, ...)) tkconfigure(xBox, yscrollcommand=function(...) tkset(xScroll, ...)) for (x in variables) tkinsert(xBox, "end", x) rhsVariable <- tclVar("") rhsEntry <- tkentry(x1Frame, width=60, textvariable=rhsVariable) plotFrame <- tkframe(top, relief="groove", borderwidth=2) plot1Frame <- tkframe(plotFrame) plot2Frame <- tkframe(plotFrame) plot3Frame <- tkframe(plotFrame) plot4Frame <- tkframe(plotFrame) typeBox <- tklistbox(plot1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") typeScroll <- tkscrollbar(plot1Frame, repeatinterval=5, command=function(...) tkyview(typeBox, ...)) tkconfigure(typeBox, yscrollcommand=function(...) 
tkset(typeScroll, ...))
types <- c("plot", "ordiplot", "ordiplot3d", "ordirgl", "ordiplot empty", "origin axes",
    "identify sites", "identify species", "identify centroids",
    "text sites", "text species", "text centroids",
    "points sites", "points species", "points centroids",
    "label sites", "label species", "label centroids",
    "orditorp sites", "orditorp species", "orditorp centroids",
    "envfit",
    "ordihull (factor)", "ordihull (factor, rainbow)", "ordihull (factor, polygon)",
    "ordiarrows (factor)", "ordiarrows (factor, rainbow)",
    "ordisegments (factor)", "ordisegments (factor, rainbow)",
    "ordispider (factor)", "ordispider (factor, rainbow)",
    "ordibar (factor)", "ordibar (factor, rainbow)",
    "ordiellipse (factor)", "ordiellipse (factor, rainbow)", "ordiellipse (factor, ehull)", "ordiellipse (factor, polygon)",
    "ordisurf (continuous)", "ordibubble (continuous)",
    "ordisymbol (factor)", "ordisymbol (factor, legend)", "ordisymbol (factor, large)",
    "ordivector (species)", "ordivector interpretation",
    "ordicluster", "ordicluster2", "ordinearest", "ordispantree", "ordiresids",
    "distance displayed", "coenocline", "screeplot", "stressplot",
    "orditkplot sites", "orditkplot species", "orditkplot pointlabel",
    "orglspider (factor)", "orglellipse (factor)",
    "ggplot (ordisymbol1)", "ggplot (ordisymbol2)", "ggplot (ordispider1)", "ggplot (ordispider2)",
    "ggplot (ordisurf1)", "ggplot (ordisurf2)", "ggplot (ordiellipse)", "ggplot (add species)", "ggplot (add vector)")
for (x in types) tkinsert(typeBox, "end", x)
choicesVariable <- tclVar("1,2")
choice <- tkentry(plot3Frame, width=10, textvariable=choicesVariable)
dataVariable <- tclVar("0")
dataCheckBox <- tkcheckbutton(plot3Frame, variable=dataVariable)
axisBox <- tklistbox(plot2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE")
axisScroll <- tkscrollbar(plot2Frame, repeatinterval=5, command=function(...) tkyview(axisBox, ...))
tkconfigure(axisBox, yscrollcommand=function(...)
tkset(axisScroll, ...)) for (x in variables) tkinsert(axisBox, "end", x) cexVariable <- tclVar("1") cexa <- tkentry(plot4Frame, width=10, textvariable=cexVariable) colVariable <- tclVar("blue") cola <- tkentry(plot4Frame, width=10, textvariable=colVariable) onDoubleClick <- function(){ var <- as.character(tkget(xBox, "active"))[1] tkfocus(rhsEntry) rhs <- tclvalue(rhsVariable) rhs.chars <- rev(strsplit(rhs, "")[[1]]) check.char <- if (length(rhs.chars) > 0){ if ((rhs.chars[1] != " ") || (length(rhs.chars) == 1)) rhs.chars[1] else rhs.chars[2] } else "" tclvalue(rhsVariable) <- if (rhs == "" || is.element(check.char, c("+", "*", ":", "/", "-", "^", "(", "%"))) paste(rhs, var, sep="") else paste(rhs, "+", var) tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onPlus <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "+ ") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onTimes <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "*", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onColon <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, ":", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onSlash <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "/", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onIn <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "%in% ") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onMinus <- function(){ rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "+Condition(") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onPower <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, "^", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onLeftParen <- function(){ tkfocus(rhsEntry) rhs <- tclvalue(rhsVariable) tclvalue(rhsVariable) <- paste(rhs, "(", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onRightParen <- function(){ rhs <- tclvalue(rhsVariable) if (!checkAddOperator(rhs)) return() tclvalue(rhsVariable) <- paste(rhs, ")", sep="") tkicursor(rhsEntry, "end") tkxview.moveto(rhsEntry, "1") } onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) method <- methods[as.numeric(tkcurselection(methodBox))+1] perm <- as.numeric(tclvalue(permVariable)) dist <- distances[as.numeric(tkcurselection(distBox))+1] treatasdist <- tclvalue(treatasdistVariable)==1 check.empty <- gsub(" ", "", tclvalue(rhsVariable)) if ("" == check.empty) { tkmessageBox(message="Right-hand side of model empty.", icon="error", type="ok") } formula <- paste(.communityDataSet, tclvalue(rhsVariable), sep=" ~ ") if (method=="RDA") { command <- paste("rda(", formula, ", data=", .activeDataSet, ")", sep="") doItAndPrint(paste("dist.eval(", .communityDataSet, ",'euclidean')", sep="")) } if (method=="CCA") { command <- paste("cca(", formula, ", data=", .activeDataSet, ")", sep="") } if (method=="capscale") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command 
<- paste("capscale(", formula, ", data=", .activeDataSet, ", distance='", dist, "', sqrt.dist=F, add=F)", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } doItAndPrint(paste("adonis2(", formula, ", data=" , .activeDataSet, ", by='terms', method='", dist, "', permutations=", perm, ")", sep="")) } if (method=="capscale (lingoes)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("capscale(", formula, ", data=", .activeDataSet, ", distance='", dist, "', sqrt.dist=F, add='lingoes')", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } doItAndPrint(paste("adonis2(", formula, ", data=" , .activeDataSet, ", by='terms', method='", dist, "', permutations=", perm, ")", sep="")) } if (method=="capscale (cailliez)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("capscale(", formula, ", data=", .activeDataSet, ", distance='", dist, "', sqrt.dist=F, add='cailliez')", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } doItAndPrint(paste("adonis2(", formula, ", data=" , .activeDataSet, ", by='terms', method='", dist, "', permutations=", perm, ")", sep="")) } if (method=="dbrda") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("dbrda(", formula, ", data=", .activeDataSet, ", distance='", dist, "', sqrt.dist=F, add=F)", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } doItAndPrint(paste("adonis2(", formula, ", data=" , .activeDataSet, ", by='terms', method='", dist, "', permutations=", perm, ")", sep="")) } if (method=="dbrda (lingoes)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("dbrda(", formula, ", data=", .activeDataSet, ", distance='", dist, "', sqrt.dist=F, add='lingoes')", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } doItAndPrint(paste("adonis2(", formula, ", data=" , .activeDataSet, ", by='terms', method='", dist, "', permutations=", perm, ")", sep="")) } if (method=="dbrda (cailliez)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("dbrda(", formula, ", data=", .activeDataSet, ", distance='", dist, "', sqrt.dist=F, add='cailliez')", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } doItAndPrint(paste("adonis2(", formula, ", data=" , .activeDataSet, ", by='terms', method='", dist, "', permutations=", perm, ")", sep="")) } if (method=="prc") { command <- paste("prc(", .communityDataSet, ", " ,tclvalue(rhsVariable), ")", sep="") doItAndPrint(paste("dist.eval(", 
.communityDataSet, ",'", dist, "')", sep="")) } if (method=="CAPdiscrim") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("CAPdiscrim(", formula, ", ", .activeDataSet, ", dist='", dist, "', permutations=", perm,")", sep="") doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } if (method=="multiconstrained (RDA)") { command <- paste("multiconstrained(method='rda',", formula, ", ", .activeDataSet, ", contrast=0, step=", perm, ")", sep="") doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } if (method=="multiconstrained (CCA)") { command <- paste("multiconstrained(method='cca',", formula, ", ", .activeDataSet, ", contrast=0, step=", perm, ")", sep="") } if (method=="multiconstrained (capscale)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("multiconstrained(method='capscale',", formula, ", ", .activeDataSet, ",dist='", dist, "', add=F, contrast=0, step=", perm, ")", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } } if (method=="multiconstrained (capscale add)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("multiconstrained(method='capscale',", formula, ", ", .activeDataSet, ",dist='", dist, "', add=T, contrast=0, step=", perm, ")", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } } if (method=="multiconstrained (dbrda)") { if(treatasdist==T){ logger(paste(.communityDataSet, " <- as.dist(", .communityDataSet, ")", sep="")) assign(.communityDataSet, justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } command <- paste("multiconstrained(method='dbrda',", formula, ", ", .activeDataSet, ",dist='", dist, "', add=F, contrast=0, step=", perm, ")", sep="") if(treatasdist==F){ doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) } } modelValue <- tclvalue(modelName) # if (!is.valid.name(modelValue)){ # tkmessageBox(message=paste('"', modelValue, '" is not a valid name.', # sep=""), icon="error", type="ok") # } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) if (method == "RDA" || method == "CCA" || method == "capscale" || method == "capscale (lingoes)" || method == "capscale (cailliez)" || method == "dbrda" || method == "dbrda (lingoes)" || method == "dbrda (cailliez)") { doItAndPrint(paste("check.ordiscores(", .communityDataSet, ", ", modelValue, ", check.species=T)", sep="")) } if (method == "CAPdiscrim") { doItAndPrint(paste("check.ordiscores(", .communityDataSet, ", ", modelValue, ", check.species=F)", sep="")) } sum <- tclvalue(summaryVariable) == "1" scaling <- tclvalue(scalingVariable) if (sum==T) { if (method %in% c("CAPdiscrim", "multiconstrained (RDA)", "multiconstrained (CCA)", "multiconstrained (capscale)", "multiconstrained (capscale add)", "multiconstrained (dbrda)")) { doItAndPrint(paste(modelValue, sep="")) }else{ doItAndPrint(paste("summary(", modelValue, ", scaling='", 
scaling, "')", sep="")) } if (method=="RDA" || method=="CCA" || method=="capscale" || method=="capscale (lingoes)" || method=="capscale (cailliez)" || method=="dbrda" || method=="dbrda (lingoes)" || method=="dbrda (cailliez)") { doItAndPrint(paste("eigenvals(", modelValue, ")", sep="")) doItAndPrint(paste("RsquareAdj(", modelValue, ")", sep="")) doItAndPrint(paste("deviance(", modelValue, ")", sep="")) doItAndPrint(paste("vif.cca(", modelValue, ")", sep="")) if (method=="RDA" || method=="CCA") { doItAndPrint(paste("goodness(", modelValue, ", display='sites', model='CCA')", sep="")) doItAndPrint(paste("inertcomp(", modelValue, ", display='sites', proportional=T)", sep="")) } } if (perm>0 && method !="CAPdiscrim" && method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)") { doItAndPrint(paste("permutest(", modelValue, ", permutations=", perm, ")", sep="")) doItAndPrint(paste("permutest(", modelValue, ", permutations=", perm, ", first=T)", sep="")) if (method !="prc") {doItAndPrint(paste("anova.cca(", modelValue, ", step=", perm, ", by='terms')", sep=""))} if (method !="prc") {doItAndPrint(paste("anova.cca(", modelValue, ", step=", perm, ", by='margin')", sep=""))} if (method !="prc") {doItAndPrint(paste("anova.cca(", modelValue, ", step=", perm, ", by='onedf')", sep=""))} } } if(treatasdist==T && method!="RDA" && method!="CCA" && method!="prc" && method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)"){ logger(paste(.communityDataSet, " <- data.frame(as.matrix(", .communityDataSet, "))", sep="")) assign(.communityDataSet, justDoIt(paste("data.frame(as.matrix(",.communityDataSet, "))", sep="")), envir=.GlobalEnv) } } onPlot <- function(){ method <- methods[as.numeric(tkcurselection(methodBox))+1] modelValue <- tclvalue(modelName) perm <- as.numeric(tclvalue(permVariable)) plottype <- types[as.numeric(tkcurselection(typeBox))+1] scaling <- tclvalue(scalingVariable) choices <- tclvalue(choicesVariable) dist <- distances[as.numeric(tkcurselection(distBox))+1] cex <- tclvalue(cexVariable) col <- tclvalue(colVariable) treatasdist <- tclvalue(treatasdistVariable)==1 if (plottype == "plot" && method != "CAPdiscrim" && method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("plot1 <- plot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- plot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) } if (plottype == "ordiplot"){ if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method == "CAPdiscrim") { justDoIt(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "))", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "))", sep="")) }else{ justDoIt(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "')", sep="")) } } 
} if (plottype == "ordiplot3d"){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method == "CAPdiscrim") { justDoIt(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3))", sep="")) logger(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3))", sep="")) }else{ justDoIt(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordiplot3d(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) } } } if (plottype == "ordirgl"){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method == "CAPdiscrim") { justDoIt(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3))", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3))", sep="")) }else{ justDoIt(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", choices=c(1, 2, 3), scaling='", scaling, "')", sep="")) } } } if (plottype == "ordiplot empty"){ if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method == "CAPdiscrim") { justDoIt(paste("plot1 <- ordiplot(", modelValue, ", type='none', choices=c(", choices, "))", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", type='none', choices=c(", choices, "))", sep="")) }else{ justDoIt(paste("plot1 <- ordiplot(", modelValue, ", type='none',choices=c(", choices, "), scaling='", scaling, "')", sep="")) logger(paste("plot1 <- ordiplot(", modelValue, ", type='none',choices=c(", choices, "), scaling='", scaling, "')", sep="")) } } } if (plottype == "identify sites"){ doItAndPrint(paste("identify(plot1, 'sites', col='", col,"', cex=", cex, ")", sep="")) } if (plottype == "identify species"){ doItAndPrint(paste("identify(plot1, 'species', col='", col,"', cex=", cex, ")", sep="")) } if (plottype == "identify centroids"){ doItAndPrint(paste("identify(plot1, 'centroids', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "text sites"){ doItAndPrint(paste("text(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "text species"){ doItAndPrint(paste("text(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "text centroids"){ doItAndPrint(paste("text(plot1, 'centroids', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "points sites"){ doItAndPrint(paste("points(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "points species"){ doItAndPrint(paste("points(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "points centroids"){ doItAndPrint(paste("points(plot1, 'centroids', col='", col,"', cex=", 
cex, ")",sep="")) } if (plottype == "label sites"){ doItAndPrint(paste("ordilabel(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "label species"){ doItAndPrint(paste("ordilabel(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "label centroids"){ doItAndPrint(paste("ordilabel(plot1, 'centroids', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "orditorp sites"){ doItAndPrint(paste("orditorp(plot1, 'sites', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "orditorp species"){ doItAndPrint(paste("orditorp(plot1, 'species', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "orditorp centroids"){ doItAndPrint(paste("orditorp(plot1, 'centroids', col='", col,"', cex=", cex, ")",sep="")) } if (plottype == "origin axes"){ doItAndPrint(paste("abline(h = 0, lty = 3)", sep="")) doItAndPrint(paste("abline(v = 0, lty = 3)", sep="")) } if (plottype == "ordiresids"){ if (method != "CAPdiscrim" && method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("ordiresids(", modelValue, ", kind='residuals')", sep="")) logger(paste("ordiresids(", modelValue, ", kind='residuals')", sep="")) } } if (plottype == "orditkplot sites"){ if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method != "CAPdiscrim") { justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) }else{ justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='sites', choices=c(", choices, ")))", sep="")) } } } if (plottype == "orditkplot species"){ if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (method != "CAPdiscrim") { justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, "), scaling='", scaling, "'))", sep="")) }else{ justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", display='species', choices=c(", choices, ")))", sep="")) } } } if (plottype == "orditkplot pointlabel"){ if (method!="multiconstrained (RDA)" && method!="multiconstrained (CCA)" && method!="multiconstrained (capscale)" && method!="multiconstrained (capscale add)" && method!="multiconstrained (dbrda)"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", 
sep="")) if (method != "CAPdiscrim") { justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "'))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, "), scaling='", scaling, "'))", sep="")) }else{ justDoIt(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, ")))", sep="")) logger(paste("plot1 <- orditkplot(ordipointlabel(", modelValue, ", choices=c(", choices, ")))", sep="")) } } } axisvar <- .variables[as.numeric(tkcurselection(axisBox))+1] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", axisvar, ")", sep="")), envir=.GlobalEnv) if (plottype %in% c("envfit", "ordihull (factor)", "ordihull (factor, rainbow)", "ordihull (factor, polygon)", "ordiarrows (factor)", "ordiarrows (factor, rainbow)", "ordisegments (factor)", "ordisegments (factor, rainbow)", "ordispider (factor)", "ordispider (factor, rainbow)", "ordibar (factor)", "ordibar (factor, rainbow)", "ordiellipse (factor)", "ordiellipse (factor, rainbow)", "ordiellipse (factor, ehull)", "ordiellipse (factor, polygon)", "ordisurf (continuous)", "ordibubble (continuous)", "ordisymbol (factor)", "ordisymbol (factor, legend)", "ordisymbol (factor, large)", "ordivector (species)", "ordivector interpretation", "ggplot (ordisymbol1)", "ggplot (ordisymbol2)", "ggplot (ordispider1)", "ggplot (ordispider2)", "ggplot (ordisurf1)", "ggplot (ordisurf2)", "ggplot (ordiellipse)", "ggplot (add species)", "ggplot (add vector)")){ justDoIt(paste("attach(", .activeDataSet, ", warn.conflicts=F)", sep="")) logger(paste("attach(", .activeDataSet, ", warn.conflicts=F)",sep="")) } if (plottype == "envfit"){ doItAndPrint(paste("fitted <- envfit(plot1, data.frame(", axisvar, "), permutations=", perm, ")", sep="")) doItAndPrint(paste("fitted", sep="")) doItAndPrint(paste("plot(fitted, col='", col,"', cex=", cex, ")", sep="")) } if (plottype == "ordihull (factor)" && varfactor==T){ doItAndPrint(paste("ordihull(plot1, groups=", axisvar, ", draw='lines', col='", col, "')", sep="")) doItAndPrint(paste("summary(ordihull(plot1, groups=", axisvar, "))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='hull', permutations=", perm, ")", sep="")) } if (plottype == "ordihull (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordihull(plot1, groups=", axisvar, ", draw='lines', label=F, lwd=3, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50), border=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordihull(plot1, groups=", axisvar, "))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='hull', permutations=", perm, ")", sep="")) } if (plottype == "ordihull (factor, polygon)" && varfactor==T){ doItAndPrint(paste("ordihull(plot1, groups=", axisvar, ", draw='polygon', alpha=127, label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordihull(plot1, groups=", axisvar, "))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='hull', permutations=", perm, ")", sep="")) } if (plottype == "ordiarrows (factor)" && varfactor==T){ doItAndPrint(paste("ordiarrows(plot1,", axisvar, ", col='", col, "')", sep="")) } if (plottype == "ordiarrows (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordiarrows(plot1,", axisvar, ", col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", 
sep="")) } if (plottype == "ordisegments (factor)" && varfactor==T){ doItAndPrint(paste("ordisegments(plot1,", axisvar, ", col='", col, "')", sep="")) } if (plottype == "ordisegments (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordisegments(plot1,", axisvar, ", col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordispider (factor)" && varfactor==T){ doItAndPrint(paste("ordispider(plot1, groups=", axisvar, ", spiders='centroid', col='", col, "')", sep="")) } if (plottype == "ordispider (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordispider(plot1, groups=", axisvar, ", spiders='centroid', label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordibar (factor)" && varfactor==T){ doItAndPrint(paste("ordibar(plot1, groups=", axisvar, ", conf=0.9, kind='se', col='", col, "')", sep="")) } if (plottype == "ordibar (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordibar(plot1, groups=", axisvar, ", conf=0.9, kind='se', label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) } if (plottype == "ordiellipse (factor)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se', draw='lines', col='", col, "')", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='se', permutations=", perm, ")", sep="")) } if (plottype == "ordiellipse (factor, rainbow)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se', draw='lines', label=T, lwd=3, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50), border=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='se'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='se', permutations=", perm, ")", sep="")) } if (plottype == "ordiellipse (factor, ehull)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull', draw='lines', label=T, lwd=3, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50), border=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='ehull', permutations=", perm, ")", sep="")) } if (plottype == "ordiellipse (factor, polygon)" && varfactor==T){ doItAndPrint(paste("ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull', draw='polygon', alpha=127, label=T, col=colorspace::rainbow_hcl(length(levels(", axisvar, ")), c=90, l=50))", sep="")) doItAndPrint(paste("summary(ordiellipse(plot1, groups=", axisvar, ", conf=0.9, kind='ehull'))", sep="")) doItAndPrint(paste("ordiareatest(plot1, groups=", axisvar, ", area='ellipse', kind='ehull', permutations=", perm, ")", sep="")) } if (plottype == "ordisurf (continuous)" && varfactor==F){ doItAndPrint(paste("ordisurf(plot1, y=", axisvar, ", add=T, col='", col, "')", sep="")) } if (plottype == "ordibubble (continuous)" && varfactor==F){ doItAndPrint(paste("ordibubble(plot1, var=", axisvar, ", fg='", col, "')", sep="")) } if (plottype == "ordisymbol (factor)" && 
varfactor==T){ justDoIt(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', rainbow_hcl=T, legend=F, cex=", cex, ")", sep="")) logger(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', rainbow_hcl=T, legend=F, cex=", cex, ")", sep="")) } if (plottype == "ordisymbol (factor, legend)" && varfactor==T){ justDoIt(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', rainbow_hcl=T, legend=T, legend.x='topleft', legend.ncol=1, cex=", cex, ")", sep="")) logger(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', rainbow_hcl=T, legend=T, legend.x='topleft', legend.ncol=1, cex=", cex, ")", sep="")) } if (plottype == "ordisymbol (factor, large)" && varfactor==T){ justDoIt(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=F, legend.x='topleft', legend.ncol=1, rainbow_hcl=T, cex=4, lwd=2)", sep="")) logger(paste("ordisymbol(plot1, y=", .activeDataSet, ", factor='", axisvar, "', legend=F, legend.x='topleft', legend.ncol=1, rainbow_hcl=T, cex=4, lwd=2)", sep="")) } if (plottype == "orglspider (factor)" && varfactor==T){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) logger(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglspider(", modelValue, ", groups=", axisvar, ", col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) logger(paste("with(", .activeDataSet, ", orglspider(", modelValue, ", groups=", axisvar, ", col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) } if (plottype == "orglellipse (factor)" && varfactor==T){ justDoIt(paste("library(vegan3d)", sep="")) logger(paste("library(vegan3d)", sep="")) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) logger(paste("plot1 <- ordirgl(", modelValue, ", ax.col='darkgrey', type='n', envfit=NA)", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) logger(paste("with(", .activeDataSet, ", orglpoints(", modelValue, ", col=as.numeric(", axisvar, ")))", sep="")) justDoIt(paste("with(", .activeDataSet, ", orglellipse(", modelValue, ", groups=", axisvar, ", kind='ehull', col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) logger(paste("with(", .activeDataSet, ", orglellipse(", modelValue, ", groups=", axisvar, ", kind='ehull', col=c(1:max(as.numeric(", axisvar, ")))))", sep="")) } if (plottype == "ordivector (species)"){ realspecies <- eval(parse(text=paste("any(colnames(", .communityDataSet, ")=='", axisvar, "')", sep="")), envir=.GlobalEnv) if (realspecies == T) { doItAndPrint(paste("ordivector(plot1, '", axisvar, "', lty=0, angle=5, length=0.5)", sep="")) } } if (plottype == "ordivector interpretation"){ realspecies <- eval(parse(text=paste("any(colnames(", .communityDataSet, ")=='", axisvar, "')", sep="")), envir=.GlobalEnv) if (realspecies == T) { doItAndPrint(paste("ordivector(plot1, '", axisvar, "', 
lty=2)", sep="")) } } if (plottype == "ordicluster"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ",method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ",method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } logger(paste("cluster <- hclust(distmatrix, method='single')", sep="")) assign("cluster", justDoIt(paste("hclust(distmatrix, method='single')", sep="")), envir=.GlobalEnv) doItAndPrint(paste("ordicluster(plot1, cluster, prune=1, col='", col, "')", sep="")) } if (plottype == "ordicluster2"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ",method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(", .communityDataSet, ",method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(", .communityDataSet, ")", sep="")), envir=.GlobalEnv) } logger(paste("cluster <- hclust(distmatrix, method='single')", sep="")) assign("cluster", justDoIt(paste("hclust(distmatrix, method='single')", sep="")), envir=.GlobalEnv) doItAndPrint(paste("ordicluster2(plot1, cluster, mingroups=1, col='", col, "')", sep="")) } if (plottype == "ordinearest"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("ordinearest(plot1, distmatrix,col='", col, "')", sep="")) } if (plottype == "ordispantree"){ if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("lines(spantree(distmatrix,toolong=0),plot1,col='", col, "')", sep="")) } if (plottype == "distance displayed"){ if(treatasdist==F){ doItAndPrint(paste("distdisplayed(", .communityDataSet ,",plot1, distx='", dist, "',plotit=T)", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) doItAndPrint(paste("distdisplayed(distmatrix, plot1, plotit=T)", sep="")) } } if (plottype == "coenocline"){ doItAndPrint(paste("ordicoeno(", .communityDataSet, ", ordiplot=plot1, axis=1, legend=T, cex=0.8, ncol=4)", sep="")) } if (plottype == "screeplot"){ justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) justDoIt(paste("plot1 <- screeplot(", modelValue, ")", sep="")) logger(paste("plot1 <- screeplot(", modelValue, ")", sep="")) } if (plottype == "stressplot"){ 
doItAndPrint(paste("stressplot(", modelValue ,")", sep="")) } if (plottype %in% c("ggplot (ordisymbol1)", "ggplot (ordisymbol2)", "ggplot (ordispider1)", "ggplot (ordispider2)", "ggplot (ordisurf1)", "ggplot (ordisurf2)", "ggplot (ordiellipse)")){ logger(paste(" ")) logger(paste("Note that ggplot options use the 'ordiplot' plot named 'plot1'")) logger(paste("More examples are available from the documentation for 'sites.long'")) logger(paste(" ")) justDoIt(paste("library(ggplot2)", sep="")) logger(paste("library(ggplot2)", sep="")) doItAndPrint("BioR.theme <- theme(panel.background = element_blank(), panel.border = element_blank(), panel.grid = element_blank(), axis.line = element_line('gray25'), text = element_text(size = 12), axis.text = element_text(size = 10, colour = 'gray25'), axis.title = element_text(size = 14, colour = 'gray25'), legend.title = element_text(size = 14), legend.text = element_text(size = 14), legend.key = element_blank() )") logger(paste(" ")) logger(paste("sites1 <- sites.long(plot1, env.data=", .activeDataSet, ")", sep="")) assign("sites1", justDoIt(paste("sites.long(plot1, env.data=", .activeDataSet, ")", sep="")), envir=.GlobalEnv) if (method %in% c("CAPdiscrim")) { logger(paste("axislabs <- axis.long(", modelValue, ", choices=c(", choices, "), CAPdiscrim.model=T)", sep="")) assign("axislabs", justDoIt(paste("axis.long(", modelValue, ", choices=c(", choices, "), CAPdiscrim.model=T)", sep="")), envir=.GlobalEnv) }else{ logger(paste("axislabs <- axis.long(", modelValue, ", choices=c(", choices, "))", sep="")) assign("axislabs", justDoIt(paste("axis.long(", modelValue, ", choices=c(", choices, "))", sep="")), envir=.GlobalEnv) } } if (plottype == "ggplot (ordisymbol1)"){ doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, shape=", axisvar, ", colour=", axisvar, "), size=5) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(colour='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordisymbol2)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, shape=", axisvar, ", colour=", axisvar, "), size=5) + geom_label_repel(data=sites1, aes(x=axis1, y=axis2, label=labels, colour=", axisvar, "), size=4, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + labs(colour='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordispider1)"){ doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = 
dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, colour=", axisvar, ", shape=", axisvar, "), size=5) + geom_point(data=centroids.long(sites1, grouping=", axisvar, ", centroids.only=TRUE), aes(x=axis1c, y=axis2c, colour=Centroid, shape=Centroid), size=10, show.legend=FALSE) + geom_segment(data=centroids.long(sites1, grouping=", axisvar, "), aes(x=axis1c, y=axis2c, xend=axis1, yend=axis2, colour=", axisvar, "), size=1, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordispider2)"){ doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, colour=", axisvar, ", shape=", axisvar, "), size=5) + geom_segment(data=centroids.long(sites1, grouping=", axisvar, "), aes(x=axis1c, y=axis2c, xend=axis1, yend=axis2, colour=", axisvar, "), size=1, show.legend=FALSE) + BioR.theme + scale_color_brewer(palette = 'Set1') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordisurf1)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("axis.grid <- ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")) assign("axis.grid", justDoIt(paste("ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg1 <- ggplot() + geom_contour_filled(data=axis.grid, aes(x=x, y=y, z=z)) + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, size=", axisvar, "), shape=21, colour='black', fill='red') + geom_label_repel(data=sites1, aes(x=axis1, y=axis2, label=labels), colour='red', size=4) + BioR.theme + scale_fill_viridis_d() + scale_size(range=c(1, 10)) + labs(fill='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordisurf2)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("axis.grid <- ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")) assign("axis.grid", justDoIt(paste("ordisurfgrid.long(ordisurf(plot1, y=", axisvar, "))", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg1 <- ggplot() + geom_contour(data=axis.grid, aes(x=x, y=y, z=z, colour=factor(after_stat(level))), size=2) + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_point(data=sites1, aes(x=axis1, y=axis2, size=", axisvar, "), shape=21, colour='black', fill='red') + geom_label_repel(data=sites1, aes(x=axis1, y=axis2, label=labels), colour='black', size=4) + BioR.theme + scale_colour_viridis_d() + scale_size(range=c(1, 10)) + 
labs(colour='", axisvar, "') + coord_fixed(ratio=1)", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (ordiellipse)"){ logger(paste("factor.ellipses <- ordiellipse(plot1, groups=", axisvar, ", display='sites', kind='sd')", sep="")) assign("factor.ellipses", justDoIt(paste("ordiellipse(plot1, groups=", axisvar, ", display='sites', kind='sd')", sep="")), envir=.GlobalEnv) logger(paste("factor.ellipses.data <- ordiellipse.long(factor.ellipses, grouping.name='", axisvar, "')", sep="")) assign("factor.ellipses.data", justDoIt(paste("ordiellipse.long(factor.ellipses, grouping.name='", axisvar, "')", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg1 <- ggplot() + geom_vline(xintercept = c(0), color = 'grey70', linetype = 2) + geom_hline(yintercept = c(0), color = 'grey70', linetype = 2) + xlab(axislabs[1, 'label']) + ylab(axislabs[2, 'label']) + scale_x_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + scale_y_continuous(sec.axis = dup_axis(labels=NULL, name=NULL)) + geom_polygon(data=factor.ellipses.data, aes(x=axis1, y=axis2, colour=", axisvar, ", fill=after_scale(alpha(colour, 0.2))), size=0.2, show.legend=FALSE) + geom_point(data=sites1, aes(x=axis1, y=axis2, colour=", axisvar, ", shape=", axisvar, "), size=5) + BioR.theme + geom_segment(data=centroids.long(sites1, grouping=", axisvar, "), aes(x=axis1c, y=axis2c, xend=axis1, yend=axis2, colour=", axisvar, "), size=1, show.legend=FALSE) + scale_color_brewer(palette = 'Set1') + coord_fixed(ratio=1) + labs(colour='", axisvar, "')", sep="")) doItAndPrint(paste("plotgg1")) } if (plottype == "ggplot (add species)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("spec.envfit <- envfit(plot1, env=", .communityDataSet, ")", sep="")) assign("spec.envfit", justDoIt(paste("envfit(plot1, env=", .communityDataSet, ")", sep="")), envir=.GlobalEnv) logger(paste("spec.data1 <- data.frame(r=spec.envfit$vectors$r, p=spec.envfit$vectors$pvals)", sep="")) assign("spec.data1", justDoIt(paste("data.frame(r=spec.envfit$vectors$r, p=spec.envfit$vectors$pvals)", sep="")), envir=.GlobalEnv) logger(paste("species1 <- species.long(plot1, spec.data=spec.data1)", sep="")) assign("species1", justDoIt(paste("species.long(plot1, spec.data=spec.data1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg2 <- plotgg1 + geom_segment(data=subset(species1, r > 0.6), aes(x=0, y=0, xend=axis1*2, yend=axis2*2), colour='black', size=1.2, arrow=arrow()) + geom_label_repel(data=subset(species1, r > 0.6), aes(x=axis1*2, y=axis2*2, label=labels), colour='black')", sep="")) doItAndPrint(paste("plotgg2")) } if (plottype == "ggplot (add vector)"){ justDoIt(paste("library(ggrepel)", sep="")) logger(paste("library(ggrepel)", sep="")) logger(paste("env.envfit <- envfit(plot1, env=", .activeDataSet, ")", sep="")) assign("env.envfit", justDoIt(paste("envfit(plot1, env=", .activeDataSet, ")", sep="")), envir=.GlobalEnv) logger(paste("vectors1 <- vectorfit.long(env.envfit)", sep="")) assign("vectors1", justDoIt(paste("vectorfit.long(env.envfit)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plotgg2 <- plotgg1 + geom_segment(data=subset(vectors1, vector = ", axisvar, "), aes(x=0, y=0, xend=axis1*1.1, yend=axis2*1.1), colour='black', size=1.2, arrow=arrow()) + geom_label_repel(data=subset(vectors1, vector = ", axisvar, "), aes(x=axis1*1.1, y=axis2*1.1, label=vector), colour='black')", sep="")) doItAndPrint(paste("plotgg2")) } data <- tclvalue(dataVariable) =="1" if (data==T) { justDoIt(paste(.activeDataSet, 
"$", modelValue, ".ax1 <- scores(plot1,display='sites')[,1]", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".ax1 <- scores(plot1,display='sites')[,1]", sep="")) justDoIt(paste(.activeDataSet, "$", modelValue, ".ax2 <- scores(plot1,display='sites')[,2]", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".ax2 <- scores(plot1,display='sites')[,2]", sep="")) activeDataSet(.activeDataSet) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } .operatorFont <- Rcmdr::getRcmdr("operatorFont") plusButton <- tkbutton(x3Frame, text="+", width="3", command=onPlus, font=.operatorFont) timesButton <- tkbutton(x3Frame, text="*", width="3", command=onTimes, font=.operatorFont) colonButton <- tkbutton(x3Frame, text=":", width="3", command=onColon, font=.operatorFont) slashButton <- tkbutton(x3Frame, text="/", width="3", command=onSlash, font=.operatorFont) inButton <- tkbutton(xFrame, text="%in%", width="3", command=onIn, font=.operatorFont) minusButton <- tkbutton(x3Frame, text="Cond", width="3", command=onMinus, font=.operatorFont) powerButton <- tkbutton(x3Frame, text="^", width="3", command=onPower, font=.operatorFont) leftParenButton <- tkbutton(x3Frame, text="(", width="3", command=onLeftParen, font=.operatorFont) rightParenButton <- tkbutton(x3Frame, text=")", width="3", command=onRightParen, font=.operatorFont) buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save model as: ", width=20), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(method1Frame, text="Ordination method"), sticky="w") tkgrid(methodBox, methodScroll,sticky="w") tkgrid(tklabel(method2Frame, text="Distance"), sticky="w") tkgrid(distBox, distScroll,sticky="w") tkgrid(summaryCheckBox, tklabel(method3Frame, text="model summary"), sticky="w") tkgrid(treatasdistCheckBox, tklabel(method3Frame, text="as.dist(Community)", width=15), sticky="w") tkgrid(tklabel(method4Frame, text="scaling", width=10), scale, sticky="w") tkgrid(tklabel(method4Frame, text="permutations", width=10), permutation, sticky="w") tkgrid(method1Frame, tklabel(methodFrame, text="", width=1), method2Frame, sticky="w") tkgrid(method3Frame, tklabel(methodFrame, text="", width=1), method4Frame, sticky="w") tkgrid(methodFrame, sticky="w") tkgrid(rhsEntry, sticky="w") tkgrid(xBox, xScroll,sticky="w") tkgrid(plusButton, timesButton, colonButton, slashButton, inButton, sticky="w") tkgrid(minusButton,powerButton, leftParenButton, rightParenButton, sticky="w") tkgrid(tklabel(xFrame, text="Explanatory"), sticky="w") tkgrid(x1Frame, sticky="w") tkgrid(x2Frame, tklabel(xFrame, text="", width=1), x3Frame, sticky="w") tkgrid(x4Frame, sticky="w") tkgrid(xFrame, sticky="w") tkgrid(tklabel(plot1Frame, text="Plot method"), sticky="w") tkgrid(typeBox, typeScroll, sticky="nw") tkgrid(tklabel(plot2Frame, text="Plot variable"), sticky="w") tkgrid(axisBox, axisScroll, sticky="nw") tkgrid(tklabel(plot3Frame, text="axes", width=10), choice, sticky="w") tkgrid(dataCheckBox, tklabel(plot3Frame, text="add scores to dataframe"), sticky="w") tkgrid(tklabel(plot4Frame, text="cex", width=10), cexa, sticky="w") tkgrid(tklabel(plot4Frame, text="colour", width=10), cola, sticky="w") tkgrid(plot1Frame, tklabel(plotFrame, text="", width=1), plot2Frame, sticky="w") 
tkgrid(plot3Frame, tklabel(plotFrame, text="", width=1), plot4Frame, sticky="w") tkgrid(plotFrame, sticky="w") tkgrid(OKbutton, plotButton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(xScroll, sticky="ns") tkgrid.configure(typeScroll, sticky="ns") tkgrid.configure(axisScroll, sticky="ns") tkgrid.configure(methodScroll, sticky="ns") tkgrid.configure(distScroll, sticky="ns") tkselection.set(xBox, 0) tkselection.set(typeBox, 0) tkselection.set(distBox, 0) tkselection.set(methodBox, 0) tkselection.set(axisBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkbind(xBox, "<Double-ButtonPress-1>", onDoubleClick) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) tkwait.window(top) } clusterGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Cluster analysis") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) modelName <- tclVar("Cluster.1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) methodFrame <- tkframe(top, relief="groove", borderwidth=2) method1Frame <- tkframe(methodFrame) method2Frame <- tkframe(methodFrame) method3Frame <- tkframe(methodFrame) method4Frame <- tkframe(methodFrame) methodBox <- tklistbox(method1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(method1Frame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) tkset(methodScroll, ...)) methods <- c("hclust", "hclust (fastcluster)", "agnes", "diana", "kmeans", "cmeans", "kkmeans", "specc", "cascadeKM", "pam", "clara", "fanny", "NbClust (not kmeans)", "NbClust (kmeans)") for (x in methods) tkinsert(methodBox, "end", x) distBox <- tklistbox(method2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") distScroll <- tkscrollbar(method2Frame, repeatinterval=5, command=function(...) tkyview(distBox, ...)) tkconfigure(distBox, yscrollcommand=function(...) tkset(distScroll, ...)) distances <- c("euclidean", "manhattan", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower", "morisita", "horn", "mountford", "raup" , "binomial", "chao", "cao", "mahalanobis", "hellinger", "aitchison", "robust.aitchison") for (x in distances) tkinsert(distBox, "end", x) treatasdistVariable <- tclVar("0") treatasdistCheckBox <- tkcheckbutton(method3Frame, variable=treatasdistVariable) summaryVariable <- tclVar("1") summaryCheckBox <- tkcheckbutton(method3Frame, variable=summaryVariable) copheneticVariable <- tclVar("0") copheneticCheckBox <- tkcheckbutton(method3Frame, variable=copheneticVariable) clustersVariable <- tclVar("5") clustersa <- tkentry(method3Frame, width=10, textvariable=clustersVariable) dataVariable <- tclVar("0") dataCheckBox <- tkcheckbutton(method3Frame, variable=dataVariable) algoBox <- tklistbox(method4Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") algoScroll <- tkscrollbar(method4Frame, repeatinterval=5, command=function(...) tkyview(algoBox, ...)) tkconfigure(algoBox, yscrollcommand=function(...) 
tkset(algoScroll, ...)) algos <- c("average", "single", "complete", "ward", "ward.D", "ward.D2", "mcquitty", "weighted", "median", "centroid") for (x in algos) tkinsert(algoBox, "end", x) plotFrame <- tkframe(top, relief="groove", borderwidth=2) plot1Frame <- tkframe(plotFrame) plot2Frame <- tkframe(plotFrame) plot3Frame <- tkframe(plotFrame) plot4Frame <- tkframe(plotFrame) typeBox <- tklistbox(plot1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") typeScroll <- tkscrollbar(plot1Frame, repeatinterval=5, command=function(...) tkyview(typeBox, ...)) tkconfigure(typeBox, yscrollcommand=function(...) tkset(typeScroll, ...)) types <- c("dendrogram1 (hang = -1)", "dendrogram2 (hang = 0.1)", "dendrogram3 (horizontal)", "dendrogram (color_branches)", "dendrogram (colored_dots)", "circlize_dendrogram", "phylogram (ape package)", "cladogram (ape package)", "fan (ape package)", "unrooted (ape package)", "rectangles", "pruned dendrogram", "silhouette", "cophenetic", "cascadeKM", "reorder (variable)", "labels (variable)", "tiplabels (variable size)", "tiplabels (factor)", "aspectHeatmap (ClassDiscovery)", "aspectHeatmap (save cluster membership)", "heat map (Thresher)") for (x in types) tkinsert(typeBox, "end", x) cexVariable <- tclVar("1") cexa <- tkentry(plot3Frame, width=8, textvariable=cexVariable) colVariable <- tclVar("blue") cola <- tkentry(plot4Frame, width=8, textvariable=colVariable) axisBox <- tklistbox(plot2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") axisScroll <- tkscrollbar(plot2Frame, repeatinterval=5, command=function(...) tkyview(axisBox, ...)) tkconfigure(axisBox, yscrollcommand=function(...) tkset(axisScroll, ...)) for (x in variables) tkinsert(axisBox, "end", x) onOK <- function(){ doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) method <- methods[as.numeric(tkcurselection(methodBox))+1] dist <- distances[as.numeric(tkcurselection(distBox))+1] algo <- algos[as.numeric(tkcurselection(algoBox))+1] treatasdist <- tclvalue(treatasdistVariable)==1 clusters <- tclvalue(clustersVariable) axisvar <- .variables[as.numeric(tkcurselection(axisBox))+1] if (method=="agnes" || method=="diana" || method=="pam" || method=="clara" || method=="fanny") { justDoIt(paste("library(cluster)")) logger(paste("library(cluster)")) } if (method=="NbClust (not kmeans)" || method=="NbClust (kmeans)") { justDoIt(paste("library(NbClust)")) logger(paste("library(NbClust)")) } if (method != "kmeans" && method != "cmeans" && method != "kkmeans" && method != "specc" && method != "cascadeKM" && method != "NbClust (kmeans)") { if(treatasdist==F){ logger(paste("distmatrix <- vegdist(", .communityDataSet, ", method='", dist, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist, "')", sep="")) }else{ logger(paste("distmatrix <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } } if (method=="hclust") { command <- paste("hclust(distmatrix, method='", algo, "')", sep="") } if (method=="hclust (fastcluster)") { justDoIt(paste("library(fastcluster)")) logger(paste("library(fastcluster)")) command <- paste("fastcluster::hclust(distmatrix, method='", algo, "')", sep="") } if (method=="agnes") { command <- 
paste("agnes(distmatrix, method='", algo, "')", sep="") } if (method=="diana") { command <- paste("diana(distmatrix)", sep="") } if (method=="kmeans") { command <- paste("kmeans(", .communityDataSet, ", centers=", clusters, ", iter.max=100)", sep="") } if (method=="cmeans") { justDoIt(paste("library(e1071)")) logger(paste("library(e1071)")) command <- paste("cmeans(", .communityDataSet, ", centers=", clusters, ", dist='", dist, "', iter.max=100)", sep="") } if (method=="kkmeans") { justDoIt(paste("library(kernlab)")) logger(paste("library(kernlab)")) command <- paste("kkmeans(as.matrix(", .communityDataSet, "), centers=", clusters, ")", sep="") } if (method=="specc") { justDoIt(paste("library(kernlab)")) logger(paste("library(kernlab)")) command <- paste("specc(as.matrix(", .communityDataSet, "), centers=", clusters, ")", sep="") } if (method=="cascadeKM") { command <- paste("cascadeKM(", .communityDataSet, ", inf.gr=2, sup.gr=", clusters, ", iter = 100, criterion = 'calinski')", sep="") } if (method=="pam") { command <- paste("pam(distmatrix, k=", clusters, ")", sep="") } if (method=="clara") { size <- as.numeric(clusters)+1 command <- paste("clara(distmatrix, k=", clusters, ", sampsize=", size,")", sep="") } if (method=="fanny") { command <- paste("fanny(distmatrix, k=", clusters, ")", sep="") } if (method=="NbClust (not kmeans)") { logger(paste("In case of error warning of indefinite TSS matrix, try single index as for example 'ch', 'db' or 'sdbw'", sep="")) command <- paste("NbClust(data=", .communityDataSet, ", diss=distmatrix, distance=NULL, min.nc=2, max.nc=10, method='", algo, "', index='alllong')", sep="") } if (method=="NbClust (kmeans)") { command <- paste("NbClust(data=", .communityDataSet, ", diss=NULL, distance='", dist, "', min.nc=2, max.nc=10, method='kmeans', index='alllong')", sep="") } modelValue <- tclvalue(modelName) logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) sum <- tclvalue(summaryVariable) == "1" if (sum==T) { if (method=="kmeans" || method=="cmeans" || method=="kkmeans" || method=="specc" || method=="hclust" || method=="hclust (fastcluster)" || method=="cascadeKM" || method=="NbClust (not kmeans)" || method=="NbClust (kmeans)") { doItAndPrint(paste(modelValue)) doItAndPrint(paste("attributes(", modelValue, ")", sep="")) if (method=="hclust" || method=="hclust (fastcluster)") {doItAndPrint(paste("treeheight(", modelValue, ")", sep=""))} }else{ doItAndPrint(paste("summary(", modelValue, ")", sep="")) } } coph <- tclvalue(copheneticVariable) == "1" if (coph==T && method != "kmeans" && method!="cmeans" && method !="kkmeans" && method != "specc" && method != "pam" && method != "clara" && method != "fanny") { logger(paste("copheneticdist <- cophenetic(", modelValue, ")", sep="")) assign("copheneticdist", justDoIt(paste("cophenetic(", modelValue, ")", sep="")), envir=.GlobalEnv) doItAndPrint(paste("mantel(distmatrix, copheneticdist, permutations=100)",sep="")) } data <- tclvalue(dataVariable) == "1" if (data==T && method!="kkmeans" && method!="specc" && method!="cascadeKM" && method!="NbClust (not kmeans)" && method!="NbClust (kmeans)") { if (method=="kmeans" || method=="cmeans" || method== "pam" || method=="clara" || method=="fanny") { justDoIt(paste(.activeDataSet, "$", modelValue, ".cluster <- as.factor(", modelValue, "$cluster)", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".cluster <- as.factor(", modelValue, "$cluster)", sep="")) }else{ justDoIt(paste(.activeDataSet, "$", modelValue, ".cluster 
<- as.factor(cutree(", modelValue, ", k=", clusters, "))", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".cluster <- as.factor(cutree(", modelValue, ", k=", clusters, "))", sep="")) } activeDataSet(.activeDataSet) } if (method=="kkmeans" || method=="specc") { if (data==T) { justDoIt(paste(.activeDataSet, "$", modelValue, ".cluster <- as.factor(", modelValue, "@.Data)", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".cluster <- as.factor(", modelValue, "@.Data)", sep="")) activeDataSet(.activeDataSet) } } if (method=="NbClust (not kmeans)" || method=="NbClust (kmeans)") { if (data==T) { justDoIt(paste(.activeDataSet, "$", modelValue, ".NbClust <- as.factor(", modelValue, "$Best.partition)", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".NbClust <- as.factor(", modelValue, "$Best.partition)", sep="")) activeDataSet(.activeDataSet) } } } onPlot <- function(){ method <- methods[as.numeric(tkcurselection(methodBox))+1] modelValue <- tclvalue(modelName) plottype <- types[as.numeric(tkcurselection(typeBox))+1] dist <- distances[as.numeric(tkcurselection(distBox))+1] algo <- algos[as.numeric(tkcurselection(algoBox))+1] clusters <- tclvalue(clustersVariable) axisvar <- .variables[as.numeric(tkcurselection(axisBox))+1] col <- tclvalue(colVariable) cex <- tclvalue(cexVariable) justDoIt(paste("par(cex=",cex,")", sep="")) logger(paste("par(cex=",cex,")", sep="")) if (plottype == "dendrogram1 (hang = -1)"){ doItAndPrint(paste("plot(as.dendrogram(", modelValue, ", hang = -1), horiz=F, edgePar=list(col='", col, "'), nodePar=list(pch=NA, lab.col='", col, "'), main='', sub='', xlab='', ylab='')", sep="")) } if (plottype == "dendrogram2 (hang = 0.1)"){ doItAndPrint(paste("plot(as.dendrogram(", modelValue, ", hang = 0.1), horiz=F, edgePar=list(col='", col, "'), nodePar=list(pch=NA, lab.col='", col, "'), main='', sub='', xlab='', ylab='')", sep="")) } if (plottype == "dendrogram3 (horizontal)"){ doItAndPrint(paste("plot(as.dendrogram(", modelValue, "), horiz=T, edgePar=list(col='", col, "'), nodePar=list(pch=NA, lab.col='", col, "'), main='', sub='', xlab='', ylab='')", sep="")) } if (plottype == "dendrogram (color_branches)" || plottype == "dendrogram (colored_dots)" || plottype == "circlize_dendrogram") { justDoIt(paste("library(dendextend)", sep="")) logger(paste("library(dendextend)", sep="")) logger(paste("dendrogram.new <- color_branches(as.dendrogram(", modelValue, "), k=", clusters, ")", sep="")) assign("dendrogram.new", justDoIt(paste("color_branches(as.dendrogram(", modelValue, "), k=", clusters, ")", sep="")), envir=.GlobalEnv) logger(paste("dendrogram.new <- color_labels(dendrogram.new, k=", clusters, ")", sep="")) assign("dendrogram.new", justDoIt(paste("color_labels(dendrogram.new, k=", clusters, ")", sep="")), envir=.GlobalEnv) if (plottype == "dendrogram (color_branches)") {doItAndPrint(paste("plot(dendrogram.new)",sep=""))} if (plottype == "dendrogram (colored_dots)") { doItAndPrint(paste("plot(dendrogram.new)",sep="")) doItAndPrint(paste("colored_dots(as.numeric(", .activeDataSet, "$", axisvar, "), dendrogram.new, rowLabels='", axisvar, "')", sep="")) } if (plottype == "circlize_dendrogram") {doItAndPrint(paste("circlize_dendrogram(dendrogram.new)",sep=""))} } if (plottype == "phylogram (ape package)"){ justDoIt(paste("library(ape)", sep="")) logger(paste("library(ape)", sep="")) doItAndPrint(paste("plot(as.phylo(as.hclust(", modelValue, ")), type='phylogram', direction='rightwards', edge.color='", col, "', tip.color='", col, "', font=1)", sep="")) } if (plottype 
== "cladogram (ape package)"){ justDoIt(paste("library(ape)", sep="")) logger(paste("library(ape)", sep="")) doItAndPrint(paste("plot(as.phylo(as.hclust(", modelValue, ")), type='cladogram', edge.color='", col, "', tip.color='", col, "', font=1)", sep="")) } if (plottype == "fan (ape package)"){ justDoIt(paste("library(ape)", sep="")) logger(paste("library(ape)", sep="")) doItAndPrint(paste("plot(as.phylo(as.hclust(", modelValue, ")), type='fan', edge.color='", col, "', tip.color='", col, "', font=1)", sep="")) } if (plottype == "unrooted (ape package)"){ justDoIt(paste("library(ape)", sep="")) logger(paste("library(ape)", sep="")) doItAndPrint(paste("plot(as.phylo(as.hclust(", modelValue, ")), type='unrooted', edge.color='", col, "', tip.color='", col, "', font=1)", sep="")) } if (plottype == "pruned dendrogram"){ justDoIt(paste("library(maptree)", sep="")) logger(paste("library(maptree)", sep="")) doItAndPrint(paste("plot(clip.clust(as.hclust(", modelValue,"), data=", .communityDataSet, ", k=", clusters, "))", sep="")) } if (plottype == "silhouette"){ doItAndPrint(paste("plot(silhouette(cutree(as.hclust(", modelValue,"), k=", clusters, "), distmatrix))", sep="")) } if (plottype == "rectangles" && method != "kmeans" && method != "cascadeKM" && method != "pam" && method != "clara" && method != "fanny"){ justDoIt(paste("rect.hclust(", modelValue, ", k=", clusters, ", border='", col, "')", sep="")) logger(paste("rect.hclust(", modelValue, ", k=", clusters, ", border='", col, "')", sep="")) } if (plottype == "cophenetic" && method != "kmeans" && method != "cascadeKM" && method != "pam" && method != "clara" && method != "fanny"){ logger(paste("copheneticdist <- cophenetic(", modelValue, ")", sep="")) assign("copheneticdist", justDoIt(paste("cophenetic(", modelValue, ")", sep="")), envir=.GlobalEnv) doItAndPrint(paste("plot(distmatrix, copheneticdist, col='", col, "')", sep="")) doItAndPrint(paste("abline(0,1)", sep="")) } if (plottype == "cascadeKM"){ doItAndPrint(paste("plot(", modelValue, ")", sep="")) } if (plottype == "labels (variable)"){ doItAndPrint(paste("plot(", modelValue, ", labels=", .activeDataSet, "$", axisvar, ", main='', sub='', xlab='', ylab='')", sep="")) } if (plottype == "reorder (variable)"){ command <- paste("reorder(as.hclust(", modelValue, "), wts=as.numeric(", .activeDataSet, "$", axisvar, "))", sep="") logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste("plot(as.dendrogram(", modelValue, ", hang = 0.1), horiz=F, edgePar=list(col='", col, "'), nodePar=list(pch=NA, lab.col='", col, "'), main='', sub='', xlab='', ylab='')", sep="")) } if (plottype == "tiplabels (variable size)"){ justDoIt(paste("library(ape)", sep="")) logger(paste("library(ape)", sep="")) doItAndPrint(paste("plot(as.phylo(as.hclust(", modelValue, ")), type='phylogram', direction='rightwards', edge.color='", col, "', tip.color='", col, "', font=1, label.offset=0.05)", sep="")) doItAndPrint(paste("tiplabels(pch=19, col='", col, "', cex=3*as.numeric(", .activeDataSet, "$", axisvar, ")/max(as.numeric(", .activeDataSet, "$", axisvar, ")))", sep="")) } if (plottype == "tiplabels (factor)"){ justDoIt(paste("library(ape)", sep="")) logger(paste("library(ape)", sep="")) doItAndPrint(paste("plot(as.phylo(as.hclust(", modelValue, ")), type='phylogram', direction='rightwards', edge.color='", col, "', tip.color='", col, "', font=1, label.offset=0.05)", sep="")) doItAndPrint(paste("tiplabels(pch=19, cex=2, col=as.numeric(", .activeDataSet, "$", 
axisvar, "))", sep="")) } if (plottype == "aspectHeatmap (ClassDiscovery)"){ justDoIt(paste("library(ClassDiscovery)", sep="")) logger(paste("library(ClassDiscovery)", sep="")) doItAndPrint(paste("aspectHeatmap(as.matrix(", .communityDataSet, "), Rowv=as.dendrogram(", modelValue, "), Colv=NA, main='heat map from aspectHeatmap')", sep="")) } if (plottype == "aspectHeatmap (save cluster membership)"){ justDoIt(paste(.activeDataSet, "$", modelValue, ".aHm <- as.numeric(cutree(", modelValue, ", k=", clusters, "))", sep="")) logger(paste(.activeDataSet, "$", modelValue, ".aHm <- as.numeric(cutree(", modelValue, ", k=", clusters, "))", sep="")) justDoIt(paste("library(ClassDiscovery)", sep="")) logger(paste("library(ClassDiscovery)", sep="")) doItAndPrint(paste("aspectHeatmap(as.matrix(", .communityDataSet, "), Rowv=as.dendrogram(", modelValue, "), Colv=NA, RowSideColors=colorspace::rainbow_hcl(max(", .activeDataSet, "$", modelValue, ".aHm), c=90, l=50)[", .activeDataSet, "$", modelValue, ".aHm], main='heat map from aspectHeatmap')", sep="")) } if (plottype == "heat map (Thresher)"){ justDoIt(paste("library(Thresher)", sep="")) logger(paste("library(Thresher)", sep="")) doItAndPrint(paste("heat(Thresher(t(as.matrix(", .communityDataSet, ")), metric='", dist, "', linkage='", algo, "', scale=FALSE), main='heat map via Thresher')", sep="")) } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") plotButton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onPlot) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save cluster as: ", width=20), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(method1Frame, text="Cluster method"), sticky="w") tkgrid(methodBox, methodScroll,sticky="w") tkgrid(tklabel(method2Frame, text="Distance"), sticky="w") tkgrid(distBox, distScroll,sticky="w") tkgrid(summaryCheckBox, tklabel(method3Frame, text="cluster summary"), sticky="w") tkgrid(treatasdistCheckBox, tklabel(method3Frame, text="as.dist(Community)", width=15), sticky="w") tkgrid(copheneticCheckBox, tklabel(method3Frame, text="cophenetic correlation"), sticky="w") tkgrid(tklabel(method3Frame, text="clusters", width=8), clustersa, sticky="w") tkgrid(dataCheckBox, tklabel(method3Frame, text="save cluster membership"), sticky="w") tkgrid(tklabel(method4Frame, text="Cluster options"), sticky="w") tkgrid(algoBox, algoScroll, sticky="nw") tkgrid(method1Frame, tklabel(methodFrame, text="", width=1), method2Frame, sticky="w") tkgrid(method3Frame, tklabel(methodFrame, text="", width=1), method4Frame, sticky="w") tkgrid(methodFrame, sticky="w") tkgrid(tklabel(plot1Frame, text="Plot options"), sticky="w") tkgrid(typeBox, typeScroll, sticky="nw") tkgrid(tklabel(plot2Frame, text="Plot variable"), sticky="w") tkgrid(axisBox, axisScroll, sticky="nw") tkgrid(tklabel(plot3Frame, text="cex", width=5), cexa, sticky="w") tkgrid(tklabel(plot4Frame, text="colour", width=5), cola, sticky="w") tkgrid(plot1Frame, tklabel(plotFrame, text="", width=1), plot2Frame, sticky="w") tkgrid(plot3Frame, tklabel(plotFrame, text="", width=1), plot4Frame, sticky="w") tkgrid(plotFrame, sticky="w") tkgrid(OKbutton, plotButton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(typeScroll, sticky="ns") tkgrid.configure(algoScroll, sticky="ns") tkgrid.configure(methodScroll, sticky="ns") 
tkgrid.configure(distScroll, sticky="ns") tkgrid.configure(axisScroll, sticky="ns") tkselection.set(typeBox, 0) tkselection.set(methodBox, 0) tkselection.set(algoBox, 0) tkselection.set(distBox, 0) tkselection.set(axisBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) tkwait.window(top) } mantelGUI <- function(){ top <- tktoplevel() tkwm.title(top, "Compare distance matrices") .activeDataSet <- ActiveDataSet() .communityDataSet <- CommunityDataSet() .variables <- Variables() variables <- paste(.variables, ifelse(is.element(.variables, Factors()), "[factor]", "")) methodFrame <- tkframe(top, relief="groove", borderwidth=2) method1Frame <- tkframe(methodFrame) method2Frame <- tkframe(methodFrame) method3Frame <- tkframe(methodFrame) method4Frame <- tkframe(methodFrame) method5Frame <- tkframe(methodFrame) method6Frame <- tkframe(methodFrame) testBox <- tklistbox(method1Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") testScroll <- tkscrollbar(method1Frame, repeatinterval=5, command=function(...) tkyview(testBox, ...)) tkconfigure(testBox, yscrollcommand=function(...) tkset(testScroll, ...)) tests <- c("mantel", "anosim (factor)", "mrpp (factor)", "rankindex", "bioenv", "betadisper (factor)", "betadisper (factor, lingoes)", "betadisper (factor, cailliez)", "meandist (factor)", "simper (factor)") for (x in tests) tkinsert(testBox, "end", x) dist1Box <- tklistbox(method3Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") dist1Scroll <- tkscrollbar(method3Frame, repeatinterval=5, command=function(...) tkyview(dist1Box, ...)) tkconfigure(dist1Box, yscrollcommand=function(...) tkset(dist1Scroll, ...)) distances <- c("euclidean", "manhattan", "canberra", "clark", "bray", "kulczynski", "jaccard", "gower", "altGower", "morisita", "horn", "mountford", "raup" , "binomial", "chao", "cao", "mahalanobis", "hellinger", "aitchison", "robust.aitchison") for (x in distances) tkinsert(dist1Box, "end", x) dist2Box <- tklistbox(method2Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") dist2Scroll <- tkscrollbar(method2Frame, repeatinterval=5, command=function(...) tkyview(dist2Box, ...)) tkconfigure(dist2Box, yscrollcommand=function(...) tkset(dist2Scroll, ...)) distances2 <- c("daisy (factor)",distances) for (x in distances2) tkinsert(dist2Box, "end", x) scaleBox <- tklistbox(method4Frame, width=27, height=5, selectmode="single", background="white", exportselection="FALSE") scaleScroll <- tkscrollbar(method4Frame, repeatinterval=5, command=function(...) tkyview(scaleBox, ...)) tkconfigure(scaleBox, yscrollcommand=function(...) tkset(scaleScroll, ...)) svariables <- c("all",variables) for (x in svariables) tkinsert(scaleBox, "end", x) treatasdistVariable <- tclVar("0") treatasdistCheckBox <- tkcheckbutton(method5Frame, variable=treatasdistVariable) plotVariable <- tclVar("0") plotCheckBox <- tkcheckbutton(method5Frame, variable=plotVariable) permVariable <- tclVar("999") perma <- tkentry(method5Frame, width=10, textvariable=permVariable) methodBox <- tklistbox(method6Frame, width=27, height=3, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(method6Frame, repeatinterval=3, command=function(...) 
tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) tkset(methodScroll, ...)) methods <- c("pearson", "spearman", "kendall") for (x in methods) tkinsert(methodBox, "end", x) onOK <- function(){ test <- tests[as.numeric(tkcurselection(testBox))+1] dist1 <- distances[as.numeric(tkcurselection(dist1Box))+1] dist2 <- distances2[as.numeric(tkcurselection(dist2Box))+1] method <- methods[as.numeric(tkcurselection(methodBox))+1] permutations <- tclvalue(permVariable) treatasdist <- tclvalue(treatasdistVariable)==1 var2 <- svariables[as.numeric(tkcurselection(scaleBox))+1] if (test == "mantel") { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(",.communityDataSet, ", method='",dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } if (var2 == "all") { if(dist2 == "daisy (factor)") { justDoIt(paste("library(cluster)")) logger(paste("library(cluster)")) logger(paste("distmatrix2 <- as.dist(as.matrix(daisy(", .activeDataSet, ")))", sep="")) assign("distmatrix2", justDoIt(paste("as.dist(as.matrix(daisy(", .activeDataSet, ")))", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix2 <- vegdist(", .activeDataSet, ", method='",dist2, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix2", justDoIt(paste("vegdist(",.activeDataSet, ", method='",dist2, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) } }else{ var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { justDoIt(paste("library(cluster)")) logger(paste("library(cluster)")) logger(paste("distmatrix2 <- as.dist(as.matrix(daisy(", .activeDataSet, "[,'", var2, "',drop=F])))", sep="")) assign("distmatrix2", justDoIt(paste("as.dist(as.matrix(daisy(", .activeDataSet, "[,'", var2, "',drop=F])))", sep="")), envir=.GlobalEnv) }else{ logger(paste("distmatrix2 <- vegdist(", .activeDataSet, "$", var2, ", method='",dist2, "', pseudocount=1)", sep="")) assign("distmatrix2", justDoIt(paste("vegdist(", .activeDataSet, "$", var2,", method='",dist2, "', pseudocount=1)", sep="")), envir=.GlobalEnv) } } doItAndPrint(paste("mantel(distmatrix1, distmatrix2, method='", method, "', permutations=", permutations, ")",sep="")) } if (test == "anosim (factor)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(", .communityDataSet, ", method='",dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", 
justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("summary(anosim(distmatrix1, grouping=", .activeDataSet, "$", var2, ", permutations=", permutations, "))",sep="")) } } if (test == "mrpp (factor)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(",.communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("mrpp(distmatrix1, grouping=", .activeDataSet, "$", var2, ", permutations=", permutations, ")",sep="")) } } if (test == "rankindex") { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if (var2 == "all") { doItAndPrint(paste("rankindex(", .activeDataSet, ", ", .communityDataSet, ", method='",method, "')", sep="")) }else{ var2 <- .variables[as.numeric(tkcurselection(scaleBox))] doItAndPrint(paste("rankindex(", .activeDataSet, "$", var2, ", ", .communityDataSet, ", method='", method, "')", sep="")) } } if (test == "bioenv") { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) doItAndPrint(paste("bioenv(", .communityDataSet, ", prepare.bioenv(", .activeDataSet, ", as.numeric=c()), method='", method, "', index='", dist1, "'", sep="")) } if (test == "betadisper (factor)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', bias.adjust=F, sqrt.dist=F, add=F)", sep="")) doItAndPrint(paste("anova(betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, add=F))", sep="")) doItAndPrint(paste("permutest(betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, add=F), pairwise=T, permutations=", permutations, ")", sep="")) } } if (test == "betadisper (factor, lingoes)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { 
doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', bias.adjust=F, sqrt.dist=F, add='lingoes')", sep="")) doItAndPrint(paste("anova(betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, add='lingoes'))", sep="")) doItAndPrint(paste("permutest(betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, add='lingoes'), pairwise=T, permutations=", permutations, ")", sep="")) } } if (test == "betadisper (factor, cailliez)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, bias.adjust=F, add='cailliez')", sep="")) doItAndPrint(paste("anova(betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, add='cailliez'))", sep="")) doItAndPrint(paste("permutest(betadisper(distmatrix1, group=", .activeDataSet, "$", var2, ", type='centroid', sqrt.dist=F, add='cailliez'), pairwise=T, permutations=", permutations, ")", sep="")) } } if (test == "meandist (factor)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) if(treatasdist==F){ logger(paste("distmatrix1 <- vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")) assign("distmatrix1", justDoIt(paste("vegdist(", .communityDataSet, ", method='", dist1, "', na.rm=T, pseudocount=1)", sep="")), envir=.GlobalEnv) doItAndPrint(paste("dist.eval(", .communityDataSet, ",'", dist1, "')", sep="")) }else{ logger(paste("distmatrix1 <- as.dist(", .communityDataSet, ")", sep="")) assign("distmatrix1", justDoIt(paste("as.dist(",.communityDataSet, ")", sep="")), envir=.GlobalEnv) } doItAndPrint(paste("meandist(distmatrix1, grouping=", .activeDataSet, "$", var2, ")",sep="")) doItAndPrint(paste("summary(meandist(distmatrix1, 
grouping=", .activeDataSet, "$", var2, "))",sep="")) } } if (test == "simper (factor)" && var2 != "all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { doItAndPrint(paste("check.datasets(", .communityDataSet, ", ", .activeDataSet, ")", sep="")) doItAndPrint(paste("simper(", .communityDataSet, ", group=", .activeDataSet, "$", var2, ", permutations=", permutations, ")",sep="")) doItAndPrint(paste("summary(simper(", .communityDataSet, ", group=", .activeDataSet, "$", var2, ", permutations=", permutations, "))",sep="")) } } plotit <- tclvalue(plotVariable) == "1" var2 <- svariables[as.numeric(tkcurselection(scaleBox))+1] if (plotit==T && test=="mantel" && var2=="all") { justDoIt(paste("plot(distmatrix2, distmatrix1,xlab='environmental distance',ylab='ecological distance')", sep="")) logger(paste("plot(distmatrix2, distmatrix1,xlab='environmental distance',ylab='ecological distance')", sep="")) } if (plotit==T && test=="mantel" && var2!="all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { justDoIt(paste("plot(distmatrix1~as.factor(distmatrix2),xlab='environmental distance',ylab='ecological distance')", sep="")) logger(paste("plot(distmatrix1~as.factor(distmatrix2),xlab='environmental distance',ylab='ecological distance')", sep="")) }else{ justDoIt(paste("plot(distmatrix2, distmatrix1,xlab='environmental distance',ylab='ecological distance')", sep="")) logger(paste("plot(distmatrix2, distmatrix1,xlab='environmental distance',ylab='ecological distance')", sep="")) } } if (plotit==T && test!="mantel" && test!="rankindex" && test!="bioenv" && var2!="all") { var2 <- .variables[as.numeric(tkcurselection(scaleBox))] varfactor <- eval(parse(text=paste("is.factor(",.activeDataSet, "$", var2, ")", sep="")), envir=.GlobalEnv) if (varfactor==T) { justDoIt(paste("library(cluster)")) logger(paste("library(cluster)")) logger(paste("distmatrix2 <- as.dist(as.matrix(daisy(", .activeDataSet, "[,'", var2, "',drop=F])))", sep="")) assign("distmatrix2", justDoIt(paste("distmatrix2 <- daisy(", .activeDataSet, "[,'", var2, "',drop=F])", sep="")), envir=.GlobalEnv) justDoIt(paste("plot(distmatrix1~as.factor(distmatrix2),xlab='environmental distance',ylab='ecological distance')", sep="")) logger(paste("plot(distmatrix1~as.factor(distmatrix2),xlab='environmental distance',ylab='ecological distance')", sep="")) } } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(method1Frame, text="Type of test"), sticky="nw") tkgrid(testBox, testScroll,sticky="nw") tkgrid(tklabel(method3Frame, text="Community distance"), sticky="nw") tkgrid(dist1Box, dist1Scroll,sticky="nw") tkgrid(tklabel(method2Frame, text="Environmental distance"), sticky="w") tkgrid(dist2Box, dist2Scroll,sticky="w") tkgrid(tklabel(method4Frame, text="Environmental variable"), sticky="w") tkgrid(scaleBox, scaleScroll, sticky="w") tkgrid(treatasdistCheckBox, tklabel(method5Frame, text="as.dist(Community)", width=15), sticky="w") tkgrid(plotCheckBox, tklabel(method5Frame, text="plot results"), sticky="w") tkgrid(tklabel(method5Frame, 
text="permutations", width=10), perma, sticky="w") tkgrid(tklabel(method6Frame, text="correlation"), sticky="w") tkgrid(methodBox, methodScroll, sticky="nw") tkgrid(method1Frame, tklabel(methodFrame, text="", width=1), method2Frame, sticky="w") tkgrid(method3Frame, tklabel(methodFrame, text="", width=1), method4Frame, sticky="w") tkgrid(method5Frame, tklabel(methodFrame, text="", width=1), method6Frame, sticky="w") tkgrid(methodFrame, sticky="w") tkgrid(OKbutton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(dist1Scroll, sticky="ns") tkgrid.configure(dist2Scroll, sticky="ns") tkgrid.configure(methodScroll, sticky="ns") tkgrid.configure(scaleScroll, sticky="ns") tkgrid.configure(testScroll, sticky="ns") tkselection.set(dist1Box, 0) tkselection.set(dist2Box, 0) tkselection.set(methodBox, 0) tkselection.set(scaleBox, 0) tkselection.set(testBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(methodBox) tkwait.window(top) } cepNamesCommunity <- function() { .communityDataSet <- CommunityDataSet() justDoIt(paste("colnames(", .communityDataSet, ") <- make.cepnames(colnames(", .communityDataSet, "))", sep="")) communityDataSet(.communityDataSet) } helpBiodiversityR <- function() { print(help(package="BiodiversityR", help_type="html")) } allcitations <- function() { doItAndPrint(paste("loaded.citations()")) } browseTDAwebsite <- function() { browseURL("https://www.worldagroforestry.org/output/tree-diversity-analysis") } browseTDAmanual <- function() { browseURL("http://www.worldagroforestry.org/downloads/Publications/PDFS/b13695.pdf") } vegan.diversity.pdf <- function() { browseURL(paste(system.file(package="vegan"), "/doc/diversity-vegan.pdf", sep="")) } vegan.ordination.pdf <- function() { browseURL(paste(system.file(package="vegan"), "/doc/intro-vegan.pdf", sep="")) } oksanen.website <- function() { browseURL(paste("http://cc.oulu.fi/~jarioksa/opetus/metodi", sep="")) } ################### # GUI for ensemble suitability modelling # March 2016 ens.start <- function() { browseURL(paste(file.path(path.package(package="BiodiversityR")[1], "doc"), "/", gettextRcmdr("Getting-Started-ensemble-GUI"), ".txt", sep="")) } dismo.pdf <- function() { browseURL(paste(system.file(package="dismo"), "/doc/sdm.pdf", sep="")) } ens.directory <- function() { logger(paste("Select directory with raster layers and (ideally) presence locations", sep="")) logger(paste("Results will be saved in subfolders of this directory", sep="")) logger(paste("As alternative, use: R Commander > File > Change working directory...", sep="")) doItAndPrint(paste("setwd(choose.dir())", sep="")) doItAndPrint(paste("getwd()", sep="")) } ens.workspace <- function(){ logger(paste("Select .RData file", sep="")) doItAndPrint(paste("load(choose.files(default='*.RData', multi=F))", sep="")) Rcmdr::putRcmdr("dialog.values", list()) activateMenus() } ens.grd <- function(s.files) { if(is.null(s.files) == T) {stop("no files provided")} if(length(s.files) < 1) {stop("no files provided")} for (i in 1:length(s.files)) { working.raster <- raster::raster(s.files[i]) print(working.raster) raster::writeRaster(working.raster, filename=working.raster@file@name, format="raster", overwrite=T) } } ens.grd.menu <- function() { logger(paste("Select files to be converted to grd format of raster package", sep="")) doItAndPrint(paste("selected.files <- choose.files()", sep="")) 
logger(paste("As alternative use: selected.files <- list.files(path=getwd(), pattern='.tif', full.names=TRUE)", sep="")) justDoIt(paste("selected.files <- normalizePath(selected.files, mustWork=F)", sep="")) doItAndPrint(paste("selected.files", sep="")) doItAndPrint(paste("ens.grd(selected.files)", sep="")) } if (exists("stack.list") == F) {assign("stack.list", NULL, envir=.GlobalEnv)} stack.listP <- function() {return(!is.null(stack.list))} if (exists("stack.focal") == F) {assign("stack.focal", NULL, envir=.GlobalEnv)} stack.focalP <- function() {return(!is.null(stack.focal))} update.stacklist <- function(){ all.objects <- ls(envir=.GlobalEnv) stack.list2 <- stack.list for (i in 1:length(all.objects)) { eval1 <- eval(parse(text=paste(all.objects[i])), envir=.GlobalEnv) # changed to any( if(any(class(eval1) == "RasterStack")) {stack.list2 <- c(stack.list2, all.objects[i])} } stack.list2 <- unique(stack.list2) stack.list2 <- stack.list2[stack.list2 != "eval1"] assign("eval1", NULL, envir=.GlobalEnv) assign("stack.list", stack.list2, envir=.GlobalEnv) } update.stacklist() update.stacklist.menu <- function(){ justDoIt(paste("update.stacklist()", sep="")) doItAndPrint(paste("levels(as.factor(stack.list))", sep="")) } stack.create.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Create stack of environmental layers") modelName <- tclVar("calibration1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) all.varsFrame <- tkframe(top, relief="groove", borderwidth=2) all.vars1Frame <- tkframe(all.varsFrame) all.vars2Frame <- tkframe(all.varsFrame) all.varsVariable <- tclVar("0") all.varsCheckBox <- tkcheckbutton(all.vars2Frame, variable=all.varsVariable) dismoVariable <- tclVar("0") dismoCheckBox <- tkcheckbutton(all.vars2Frame, variable=dismoVariable) onOK <- function(){ all.vars <- tclvalue(all.varsVariable) == "1" dismoValue <- tclvalue(dismoVariable) == "1" if (dismoValue == T) {all.vars <- T} if (all.vars==T) { if (dismoValue == T) { dismo.dir <- normalizePath(paste(system.file(package="dismo"), '/ex', sep=''), mustWork=F) assign("dismo.ex", dismo.dir, envir=.GlobalEnv) logger(paste("dismo.ex <- '", dismo.dir, "'", sep="")) doItAndPrint(paste("selected.files <- list.files(path=dismo.ex, pattern='.grd', full.names=TRUE)", sep="")) }else{ doItAndPrint(paste("selected.files <- list.files(path=getwd(), pattern='.grd', full.names=TRUE)", sep="")) } }else{ doItAndPrint(paste("selected.files <- choose.files(default='*.grd')", sep="")) } if(length(selected.files) > 0) { justDoIt(paste("selected.files <- normalizePath(selected.files, mustWork=F)", sep="")) doItAndPrint(paste("selected.files", sep="")) modelValue <- tclvalue(modelName) modelValue <- gsub(".", "_", modelValue, fixed=T) logger(paste(modelValue, " <- raster::stack(selected.files)", sep="")) stack.eval <- eval(parse(text=paste("raster::stack(selected.files)", sep="")), envir=.GlobalEnv) assign(modelValue, stack.eval, envir=.GlobalEnv) doItAndPrint(paste(modelValue, "@title <- '", modelValue, "'", sep="")) stack.title <- as.character(eval(parse(text=paste(modelValue, "@title", sep="")), envir=.GlobalEnv)) doItAndPrint(paste(modelValue, sep="")) stack.list <- c(stack.list, stack.title) stack.list <- unique(stack.list) assign("stack.list", stack.list, envir=.GlobalEnv) if (length(stack.list) == 1) {assign("stack.focal", stack.list[1], envir=.GlobalEnv)} } Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- 
function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('stack', package='raster', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(all.vars1Frame, text="Use <OK> button to select (<CTRL>-select) layers"), sticky="w") tkgrid(all.varsCheckBox, tklabel(all.vars2Frame, text="select all 'grd' raster layers in the working directory"), sticky="w") tkgrid(dismoCheckBox, tklabel(all.vars2Frame, text="select all 'grd' raster layers from the dismo package examples"), sticky="w") tkgrid(all.vars1Frame, sticky="w") tkgrid(all.vars2Frame, sticky="w") tkgrid(all.varsFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) # tkfocus(modelName) tkwait.window(top) } stack.select.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Select calibration stack") update.stacklist() variables <- stack.list varFrame <- tkframe(top, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=40, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) for (x in variables) tkinsert(subsetBox, "end", x) onOK <- function(){ var <- variables[as.numeric(tkcurselection(subsetBox))+1] assign("stack.focal", var, envir=.GlobalEnv) doItAndPrint(paste(stack.focal, sep="")) doItAndPrint(paste(stack.focal, "@title", sep="")) logger(paste("Note that GUI assumes that stack name and stack title are the same", sep="")) logger(paste("If stack name and stack title are different, create the stack via the GUI", sep="")) Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(varFrame, text="Select calibration stack"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(varFrame, sticky="w") tkgrid(OKbutton, tklabel(buttonsFrame, text=" "), cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(subsetScroll, sticky="ns") if (is.null(stack.focal) == F) { tkselection.set(subsetBox, which(stack.focal == variables)-1) }else{ tkselection.set(subsetBox, 0) } for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(subsetBox) tkwait.window(top) } viewstack <- function(){ doItAndPrint(paste(stack.focal, sep="")) doItAndPrint(paste(stack.focal, "@title", sep="")) doItAndPrint(paste("dev.new()", sep="")) doItAndPrint(paste("raster::plot(", stack.focal, ")", sep="")) if (is.null(stack.factors) == F) { for (i in 1:length(stack.factors)) { if (i==1) { factor.string <- paste("c('", stack.factors[i], "'", sep="") }else{ factor.string <- paste(factor.string, ", '", stack.factors[i], "'", sep="") } } factor.string <- paste(factor.string, ")", sep="") logger(paste("Selected factors: ", factor.string, sep="")) } if (is.null(stack.dummies) == F) { for (i in 1:length(stack.dummies)) { if (i==1) { dummy.string <- paste("c('", stack.dummies[i], "'", sep="") }else{ dummy.string <- paste(dummy.string, ", '", stack.dummies[i], "'", sep="") } } dummy.string <- paste(dummy.string, ")", sep="") logger(paste("Selected dummy variables: ", dummy.string, sep="")) } } stack.delete.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Delete (drop) layers from calibration stack") lvariables <- eval(parse(text=paste("names(", stack.focal, ")", sep="")), envir=.GlobalEnv) layerFrame <- tkframe(top, relief="groove", borderwidth=2) layerBox <- tklistbox(layerFrame, width=40, height=8, selectmode="multiple", background="white", exportselection="FALSE") layerScroll <- tkscrollbar(layerFrame, repeatinterval=5, command=function(...) tkyview(layerBox, ...)) tkconfigure(layerBox, yscrollcommand=function(...) 
tkset(layerScroll, ...)) for (x in lvariables) tkinsert(layerBox, "end", x) onOK <- function(){ layers <- lvariables[as.numeric(tkcurselection(layerBox))+1] for (i in 1:length(layers)) { logger(paste(stack.focal, " <- raster::dropLayer(", stack.focal, ", which(names(", stack.focal, ")=='", layers[i], "'))", sep="")) stack.eval <- eval(parse(text=paste("raster::dropLayer(", stack.focal, ", which(names(", stack.focal, ")=='", layers[i], "'))", sep="")), envir=.GlobalEnv) assign(stack.focal, stack.eval, envir=.GlobalEnv) if (is.null(stack.factors) == F) {stack.factors <- stack.factors[which(stack.factors != layers[i])]} if (is.null(stack.dummies) == F) {stack.dummies <- stack.dummies[which(stack.dummies != layers[i])]} } if (length(stack.factors) == 0) {assign("stack.factors", NULL, envir=.GlobalEnv)} if (length(stack.dummies) == 0) {assign("stack.dummies", NULL, envir=.GlobalEnv)} activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('dropLayer', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(layerFrame, text="Select one or several layers to delete"), sticky="w") tkgrid(layerBox, layerScroll, sticky="w") tkgrid(layerFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(layerScroll, sticky="ns") # tkselection.set(layerBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(layerBox) tkwait.window(top) } stack.rename.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Rename layers of calibration stack") lvariables <- eval(parse(text=paste("names(", stack.focal, ")", sep="")), envir=.GlobalEnv) layerFrame <- tkframe(top, relief="groove", borderwidth=2) layerBox <- tklistbox(layerFrame, width=50, height=15, selectmode="single", background="white", exportselection="FALSE") layerScroll <- tkscrollbar(layerFrame, repeatinterval=5, command=function(...) tkyview(layerBox, ...)) tkconfigure(layerBox, yscrollcommand=function(...) 
tkset(layerScroll, ...)) for (x in lvariables) tkinsert(layerBox, "end", x) modelName <- tclVar("calibration1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) varName <- tclVar("bio0x") varFrame <- tkframe(top, relief="groove", borderwidth=2) var <- tkentry(varFrame, width=40, textvariable=varName) onOK <- function(){ layer <- lvariables[as.numeric(tkcurselection(layerBox))+1] varValue <- tclvalue(varName) doItAndPrint(paste("names(", stack.focal, ")[which(names(", stack.focal, ")=='", layer, "')] <- '", varValue, "'", sep="")) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('names', package='raster', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(layerFrame, text="Select layer to rename"), sticky="w") tkgrid(layerBox, layerScroll, sticky="w") tkgrid(layerFrame, sticky="w") tkgrid(tklabel(varFrame, text="New name:", width=10), var, sticky="w") tkgrid(varFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(layerScroll, sticky="ns") # tkselection.set(layerBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(layerBox) tkwait.window(top) } if (exists("stack.factors") == F) {assign("stack.factors", NULL, envir=.GlobalEnv)} stack.factors.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Identify factors for calibration stack") lvariables <- eval(parse(text=paste("names(", stack.focal, ")", sep="")), envir=.GlobalEnv) layerFrame <- tkframe(top, relief="groove", borderwidth=2) layerBox <- tklistbox(layerFrame, width=40, height=8, selectmode="multiple", background="white", exportselection="FALSE") layerScroll <- tkscrollbar(layerFrame, repeatinterval=5, command=function(...) tkyview(layerBox, ...)) tkconfigure(layerBox, yscrollcommand=function(...) 
tkset(layerScroll, ...)) for (x in lvariables) tkinsert(layerBox, "end", x) onOK <- function(){ layers <- lvariables[as.numeric(tkcurselection(layerBox))+1] if (length(layers) < 1) { stack.factors0 <- NULL }else{ stack.factors0 <- character(length=length(layers)) for (i in 1:length(layers)) {stack.factors0[i] <- paste(layers[i], sep="")} } assign("stack.factors", stack.factors0, envir=.GlobalEnv) if (is.null(stack.factors) == F) { for (i in 1:length(stack.factors)) { if (i==1) { factor.string <- paste("c('", stack.factors[i], "'", sep="") }else{ factor.string <- paste(factor.string, ", '", stack.factors[i], "'", sep="") } } factor.string <- paste(factor.string, ")", sep="") logger(paste("Selected factors: ", factor.string, sep="")) } activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(layerFrame, text="Select one or several factors"), sticky="w") tkgrid(layerBox, layerScroll, sticky="w") tkgrid(layerFrame, sticky="w") tkgrid(OKbutton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(layerScroll, sticky="ns") # tkselection.set(layerBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(layerBox) tkwait.window(top) } if (exists("stack.dummies") == F) {assign("stack.dummies", NULL, envir=.GlobalEnv)} stack.dummies.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Identify dummy variables for calibration stack") lvariables <- eval(parse(text=paste("names(", stack.focal, ")", sep="")), envir=.GlobalEnv) layerFrame <- tkframe(top, relief="groove", borderwidth=2) layerBox <- tklistbox(layerFrame, width=40, height=8, selectmode="multiple", background="white", exportselection="FALSE") layerScroll <- tkscrollbar(layerFrame, repeatinterval=5, command=function(...) tkyview(layerBox, ...)) tkconfigure(layerBox, yscrollcommand=function(...) 
tkset(layerScroll, ...)) for (x in lvariables) tkinsert(layerBox, "end", x) onOK <- function(){ layers <- lvariables[as.numeric(tkcurselection(layerBox))+1] if (length(layers) < 1) { stack.dummies0 <- NULL }else{ stack.dummies0 <- character(length=length(layers)) for (i in 1:length(layers)) {stack.dummies0[i] <- paste(layers[i], sep="")} } assign("stack.dummies", stack.dummies0, envir=.GlobalEnv) if (is.null(stack.dummies) == F) { for (i in 1:length(stack.dummies)) { if (i==1) { dummy.string <- paste("c('", stack.dummies[i], "'", sep="") }else{ dummy.string <- paste(dummy.string, ", '", stack.dummies[i], "'", sep="") } } dummy.string <- paste(dummy.string, ")", sep="") logger(paste("Selected dummy variables: ", dummy.string, sep="")) } activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(layerFrame, text="Select one or several dummy variables"), sticky="w") tkgrid(layerBox, layerScroll, sticky="w") tkgrid(layerFrame, sticky="w") tkgrid(OKbutton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(layerScroll, sticky="ns") # tkselection.set(layerBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(layerBox) tkwait.window(top) } presence_file <- paste(system.file(package="dismo"), '/ex/bradypus.csv', sep='') BradypusPresence <- read.table(presence_file, header=TRUE, sep=',') BradypusPresence[, 'species'] <- as.factor(gsub(' ', '_', BradypusPresence[, 'species'])) if (exists("presence.focal") == F) {assign("presence.focal", NULL, envir=.GlobalEnv)} presence.focalP <- function() {return(!is.null(presence.focal))} make.presence.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Make presence data set") .activeDataSet <- ActiveDataSet() .fvariables <- Factors() fvariables <- paste(.fvariables) .nvariables <- Numeric() nvariables <- paste(.nvariables) modelName <- tclVar("SpeciesPresence") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=24, textvariable=modelName) speciesFrame <- tkframe(top, relief="groove", borderwidth=2) speciesBox <- tklistbox(speciesFrame, width=40, height=5, selectmode="single", background="white", exportselection="FALSE") speciesScroll <- tkscrollbar(speciesFrame, repeatinterval=5, command=function(...) tkyview(speciesBox, ...)) tkconfigure(speciesBox, yscrollcommand=function(...) tkset(speciesScroll, ...)) for (x in fvariables) tkinsert(speciesBox, "end", x) xlonFrame <- tkframe(top, relief="groove", borderwidth=2) xlonBox <- tklistbox(xlonFrame, width=40, height=5, selectmode="single", background="white", exportselection="FALSE") xlonScroll <- tkscrollbar(xlonFrame, repeatinterval=5, command=function(...) tkyview(xlonBox, ...)) tkconfigure(xlonBox, yscrollcommand=function(...) tkset(xlonScroll, ...)) for (x in nvariables) tkinsert(xlonBox, "end", x) ylatFrame <- tkframe(top, relief="groove", borderwidth=2) ylatBox <- tklistbox(ylatFrame, width=40, height=5, selectmode="single", background="white", exportselection="FALSE") ylatScroll <- tkscrollbar(ylatFrame, repeatinterval=5, command=function(...) 
tkyview(ylatBox, ...)) tkconfigure(ylatBox, yscrollcommand=function(...) tkset(ylatScroll, ...)) for (x in nvariables) tkinsert(ylatBox, "end", x) onOK <- function(){ modelValue <- tclvalue(modelName) species <- .fvariables[as.numeric(tkcurselection(speciesBox))+1] xlon <- .nvariables[as.numeric(tkcurselection(xlonBox))+1] ylat <- .nvariables[as.numeric(tkcurselection(ylatBox))+1] command <- paste(.activeDataSet, "[as.numeric(na.omit(match(c('", species, "', '", xlon, "', '", ylat, "'), names(", .activeDataSet, "))))]", sep="") logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste("names(", modelValue, ") <- c('species', 'x', 'y')", sep="")) assign("presence.focal", modelValue, envir=.GlobalEnv) doItAndPrint(paste(presence.focal, "[, 'species'] <- as.factor(gsub(' ', '_', ", presence.focal, "[, 'species']))", sep="")) doItAndPrint(paste("summary(", presence.focal, ")", sep="")) Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(speciesFrame, text="Species variable"), sticky="w") tkgrid(speciesBox, speciesScroll, sticky="w") tkgrid(speciesFrame, sticky="w") tkgrid(tklabel(xlonFrame, text="x variable (eg, longitude)"), sticky="w") tkgrid(xlonBox, xlonScroll, sticky="w") tkgrid(xlonFrame, sticky="w") tkgrid(tklabel(ylatFrame, text="y variable (eg, latitude)"), sticky="w") tkgrid(ylatBox, ylatScroll, sticky="w") tkgrid(ylatFrame, sticky="w") tkgrid(OKbutton, cancelButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(speciesScroll, sticky="ns") tkgrid.configure(xlonScroll, sticky="ns") tkgrid.configure(ylatScroll, sticky="ns") tkselection.set(speciesBox, 0) tkselection.set(xlonBox, 0) tkselection.set(ylatBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(speciesBox) tkwait.window(top) } presence.select.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Select presence data set") variables <- listDataSets() varFrame <- tkframe(top, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=40, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...))
    for (x in variables) tkinsert(subsetBox, "end", x)
    onOK <- function(){
        var <- variables[as.numeric(tkcurselection(subsetBox))+1]
        assign("presence.focal", var, envir=.GlobalEnv)
        presence.names <- eval(parse(text=paste("names(", presence.focal, ")", sep="")), envir=.GlobalEnv)
        presence.names <- presence.names[1:3]
        # all.equal() returns a character description (not FALSE) when the vectors
        # differ, so wrap it in isTRUE() rather than comparing with == T / == F;
        # also rename the columns in the data set itself (not only the local copy)
        # so that downstream menus find the expected 'species', 'x', 'y' names
        if (isTRUE(all.equal(presence.names, c('species', 'lon', 'lat')))) {
            presence.names <- c('species', 'x', 'y')
            doItAndPrint(paste("names(", presence.focal, ")[1:3] <- c('species', 'x', 'y')", sep=""))
        }
        if (!isTRUE(all.equal(presence.names, c('species', 'x', 'y')))) {
            doItAndPrint(paste("WARNING: variable names are not 'species', 'x' and 'y'", sep=""))
        }
        doItAndPrint(paste(presence.focal, "[, 'species'] <- as.factor(gsub(' ', '_', ", presence.focal, "[, 'species']))", sep=""))
        doItAndPrint(paste("summary(", presence.focal, ")", sep=""))
        Rcmdr::putRcmdr("dialog.values", list())
        activateMenus()
        closeDialog()
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(varFrame, text="Select presence data set"), sticky="w")
    tkgrid(subsetBox, subsetScroll, sticky="w")
    tkgrid(varFrame, sticky="w")
    tkgrid(OKbutton, tklabel(buttonsFrame, text=" "), cancelButton)
    tkgrid(buttonsFrame, sticky="w")
    tkgrid.configure(subsetScroll, sticky="ns")
    if (is.null(presence.focal) == F) {
        tkselection.set(subsetBox, which(presence.focal == variables)-1)
    }else{
        tkselection.set(subsetBox, 0)
    }
    for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0)
    for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0)
    .Tcl("update idletasks")
    tkwm.resizable(top, 0, 0)
    tkwm.deiconify(top)
    tkgrab.set(top)
    tkfocus(subsetBox)
    tkwait.window(top)
}

# Open the focal presence data set in the data editor
viewpresence <- function(){
    command <- justDoIt(paste("invisible(edit(", presence.focal, "))", sep=""))
}

# Re-attach the species name as first column (thinning returns coordinates only)
add.spec.name <- function(x, spec.name="species") {
    x2 <- data.frame(cbind(rep(spec.name, nrow(x)), x))
    names(x2) <- c("species", "x", "y")
    return(x2)
}

spatial.thin.GUI <- function(){
    top <- tktoplevel()
    tkwm.title(top, "Spatial thinning")
    modelName.suggest <- paste(presence.focal, ".thin1", sep="")
    modelName <- tclVar(modelName.suggest)
    modelFrame <- tkframe(top, relief="groove", borderwidth=2)
    model <- tkentry(modelFrame, width=40, textvariable=modelName)
    thinFrame <- tkframe(top, relief="groove", borderwidth=2)
    thinkm <- tclVar("10")
    thinkmEntry <- tkentry(thinFrame, width=8, textvariable=thinkm)
    runs <- tclVar("100")
    runsEntry <- tkentry(thinFrame, width=8, textvariable=runs)
    onOK <- function(){
        # use the full column name rather than relying on partial matching of $spec
        specs <- eval(parse(text=paste("levels(", presence.focal, "$species)")), envir=.GlobalEnv)
        if (length(specs) > 1) {
            logger(paste("Spatial thinning available only for presence data sets with one species only", sep=""))
        }else{
            modelValue <- tclvalue(modelName)
            thinkmValue <- tclvalue(thinkm)
            runsValue <- tclvalue(runs)
            command <- paste("geosphere::distm(x=", presence.focal, "[, 2:3])", sep="")
            logger(paste("distm1 <- ", command, sep=""))
            assign("distm1", justDoIt(command), envir=.GlobalEnv)
            doItAndPrint(paste("diag(distm1) <- NA", sep=""))
            doItAndPrint(paste("min(distm1, na.rm=T)/1000 # distance in km", sep=""))
            command <- paste("ensemble.spatialThin(", presence.focal, "[, 2:3], thin.km=", thinkmValue, ", runs=", runsValue, ", verbose=F, return.notRetained=F)", sep="")
            logger(paste(modelValue, " <- ", command, sep=""))
            assign(modelValue, justDoIt(command), envir=.GlobalEnv)
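            # ensemble.spatialThin() retains a subset of points whose pairwise
            # great-circle distances all exceed thin.km, keeping the largest subset
            # found over 'runs' random attempts; distm1 above and distm2 below report
            # the minimum pairwise distance before and after thinning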
assign("specname", as.character(specs), envir=.GlobalEnv) command <- paste("add.spec.name(", modelValue, ", spec.name='", specname, "')", sep="") logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) assign("presence.focal", modelValue, envir=.GlobalEnv) command <- paste("geosphere::distm(x=", presence.focal, "[, 2:3])", sep="") logger(paste("distm2 <- ", command, sep="")) assign("distm2", justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste("diag(distm2) <- NA", sep="")) doItAndPrint(paste("min(distm2, na.rm=T)/1000 # distance in km", sep="")) rm(specname, envir=.GlobalEnv) rm(distm1, envir=.GlobalEnv) rm(distm2, envir=.GlobalEnv) doItAndPrint(paste("summary(", presence.focal, ")", sep="")) } Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('ensemble.spatialThin', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(thinFrame, text="thin.km", width=10), thinkmEntry, sticky="w") tkgrid(tklabel(thinFrame, text="runs", width=10), runsEntry, sticky="w") tkgrid(thinFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) # tkfocus(modelName) tkwait.window(top) } environmental.thin.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Environmental thinning") modelName.suggest <- paste(presence.focal, ".thin1", sep="") modelName <- tclVar(modelName.suggest) modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=40, textvariable=modelName) thinFrame <- tkframe(top, relief="groove", borderwidth=2) thinn <- tclVar("50") thinnEntry <- tkentry(thinFrame, width=8, textvariable=thinn) runs <- tclVar("100") runsEntry <- tkentry(thinFrame, width=8, textvariable=runs) onOK <- function(){ specs <- eval(parse(text=paste("levels(", presence.focal,"$spec)")), envir=.GlobalEnv) if (length(specs) > 1) { logger(paste("Environmental thinning available only for presence data sets with one species only", sep="")) }else{ modelValue <- tclvalue(modelName) thinnValue <- tclvalue(thinn) runsValue <- tclvalue(runs) command <- paste("raster::extract(", stack.focal, ", y=", presence.focal, "[, 2:3])", sep="") logger(paste("extract1 <- ", command, sep="")) assign("extract1", justDoIt(command), envir=.GlobalEnv) command <- paste("vegdist(extract1, method='mahalanobis', diag=F, upper=F)", sep="") logger(paste("distm1 <- ", command, sep="")) assign("distm1", justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste("min(distm1, na.rm=T) # note environmental thinning done via PCA coordinates", sep="")) command <- paste("ensemble.environmentalThin(", presence.focal, "[, 2:3], predictors.stack=", stack.focal, ", thin.n=", thinnValue, ", runs=", runsValue, ", pca.var=0.95, 
verbose=F, return.notRetained=F)", sep="") logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) assign("specname", as.character(specs), envir=.GlobalEnv) command <- paste("add.spec.name(", modelValue, ", spec.name='", specname, "')", sep="") logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) assign("presence.focal", modelValue, envir=.GlobalEnv) command <- paste("raster::extract(", stack.focal, ", y=", presence.focal, "[, 2:3])", sep="") logger(paste("extract2 <- ", command, sep="")) assign("extract2", justDoIt(command), envir=.GlobalEnv) command <- paste("vegdist(extract2, method='mahalanobis', diag=F, upper=F)", sep="") logger(paste("distm2 <- ", command, sep="")) assign("distm2", justDoIt(command), envir=.GlobalEnv) doItAndPrint(paste("min(distm2, na.rm=T) # note environmental thinning done via PCA coordinates", sep="")) rm(specname, envir=.GlobalEnv) rm(distm1, envir=.GlobalEnv) rm(distm2, envir=.GlobalEnv) rm(extract1, envir=.GlobalEnv) rm(extract2, envir=.GlobalEnv) doItAndPrint(paste("summary(", presence.focal, ")", sep="")) } Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('ensemble.environmentalThin', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(thinFrame, text="thin.n", width=10), thinnEntry, sticky="w") tkgrid(tklabel(thinFrame, text="runs", width=10), runsEntry, sticky="w") tkgrid(thinFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) # tkfocus(modelName) tkwait.window(top) } make.absences <- function(an=1000, x, excludep=F, presence.data=NULL) { if (excludep == F) { as <- data.frame(dismo::randomPoints(x[[1]], n=an, p=NULL, excludep=F)) names(as) <- c("x", "y") return(as) }else{ species.names <- levels(droplevels(factor(presence.data[, 1]))) for (s in 1:length(species.names)) { focal.species <- species.names[s] ps <- presence.data[presence.data[, 1]==focal.species, c(2:3)] as1 <- data.frame(dismo::randomPoints(x[[1]], n=an, p=ps, excludep=T)) as2 <- cbind(rep(focal.species, length=nrow(as1)), as1) names(as2) <- c("species", "x", "y") if(s == 1) { as3 <- as2 }else{ as3 <- rbind(as3, as2) } } return(as3) } } predictor.files <- list.files(path=paste(system.file(package="dismo"), '/ex', sep=''), pattern='grd', full.names=TRUE) calibration00000 <- raster::stack(predictor.files) BradypusAbsence <- make.absences(x=calibration00000, excludep=T, presence.data=BradypusPresence) rm(calibration00000) if (exists("absence.focal") == F) {assign("absence.focal", NULL, envir=.GlobalEnv)} absence.focalP <- function() {return(!is.null(absence.focal))} make.absence.GUI <- function(){ top <- tktoplevel() 
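    # This dialog wraps make.absences() (defined above), which draws random
    # background/pseudo-absence points from the first layer of the calibration
    # stack via dismo::randomPoints(), optionally excluding cells that already
    # contain presence points. Illustrative non-GUI call (object names are examples):
    # Absence1 <- make.absences(an=1000, x=calibration1, excludep=TRUE, presence.data=SpeciesPresence)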
tkwm.title(top, "Create absence data set") modelName <- tclVar("Absence1") modelFrame <- tkframe(top, relief="groove", borderwidth=2) model <- tkentry(modelFrame, width=24, textvariable=modelName) excludeFrame <- tkframe(top, relief="groove", borderwidth=2) exclude1Frame <- tkframe(excludeFrame) exclude2Frame <- tkframe(excludeFrame) n.abs <- tclVar("1000") n.absEntry <- tkentry(exclude1Frame, width=12, textvariable=n.abs) excludeVariable <- tclVar("0") excludeCheckBox <- tkcheckbutton(exclude2Frame, variable=excludeVariable) onOK <- function(){ modelValue <- tclvalue(modelName) n.absValue <- tclvalue(n.abs) exclude <- tclvalue(excludeVariable) == "1" if (exclude==T) { command <- paste("make.absences(an=", n.absValue, ", x=", stack.focal, ", excludep=T, presence.data=", presence.focal, ")", sep="") }else{ command <- paste("make.absences(an=", n.absValue, ", x=", stack.focal, ", excludep=F)", sep="") } logger(paste(modelValue, " <- ", command, sep="")) assign(modelValue, justDoIt(command), envir=.GlobalEnv) if (exclude==T) { doItAndPrint(paste("names(", modelValue, ") <- c('species', 'x', 'y')", sep="")) }else{ doItAndPrint(paste("names(", modelValue, ") <- c('x', 'y')", sep="")) } if (is.null(absence.focal) == T) {assign("absence.focal", modelValue, envir=.GlobalEnv)} doItAndPrint(paste("summary(", absence.focal, ")", sep="")) Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('randomPoints', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(modelFrame, text="Save result as: ", width=15), model, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(exclude1Frame, text="number of absence points", width=26), n.absEntry, sticky="w") tkgrid(excludeCheckBox, tklabel(exclude2Frame, text="exclude raster cells with presence "), sticky="w") tkgrid(exclude1Frame, sticky="w") tkgrid(exclude2Frame, sticky="w") tkgrid(excludeFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) # tkfocus(modelName) tkwait.window(top) } absence.select.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Select absence data set") variables <- listDataSets() varFrame <- tkframe(top, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=40, height=7, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...))
    for (x in variables) tkinsert(subsetBox, "end", x)
    onOK <- function(){
        var <- variables[as.numeric(tkcurselection(subsetBox))+1]
        assign("absence.focal", var, envir=.GlobalEnv)
        absence.names <- eval(parse(text=paste("names(", absence.focal, ")", sep="")), envir=.GlobalEnv)
        if (length(absence.names) == 3) {
            # as in presence.select.GUI: all.equal() never returns FALSE, so use
            # isTRUE(), and rename the columns in the data set itself
            if (isTRUE(all.equal(absence.names, c('species', 'lon', 'lat')))) {
                absence.names <- c('species', 'x', 'y')
                doItAndPrint(paste("names(", absence.focal, ") <- c('species', 'x', 'y')", sep=""))
            }
            if (!isTRUE(all.equal(absence.names, c('species', 'x', 'y')))) {
                doItAndPrint(paste("WARNING: variable names are not 'species', 'x' and 'y'", sep=""))
            }
            doItAndPrint(paste(absence.focal, "[, 'species'] <- as.factor(gsub(' ', '_', ", absence.focal, "[, 'species']))", sep=""))
        }
        doItAndPrint(paste("summary(", absence.focal, ")", sep=""))
        Rcmdr::putRcmdr("dialog.values", list())
        activateMenus()
        closeDialog()
        tkfocus(CommanderWindow())
    }
    onCancel <- function() {
        tkgrab.release(top)
        tkfocus(CommanderWindow())
        tkdestroy(top)
    }
    buttonsFrame <- tkframe(top)
    OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active")
    cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel)
    tkgrid(tklabel(varFrame, text="Select absence data set"), sticky="w")
    tkgrid(subsetBox, subsetScroll, sticky="w")
    tkgrid(varFrame, sticky="w")
    tkgrid(OKbutton, tklabel(buttonsFrame, text=" "), cancelButton)
    tkgrid(buttonsFrame, sticky="w")
    tkgrid.configure(subsetScroll, sticky="ns")
    if (is.null(absence.focal) == F) {
        tkselection.set(subsetBox, which(absence.focal == variables)-1)
    }else{
        tkselection.set(subsetBox, 0)
    }
    for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0)
    for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0)
    .Tcl("update idletasks")
    tkwm.resizable(top, 0, 0)
    tkwm.deiconify(top)
    tkgrab.set(top)
    tkfocus(subsetBox)
    tkwait.window(top)
}

# Open the focal absence data set in the data editor
viewabsence <- function(){
    command <- justDoIt(paste("invisible(edit(", absence.focal, "))", sep=""))
}

if (exists("ensmodels.file") == F) {assign("ensmodels.file", NULL, envir=.GlobalEnv)}
if (exists("focal.ensemble.object") == F) {assign("focal.ensemble.object", NULL, envir=.GlobalEnv)}
ensmodelsP <- function() {return(!is.null(focal.ensemble.object))}

# Main dialog: calibrate, test and project ensemble suitability models via ensemble.batch()
batch.GUI <- function(){
    top <- tktoplevel()
    tkwm.title(top, "Ensemble suitability modelling")
    firstFrame <- tkframe(top, relief="groove", borderwidth=2)
    leftFrame <- tkframe(firstFrame)
    left1Frame <- tkframe(leftFrame)
    left2Frame <- tkframe(leftFrame)
    n.ensemblesVariable <- tclVar("1")
    n.ensembleEntry <- tkentry(left1Frame, width=15, textvariable=n.ensemblesVariable)
    k.splitsVariable <- tclVar("4")
    k.splitsEntry <- tkentry(left1Frame, width=15, textvariable=k.splitsVariable)
    ENSEMBLE.bestVariable <- tclVar("0")
    ENSEMBLE.bestEntry <- tkentry(left1Frame, width=15, textvariable=ENSEMBLE.bestVariable)
    ENSEMBLE.minVariable <- tclVar("0.7")
    ENSEMBLE.minEntry <- tkentry(left1Frame, width=15, textvariable=ENSEMBLE.minVariable)
    ENSEMBLE.exponentVariable <- tclVar("c(1, 2, 3)")
    ENSEMBLE.exponentEntry <- tkentry(left1Frame, width=15, textvariable=ENSEMBLE.exponentVariable)
    CIRCLES.dVariable <- tclVar("0")
    CIRCLES.dEntry <- tkentry(left1Frame, width=15, textvariable=CIRCLES.dVariable)
    VIF.maxVariable <- tclVar("10")
    VIF.maxEntry <- tkentry(left1Frame, width=15, textvariable=VIF.maxVariable)
    PROBITVariable <- tclVar("1")
    PROBITCheckBox <- tkcheckbutton(left2Frame, variable=PROBITVariable)
    get.blockVariable <- tclVar("0")
    get.blockCheckBox <- tkcheckbutton(left2Frame, variable=get.blockVariable)
    infoFrame <- tkframe(firstFrame)
    info1Frame <- tkframe(infoFrame)
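    # Main ensemble.batch() tuning parameters collected above (paraphrased from
    # ?ensemble.batch):
    # - n.ensembles: number of ensemble runs per species
    # - k.splits: number of k-fold cross-validation splits
    # - ENSEMBLE.best: number of best-performing submodels retained (0 = no restriction)
    # - ENSEMBLE.min: minimum AUC for a submodel to enter the ensemble
    # - ENSEMBLE.exponent: exponent(s) applied to AUC-based model weights
    # - CIRCLES.d: radius (m) of circles used to reduce spatial sorting bias when > 0
    # - VIF.max: maximum acceptable variance inflation factor among predictors
    # - PROBIT: apply probit transformations to submodel predictions
    # - get.block: use spatial blocks for the k-fold cross-validation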
if (is.null(stack.focal) == F) {stack.focalVariable <- tclVar(stack.focal)} if (is.null(stack.focal) == T) {stack.focalVariable <- tclVar("(insert calibration stack)")} stack.focalEntry <- tkentry(info1Frame, width=25, textvariable=stack.focalVariable) if (is.null(presence.focal) == F) {presence.focalVariable <- tclVar(presence.focal)} if (is.null(presence.focal) == T) {presence.focalVariable <- tclVar("(insert presence)")} presence.focalEntry <- tkentry(info1Frame, width=25, textvariable=presence.focalVariable) if (is.null(absence.focal) == F) {absence.focalVariable <- tclVar(absence.focal)} if (is.null(absence.focal) == T) {absence.focalVariable <- tclVar("NULL")} absence.focalEntry <- tkentry(info1Frame, width=25, textvariable=absence.focalVariable) info2Frame <- tkframe(infoFrame) secondFrame <- tkframe(top, relief="groove", borderwidth=2) stackFrame <- tkframe(secondFrame, relief="groove", borderwidth=2) update.stacklist() if (is.null(stack.list) == F) {s.variables <- stack.list} if (is.null(stack.list) == T) {s.variables <- c("(none available)")} stackBox <- tklistbox(stackFrame, width=40, height=8, selectmode="multiple", background="white", exportselection="FALSE") stackScroll <- tkscrollbar(stackFrame, repeatinterval=5, command=function(...) tkyview(stackBox, ...)) tkconfigure(stackBox, yscrollcommand=function(...) tkset(stackScroll, ...)) for (x in s.variables) tkinsert(stackBox, "end", x) rightFrame <- tkframe(secondFrame) right1Frame <- tkframe(rightFrame) right2Frame <- tkframe(rightFrame) right3Frame <- tkframe(rightFrame) variables <- c("spec_sens", "equal_sens_spec", "sensitivity", "no_omission", "prevalence", "kappa", "Sens=Spec", "MaxSens+Spec", "MaxKappa", "MaxPCC", "PredPrev=Obs", "ObsPrev", "MeanProb", "MinROCdist", "ReqSens", "threshold2013.mean", "threshold2013.min", "threshold2005.mean", "threshold2005.min") varFrame <- tkframe(right1Frame, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=40, height=5, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) 
tkset(subsetScroll, ...)) for (x in variables) tkinsert(subsetBox, "end", x) sensitivityVariable <- tclVar("0.9") sensitivityEntry <- tkentry(right2Frame, width=15, textvariable=sensitivityVariable) PRESABSVariable <- tclVar("0") PRESABSCheckBox <- tkcheckbutton(right3Frame, variable=PRESABSVariable) algoFrame <- tkframe(top, relief="groove", borderwidth=2) algo1Frame <- tkframe(algoFrame) algo2Frame <- tkframe(algoFrame) algo3Frame <- tkframe(algoFrame) algo4Frame <- tkframe(algoFrame) # MAXENTVariable <- tclVar("0") MAXENTCheckBox <- tkcheckbutton(algo1Frame, variable=MAXENTVariable) GBMSTEPVariable <- tclVar("0") GBMSTEPCheckBox <- tkcheckbutton(algo1Frame, variable=GBMSTEPVariable) GLMSTEPVariable <- tclVar("0") GLMSTEPCheckBox <- tkcheckbutton(algo1Frame, variable=GLMSTEPVariable) MGCVFIXVariable <- tclVar("0") MGCVFIXCheckBox <- tkcheckbutton(algo1Frame, variable=MGCVFIXVariable) FDAVariable <- tclVar("0") FDACheckBox <- tkcheckbutton(algo1Frame, variable=FDAVariable) BIOCLIMOVariable <- tclVar("0") BIOCLIMOCheckBox <- tkcheckbutton(algo1Frame, variable=BIOCLIMOVariable) MAHAL01Variable <- tclVar("0") MAHAL01CheckBox <- tkcheckbutton(algo1Frame, variable=MAHAL01Variable) # MAXNETVariable <- tclVar("0") MAXNETCheckBox <- tkcheckbutton(algo2Frame, variable=MAXNETVariable) RFVariable <- tclVar("0") RFCheckBox <- tkcheckbutton(algo2Frame, variable=RFVariable) GAMVariable <- tclVar("0") GAMCheckBox <- tkcheckbutton(algo2Frame, variable=GAMVariable) EARTHVariable <- tclVar("0") EARTHCheckBox <- tkcheckbutton(algo2Frame, variable=EARTHVariable) SVMVariable <- tclVar("0") SVMCheckBox <- tkcheckbutton(algo2Frame, variable=SVMVariable) BIOCLIMVariable <- tclVar("0") BIOCLIMCheckBox <- tkcheckbutton(algo2Frame, variable=BIOCLIMVariable) # MAXLIKEVariable <- tclVar("0") MAXLIKECheckBox <- tkcheckbutton(algo3Frame, variable=MAXLIKEVariable) CFVariable <- tclVar("0") CFCheckBox <- tkcheckbutton(algo3Frame, variable=CFVariable) GAMSTEPVariable <- tclVar("0") GAMSTEPCheckBox <- tkcheckbutton(algo3Frame, variable=GAMSTEPVariable) RPARTVariable <- tclVar("0") RPARTCheckBox <- tkcheckbutton(algo3Frame, variable=RPARTVariable) SVMEVariable <- tclVar("0") SVMECheckBox <- tkcheckbutton(algo3Frame, variable=SVMEVariable) DOMAINVariable <- tclVar("0") DOMAINCheckBox <- tkcheckbutton(algo3Frame, variable=DOMAINVariable) # GBMVariable <- tclVar("0") GBMCheckBox <- tkcheckbutton(algo4Frame, variable=GBMVariable) GLMVariable <- tclVar("0") GLMCheckBox <- tkcheckbutton(algo4Frame, variable=GLMVariable) MGCVVariable <- tclVar("0") MGCVCheckBox <- tkcheckbutton(algo4Frame, variable=MGCVVariable) NNETVariable <- tclVar("0") NNETCheckBox <- tkcheckbutton(algo4Frame, variable=NNETVariable) GLMNETVariable <- tclVar("0") GLMNETCheckBox <- tkcheckbutton(algo4Frame, variable=GLMNETVariable) MAHALVariable <- tclVar("0") MAHALCheckBox <- tkcheckbutton(algo4Frame, variable=MAHALVariable) # if (is.null(presence.focal) == F) {species.names <- eval(parse(text=paste("levels(droplevels(factor(", presence.focal, "[, 1])))", sep="")), envir=.GlobalEnv)} if (is.null(presence.focal) == F) {species.last <- species.names[length(species.names)]} # onOK <- function(){ n.ensembles <- tclvalue(n.ensemblesVariable) k.splits <- tclvalue(k.splitsVariable) ENSEMBLE.best <- tclvalue(ENSEMBLE.bestVariable) ENSEMBLE.min <- tclvalue(ENSEMBLE.minVariable) ENSEMBLE.exponent <- tclvalue(ENSEMBLE.exponentVariable) SSB.reduce <- FALSE CIRCLES.d <- tclvalue(CIRCLES.dVariable) if (CIRCLES.d > 0) {SSB.reduce <- TRUE} VIF.max <- tclvalue(VIF.maxVariable) 
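        # tclvalue() returns character strings; the values collected above are pasted
        # verbatim into the ensemble.batch() command assembled below, which is why an
        # entry such as ENSEMBLE.exponent can hold an R expression like c(1, 2, 3)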
PROBIT <- tclvalue(PROBITVariable) == "1" get.block1 <- tclvalue(get.blockVariable) == "1" stack.focalValue <- tclvalue(stack.focalVariable) presence.focalValue <- tclvalue(presence.focalVariable) absence.focalValue <- tclvalue(absence.focalVariable) var <- variables[as.numeric(tkcurselection(subsetBox))+1] sensitivity <- tclvalue(sensitivityVariable) PRESABS <- tclvalue(PRESABSVariable) == "1" if (var == "Sens=Spec"){PRESABS <- TRUE} if (var == "MaxSens+Spec"){PRESABS <- TRUE} if (var == "MaxKappa"){PRESABS <- TRUE} if (var == "MaxPCC"){PRESABS <- TRUE} if (var == "PredPrev=Obs"){PRESABS <- TRUE} if (var == "ObsPrev"){PRESABS <- TRUE} if (var == "MeanProb"){PRESABS <- TRUE} if (var == "MinROCdist"){PRESABS <- TRUE} if (var == "ReqSens"){PRESABS <- TRUE} MAXENT <- tclvalue(MAXENTVariable) MAXNET <- tclvalue(MAXNETVariable) MAXLIKE <- tclvalue(MAXLIKEVariable) GBM <- tclvalue(GBMVariable) GBMSTEP <- tclvalue(GBMSTEPVariable) RF <- tclvalue(RFVariable) CF <- tclvalue(CFVariable) GLM <- tclvalue(GLMVariable) GLMSTEP <- tclvalue(GLMSTEPVariable) GAM <- tclvalue(GAMVariable) GAMSTEP <- tclvalue(GAMSTEPVariable) MGCV <- tclvalue(MGCVVariable) MGCVFIX <- tclvalue(MGCVFIXVariable) EARTH <- tclvalue(EARTHVariable) RPART <- tclvalue(RPARTVariable) NNET <- tclvalue(NNETVariable) FDA <- tclvalue(FDAVariable) SVM <- tclvalue(SVMVariable) SVME <- tclvalue(SVMEVariable) GLMNET <- tclvalue(GLMNETVariable) BIOCLIMO <- tclvalue(BIOCLIMOVariable) BIOCLIM <- tclvalue(BIOCLIMVariable) DOMAIN <- tclvalue(DOMAINVariable) MAHAL <- tclvalue(MAHALVariable) MAHAL01 <- tclvalue(MAHAL01Variable) if (is.null(stack.factors) == F) { for (i in 1:length(stack.factors)) { if (i==1) { factor.string <- paste("c('", stack.factors[i], "'", sep="") }else{ factor.string <- paste(factor.string, ", '", stack.factors[i], "'", sep="") } } factor.string <- paste(factor.string, ")", sep="") }else{ factor.string <- paste("c()", sep="") } if (is.null(stack.dummies) == F) { for (i in 1:length(stack.dummies)) { if (i==1) { dummy.string <- paste("c('", stack.dummies[i], "'", sep="") }else{ dummy.string <- paste(dummy.string, ", '", stack.dummies[i], "'", sep="") } } dummy.string <- paste(dummy.string, ")", sep="") }else{ dummy.string <- paste("c()", sep="") } stacks <- s.variables[as.numeric(tkcurselection(stackBox))+1] if (is.null(stack.list) == T) {stacks <- tclvalue(stack.focalVariable)} if (length(stacks) > 0) { for (i in 1:length(stacks)) { if (i==1) { stack.string <- paste("c(", stacks[i], sep="") }else{ stack.string <- paste(stack.string, ", ", stacks[i], sep="") } } stack.string <- paste(stack.string, ")", sep="") }else{ stack.string <- paste("c()", sep="") } logger(paste("Note that it can take a while before results will be shown", sep="")) logger(paste("Probably capturing output in file is considerably faster", sep="")) logger(paste("When calculations and projections are finalized, the window interface will close", sep="")) logger(paste("You can also monitor progress in the 'outputs' subfolder of the working directory: ", getwd(), sep="")) doItAndPrint(paste("batch.1 <- ensemble.batch(x=", stack.focalValue, ", xn=", stack.string, ", species.presence=", presence.focalValue, ", species.absence=", absence.focalValue, ", presence.min=20, thin.km=0.1, get.block=", get.block1, ", SSB.reduce=", SSB.reduce, ", CIRCLES.d=", CIRCLES.d, ", k.splits=", k.splits, ", n.ensembles=", n.ensembles, ", VIF.max=", VIF.max, ", VIF.keep=NULL", ", KML.out=TRUE, models.save=TRUE", ", threshold.method='", var, "', threshold.sensitivity=", sensitivity, ", 
threshold.PresenceAbsence=", PRESABS, ", factors=", factor.string, ", dummy.vars=", dummy.string, ", ENSEMBLE.best=", ENSEMBLE.best, ", ENSEMBLE.min=", ENSEMBLE.min, ", ENSEMBLE.exponent=", ENSEMBLE.exponent, ", ENSEMBLE.weight.min=0.05", ", MAXENT=", MAXENT, ", MAXNET=", MAXNET, ", MAXLIKE=", MAXLIKE, ", GBM=", GBM, ", GBMSTEP=", GBMSTEP, ", RF=", RF, ", CF=", CF, ", GLM=", GLM, ", GLMSTEP=", GLMSTEP, ", GAM=", GAM, ", GAMSTEP=", GAMSTEP, ", MGCV=", MGCV, ", MGCVFIX=", MGCVFIX, ", EARTH=", EARTH, ", RPART=", RPART, ", NNET=", NNET, ", FDA=", FDA, ", SVM=", SVM, ", SVME=", SVME, ", GLMNET=", GLMNET, ", BIOCLIM.O=", BIOCLIMO, ", BIOCLIM=", BIOCLIM, ", DOMAIN=", DOMAIN, ", MAHAL=", MAHAL, ", MAHAL01=", MAHAL01, ", PROBIT=", PROBIT, ")", sep="")) doItAndPrint(paste("batch.1")) if (is.null(presence.focal) == F) { if (n.ensembles > 1) { models.file <- paste(getwd(), "//models//", species.last, "_ENSEMBLE_", n.ensembles, "_models", sep="") models.file <- normalizePath(models.file, mustWork=F) if (file.exists(models.file) == T) { assign("ensmodels.file", models.file, envir=.GlobalEnv) load(ensmodels.file) assign("focal.ensemble.object", ensemble.models, envir=.GlobalEnv) logger(paste("Focal ensemble (object focal.ensemble.object) loaded from: ", models.file, sep="")) } }else{ models.file <- paste(getwd(), "//models//", species.last, "_models", sep="") models.file <- normalizePath(models.file, mustWork=F) if (file.exists(models.file) == T) { assign("ensmodels.file", models.file, envir=.GlobalEnv) load(ensmodels.file) assign("focal.ensemble.object", ensemble.models, envir=.GlobalEnv) logger(paste("Focal ensemble models (object focal.ensemble.object) loaded from: ", models.file, sep="")) } } } Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCapture <- function(){ n.ensembles <- tclvalue(n.ensemblesVariable) k.splits <- tclvalue(k.splitsVariable) ENSEMBLE.best <- tclvalue(ENSEMBLE.bestVariable) ENSEMBLE.min <- tclvalue(ENSEMBLE.minVariable) ENSEMBLE.exponent <- tclvalue(ENSEMBLE.exponentVariable) SSB.reduce <- FALSE CIRCLES.d <- tclvalue(CIRCLES.dVariable) if (CIRCLES.d > 0) {SSB.reduce <- TRUE} VIF.max <- tclvalue(VIF.maxVariable) PROBIT <- tclvalue(PROBITVariable) == "1" get.block1 <- tclvalue(get.blockVariable) == "1" stack.focalValue <- tclvalue(stack.focalVariable) presence.focalValue <- tclvalue(presence.focalVariable) absence.focalValue <- tclvalue(absence.focalVariable) var <- variables[as.numeric(tkcurselection(subsetBox))+1] sensitivity <- tclvalue(sensitivityVariable) PRESABS <- tclvalue(PRESABSVariable) == "1" if (var == "Sens=Spec"){PRESABS <- TRUE} if (var == "MaxSens+Spec"){PRESABS <- TRUE} if (var == "MaxKappa"){PRESABS <- TRUE} if (var == "MaxPCC"){PRESABS <- TRUE} if (var == "PredPrev=Obs"){PRESABS <- TRUE} if (var == "ObsPrev"){PRESABS <- TRUE} if (var == "MeanProb"){PRESABS <- TRUE} if (var == "MinROCdist"){PRESABS <- TRUE} if (var == "ReqSens"){PRESABS <- TRUE} MAXENT <- tclvalue(MAXENTVariable) MAXNET <- tclvalue(MAXNETVariable) MAXLIKE <- tclvalue(MAXLIKEVariable) GBM <- tclvalue(GBMVariable) GBMSTEP <- tclvalue(GBMSTEPVariable) RF <- tclvalue(RFVariable) CF <- tclvalue(CFVariable) GLM <- tclvalue(GLMVariable) GLMSTEP <- tclvalue(GLMSTEPVariable) GAM <- tclvalue(GAMVariable) GAMSTEP <- tclvalue(GAMSTEPVariable) MGCV <- tclvalue(MGCVVariable) MGCVFIX <- tclvalue(MGCVFIXVariable) EARTH <- tclvalue(EARTHVariable) RPART <- tclvalue(RPARTVariable) NNET <- tclvalue(NNETVariable) FDA <- tclvalue(FDAVariable) SVM <- 
tclvalue(SVMVariable) SVME <- tclvalue(SVMEVariable) GLMNET <- tclvalue(GLMNETVariable) BIOCLIMO <- tclvalue(BIOCLIMOVariable) BIOCLIM <- tclvalue(BIOCLIMVariable) DOMAIN <- tclvalue(DOMAINVariable) MAHAL <- tclvalue(MAHALVariable) MAHAL01 <- tclvalue(MAHAL01Variable) if (is.null(stack.factors) == F) { for (i in 1:length(stack.factors)) { if (i==1) { factor.string <- paste("c('", stack.factors[i], "'", sep="") }else{ factor.string <- paste(factor.string, ", '", stack.factors[i], "'", sep="") } } factor.string <- paste(factor.string, ")", sep="") }else{ factor.string <- paste("c()", sep="") } if (is.null(stack.dummies) == F) { for (i in 1:length(stack.dummies)) { if (i==1) { dummy.string <- paste("c('", stack.dummies[i], "'", sep="") }else{ dummy.string <- paste(dummy.string, ", '", stack.dummies[i], "'", sep="") } } dummy.string <- paste(dummy.string, ")", sep="") }else{ dummy.string <- paste("c()", sep="") } stacks <- s.variables[as.numeric(tkcurselection(stackBox))+1] if (is.null(stack.list) == T) {stacks <- tclvalue(stack.focalVariable)} if (length(stacks) > 0) { for (i in 1:length(stacks)) { if (i==1) { stack.string <- paste("c(", stacks[i], sep="") }else{ stack.string <- paste(stack.string, ", ", stacks[i], sep="") } } stack.string <- paste(stack.string, ")", sep="") }else{ stack.string <- paste("c()", sep="") } dir.create("outputs", showWarnings = F) filename1 <- paste(path=getwd(), "//outputs//", presence.focal, "_output.txt", sep="") filename1 <- normalizePath(filename1, mustWork=F) assign("output.filename", filename1, envir=.GlobalEnv) logger(paste("Results will be written to file: ", output.filename, sep="")) logger(paste("When calculations and projections are finalized, the window interface will close", sep="")) logger(paste("You can also monitor progress in the 'outputs' subfolder of the working directory: ", getwd(), sep="")) doItAndPrint(paste("capture.output(batch.1 <- ensemble.batch(x=", stack.focalValue, ", xn=", stack.string, ", species.presence=", presence.focalValue, ", species.absence=", absence.focalValue, ", presence.min=20, thin.km=0.1, get.block=", get.block1, ", SSB.reduce=", SSB.reduce, ", CIRCLES.d=", CIRCLES.d, ", k.splits=", k.splits, ", n.ensembles=", n.ensembles, ", VIF.max=", VIF.max, ", VIF.keep=NULL", ", KML.out=TRUE, models.save=TRUE", ", threshold.method='", var, "', threshold.sensitivity=", sensitivity, ", threshold.PresenceAbsence=", PRESABS, ", factors=", factor.string, ", dummy.vars=", dummy.string, ", ENSEMBLE.best=", ENSEMBLE.best, ", ENSEMBLE.min=", ENSEMBLE.min, ", ENSEMBLE.exponent=", ENSEMBLE.exponent, ", ENSEMBLE.weight.min=0.05", ", MAXENT=", MAXENT, ", MAXNET=", MAXNET, ", MAXLIKE=", MAXLIKE, ", GBM=", GBM, ", GBMSTEP=", GBMSTEP, ", RF=", RF, ", CF=", CF, ", GLM=", GLM, ", GLMSTEP=", GLMSTEP, ", GAM=", GAM, ", GAMSTEP=", GAMSTEP, ", MGCV=", MGCV, ", MGCVFIX=", MGCVFIX, ", EARTH=", EARTH, ", RPART=", RPART, ", NNET=", NNET, ", FDA=", FDA, ", SVM=", SVM, ", SVME=", SVME, ", GLMNET=", GLMNET, ", BIOCLIM.O=", BIOCLIMO, ", BIOCLIM=", BIOCLIM, ", DOMAIN=", DOMAIN, ", MAHAL=", MAHAL, ", MAHAL01=", MAHAL01, ", PROBIT=", PROBIT, "), file=output.filename)", sep="")) doItAndPrint(paste("batch.1")) if (is.null(presence.focal) == F) { if (n.ensembles > 1) { models.file <- paste(getwd(), "//models//", species.last, "_ENSEMBLE_", n.ensembles, "_models", sep="") models.file <- normalizePath(models.file, mustWork=F) if (file.exists(models.file) == T) { assign("ensmodels.file", models.file, envir=.GlobalEnv) load(ensmodels.file) 
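                # load() restores the 'ensemble.models' object that ensemble.batch()
                # saved (models.save=TRUE) into this function's environment; it is
                # then published globally as 'focal.ensemble.object'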
assign("focal.ensemble.object", ensemble.models, envir=.GlobalEnv) logger(paste("Focal ensemble models (object focal.ensemble.object) loaded from: ", models.file, sep="")) } }else{ models.file <- paste(getwd(), "//models//", species.last, "_models", sep="") models.file <- normalizePath(models.file, mustWork=F) if (file.exists(models.file) == T) { assign("ensmodels.file", models.file, envir=.GlobalEnv) load(ensmodels.file) assign("focal.ensemble.object", ensemble.models, envir=.GlobalEnv) logger(paste("Focal ensemble models (object focal.ensemble.object) loaded from: ", models.file, sep="")) } } logger(paste("You can find the results in: ", output.filename, sep="")) } Rcmdr::putRcmdr("dialog.values", list()) activateMenus() closeDialog() tkfocus(CommanderWindow()) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('ensemble.batch', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="12", command=onOK, default="active") capturebutton <- tkbutton(buttonsFrame, text="capture output in file", width="25", command=onCapture) cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(left1Frame, text="number of ensembles", width=24), n.ensembleEntry, sticky="w") tkgrid(tklabel(left1Frame, text="k.splits", width=24), k.splitsEntry, sticky="w") tkgrid(tklabel(left1Frame, text="ENSEMBLE.best", width=24), ENSEMBLE.bestEntry, sticky="w") tkgrid(tklabel(left1Frame, text="ENSEMBLE.min", width=24), ENSEMBLE.minEntry, sticky="w") tkgrid(tklabel(left1Frame, text="ENSEMBLE.exponent", width=24), ENSEMBLE.exponentEntry, sticky="w") tkgrid(tklabel(left1Frame, text="CIRCLES.d (if > 0)", width=24), CIRCLES.dEntry, sticky="w") tkgrid(tklabel(left1Frame, text="VIF.max", width=22), VIF.maxEntry, sticky="w") tkgrid(PROBITCheckBox, tklabel(left2Frame, text="PROBIT transformations "), sticky="w") tkgrid(get.blockCheckBox, tklabel(left2Frame, text="get.block for k-fold crossvalidation"), sticky="w") # tkgrid(tklabel(left2Frame, text=" ", width=5), tklabel(left2Frame, text=" ", width=30), sticky="w") tkgrid(left1Frame, sticky="w") tkgrid(left2Frame, sticky="w") tkgrid(tklabel(info1Frame, text="calibration", width=15), stack.focalEntry, sticky="w") tkgrid(tklabel(info1Frame, text="presence", width=15), presence.focalEntry, sticky="w") tkgrid(tklabel(info1Frame, text="absence", width=15), absence.focalEntry, sticky="w") tkgrid(tklabel(info2Frame, text=" ", width=35), absence.focalEntry, sticky="w") tkgrid(tklabel(info2Frame, text="Better not to edit the parameters above", width=35), sticky="w") tkgrid(tklabel(info2Frame, text="but to use other menus for selections", width=35), sticky="w") tkgrid(tklabel(info2Frame, text=" ", width=35), absence.focalEntry, sticky="w") tkgrid(tklabel(info2Frame, text="Capturing output in file", width=35), sticky="w") tkgrid(tklabel(info2Frame, text="will be much faster", width=35), sticky="w") tkgrid(info1Frame, sticky="w") tkgrid(info2Frame, sticky="w") tkgrid(leftFrame, infoFrame, sticky="w") tkgrid(firstFrame, sticky="w") tkgrid(tklabel(stackFrame, text="Select one or several stacks to predict"), sticky="w") tkgrid(stackBox, stackScroll, sticky="w") tkgrid(tklabel(right1Frame, text="Select threshold method"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") 
tkgrid(varFrame, sticky="w") tkgrid(tklabel(right2Frame, text="threshold.sensitivity", width=20), sensitivityEntry, sticky="w") tkgrid(PRESABSCheckBox, tklabel(right3Frame, text="PresenceAbsence package"), sticky="w") tkgrid(right1Frame, sticky="w") tkgrid(right2Frame, sticky="w") tkgrid(right3Frame, sticky="w") tkgrid(stackFrame, rightFrame, sticky="w") tkgrid(secondFrame, sticky="w") # tkgrid(tklabel(algoFrame, text="Select algorithms", width=15), sticky="w") # tkgrid(tklabel(algo1Frame, text=" ", width=5), tklabel(algo1Frame, text=" ", width=12), sticky="w") tkgrid(MAXENTCheckBox, tklabel(algo1Frame, text="MAXENT"), sticky="w") tkgrid(GBMSTEPCheckBox, tklabel(algo1Frame, text="GBMSTEP"), sticky="w") tkgrid(GLMSTEPCheckBox, tklabel(algo1Frame, text="GLMSTEP"), sticky="w") tkgrid(MGCVFIXCheckBox, tklabel(algo1Frame, text="MGCVFIX"), sticky="w") tkgrid(FDACheckBox, tklabel(algo1Frame, text="FDA"), sticky="w") tkgrid(BIOCLIMOCheckBox, tklabel(algo1Frame, text="BIOCLIM.O"), sticky="w") tkgrid(MAHAL01CheckBox, tklabel(algo1Frame, text="MAHAL01"), sticky="w") # tkgrid(tklabel(algo2Frame, text=" ", width=5), tklabel(algo2Frame, text=" ", width=12), sticky="w") tkgrid(MAXNETCheckBox, tklabel(algo2Frame, text="MAXNET"), sticky="w") tkgrid(RFCheckBox, tklabel(algo2Frame, text="RF"), sticky="w") tkgrid(GAMCheckBox, tklabel(algo2Frame, text="GAM"), sticky="w") tkgrid(EARTHCheckBox, tklabel(algo2Frame, text="EARTH"), sticky="w") tkgrid(SVMCheckBox, tklabel(algo2Frame, text="SVM"), sticky="w") tkgrid(BIOCLIMCheckBox, tklabel(algo2Frame, text="BIOCLIM"), sticky="w") tkgrid(tklabel(algo2Frame, text=" ", width=5), tklabel(algo2Frame, text=" ", width=12), sticky="w") # tkgrid(tklabel(algo3Frame, text=" ", width=5), tklabel(algo3Frame, text=" ", width=12), sticky="w") tkgrid(MAXLIKECheckBox, tklabel(algo3Frame, text="MAXLIKE"), sticky="w") tkgrid(CFCheckBox, tklabel(algo3Frame, text="CF"), sticky="w") tkgrid(GAMSTEPCheckBox, tklabel(algo3Frame, text="GAMSTEP"), sticky="w") tkgrid(RPARTCheckBox, tklabel(algo3Frame, text="RPART"), sticky="w") tkgrid(SVMECheckBox, tklabel(algo3Frame, text="SVME"), sticky="w") tkgrid(DOMAINCheckBox, tklabel(algo3Frame, text="DOMAIN"), sticky="w") tkgrid(tklabel(algo3Frame, text=" ", width=5), tklabel(algo3Frame, text=" ", width=12), sticky="w") # tkgrid(tklabel(algo4Frame, text=" ", width=5), tklabel(algo4Frame, text=" ", width=12), sticky="w") tkgrid(GBMCheckBox, tklabel(algo4Frame, text="GBM"), sticky="w") tkgrid(GLMCheckBox, tklabel(algo4Frame, text="GLM"), sticky="w") tkgrid(MGCVCheckBox, tklabel(algo4Frame, text="MGCV"), sticky="w") tkgrid(NNETCheckBox, tklabel(algo4Frame, text="NNET"), sticky="w") tkgrid(GLMNETCheckBox, tklabel(algo4Frame, text="GLMNET"), sticky="w") tkgrid(MAHALCheckBox, tklabel(algo4Frame, text="MAHAL"), sticky="w") tkgrid(tklabel(algo4Frame, text=" ", width=5), tklabel(algo4Frame, text=" ", width=12), sticky="w") # tkgrid(algo1Frame, algo2Frame, algo3Frame, algo4Frame, sticky="w") tkgrid(algoFrame, sticky="w") tkgrid(OKbutton, capturebutton, tklabel(buttonsFrame, text=" "), cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(subsetScroll, sticky="ns") tkgrid.configure(stackScroll, sticky="ns") tkselection.set(subsetBox, 0) tkselection.set(stackBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(subsetBox) tkwait.window(top) } ensemble.plot.GUI <- function(){ top 
<- tktoplevel() tkwm.title(top, "Plot suitability maps") firstFrame <- tkframe(top, relief="groove", borderwidth=2) left1Frame <- tkframe(firstFrame) right1Frame <- tkframe(firstFrame) species.names <- eval(parse(text=paste("levels(droplevels(factor(", presence.focal, "[, 1])))", sep="")), envir=.GlobalEnv) speciesFrame <- tkframe(left1Frame, relief="groove", borderwidth=2) speciesBox <- tklistbox(speciesFrame, width=40, height=6, selectmode="single", background="white", exportselection="FALSE") speciesScroll <- tkscrollbar(speciesFrame, repeatinterval=5, command=function(...) tkyview(speciesBox, ...)) tkconfigure(speciesBox, yscrollcommand=function(...) tkset(speciesScroll, ...)) for (x in species.names) tkinsert(speciesBox, "end", x) m.variables <- c("suitability", "presence", "count", "consensussuitability", "consensuspresence", "consensuscount", "consensussd") methodFrame <- tkframe(right1Frame, relief="groove", borderwidth=2) methodBox <- tklistbox(methodFrame, width=40, height=6, selectmode="single", background="white", exportselection="FALSE") methodScroll <- tkscrollbar(methodFrame, repeatinterval=5, command=function(...) tkyview(methodBox, ...)) tkconfigure(methodBox, yscrollcommand=function(...) tkset(methodScroll, ...)) for (x in m.variables) tkinsert(methodBox, "end", x) secondFrame <- tkframe(top, relief="groove", borderwidth=2) left2Frame <- tkframe(secondFrame) right2Frame <- tkframe(secondFrame) update.stacklist() s.variables <- stack.list stackFrame <- tkframe(left2Frame, relief="groove", borderwidth=2) stackBox <- tklistbox(stackFrame, width=40, height=6, selectmode="single", background="white", exportselection="FALSE") stackScroll <- tkscrollbar(stackFrame, repeatinterval=5, command=function(...) tkyview(stackBox, ...)) tkconfigure(stackBox, yscrollcommand=function(...) tkset(stackScroll, ...)) for (x in s.variables) tkinsert(stackBox, "end", x) variables <- c("spec_sens", "equal_sens_spec", "sensitivity", "no_omission", "prevalence", "kappa", "Sens=Spec", "MaxSens+Spec", "MaxKappa", "MaxPCC", "PredPrev=Obs", "ObsPrev", "MeanProb", "MinROCdist", "ReqSens", "threshold2013.mean", "threshold2013.min", "threshold2005.mean", "threshold2005.min") varFrame <- tkframe(right2Frame, relief="groove", borderwidth=2) subsetBox <- tklistbox(varFrame, width=40, height=6, selectmode="single", background="white", exportselection="FALSE") subsetScroll <- tkscrollbar(varFrame, repeatinterval=5, command=function(...) tkyview(subsetBox, ...)) tkconfigure(subsetBox, yscrollcommand=function(...) tkset(subsetScroll, ...)) for (x in variables) tkinsert(subsetBox, "end", x) thirdFrame <- tkframe(top, relief="groove", borderwidth=2) left3Frame <- tkframe(thirdFrame) right3Frame <- tkframe(thirdFrame) f.variables <- c("(none)", as.character(paste("_ENSEMBLE_", c(1:100), "_", sep=""))) filterFrame <- tkframe(left3Frame, relief="groove", borderwidth=2) filterBox <- tklistbox(filterFrame, width=40, height=8, selectmode="single", background="white", exportselection="FALSE") filterScroll <- tkscrollbar(filterFrame, repeatinterval=5, command=function(...) tkyview(filterBox, ...)) tkconfigure(filterBox, yscrollcommand=function(...) 
tkset(filterScroll, ...)) for (x in f.variables) tkinsert(filterBox, "end", x) right31Frame <- tkframe(right3Frame) right32Frame <- tkframe(right3Frame) right33Frame <- tkframe(right3Frame) right34Frame <- tkframe(right3Frame) fixedThresholdVariable <- tclVar("-1") fixedThresholdEntry <- tkentry(right31Frame, width=20, textvariable=fixedThresholdVariable) sensitivityVariable <- tclVar("0.9") sensitivityEntry <- tkentry(right31Frame, width=20, textvariable=sensitivityVariable) PRESABSVariable <- tclVar("0") PRESABSCheckBox <- tkcheckbutton(right32Frame, variable=PRESABSVariable) absVariable <- tclVar("6") absEntry <- tkentry(right33Frame, width=20, textvariable=absVariable) presVariable <- tclVar("6") presEntry <- tkentry(right33Frame, width=20, textvariable=presVariable) locVariable <- tclVar("0") locCheckBox <- tkcheckbutton(right34Frame, variable=locVariable) onOK <- function(){ speciesValue <- species.names[as.numeric(tkcurselection(speciesBox))+1] stackValue <- s.variables[as.numeric(tkcurselection(stackBox))+1] methodValue <- m.variables[as.numeric(tkcurselection(methodBox))+1] filterValue <- f.variables[as.numeric(tkcurselection(filterBox))+1] var <- variables[as.numeric(tkcurselection(subsetBox))+1] sensitivity <- tclvalue(sensitivityVariable) fixedThreshold <- tclvalue(fixedThresholdVariable) PRESABS <- tclvalue(PRESABSVariable) == "1" if (var == "Sens=Spec"){PRESABS <- TRUE} if (var == "MaxSens+Spec"){PRESABS <- TRUE} if (var == "MaxKappa"){PRESABS <- TRUE} if (var == "MaxPCC"){PRESABS <- TRUE} if (var == "PredPrev=Obs"){PRESABS <- TRUE} if (var == "ObsPrev"){PRESABS <- TRUE} if (var == "MeanProb"){PRESABS <- TRUE} if (var == "MinROCdist"){PRESABS <- TRUE} if (var == "ReqSens"){PRESABS <- TRUE} presValue <- tclvalue(presVariable) absValue <- tclvalue(absVariable) if (filterValue == "(none)") { doItAndPrint(paste("ensemble.plot(RASTER.species.name='", speciesValue, "', RASTER.stack.name='", stackValue, "', plot.method='", methodValue, "', positive.filters=c('grd'), threshold=", fixedThreshold, ", p=", presence.focal, ", a=", absence.focal, ", threshold.method='", var, "', threshold.sensitivity=", sensitivity, ", threshold.PresenceAbsence=", PRESABS, ", abs.breaks=", absValue, ", pres.breaks=", presValue, ", maptools.boundaries=T, maptools.col='dimgrey')", sep="")) }else{ doItAndPrint(paste("ensemble.plot(RASTER.species.name='", speciesValue, "', RASTER.stack.name='", stackValue, "', plot.method='", methodValue, "', positive.filters=c('grd', '", filterValue, "'), threshold=", fixedThreshold, ", p=", presence.focal, ", a=", absence.focal, ", threshold.method='", var, "', threshold.sensitivity=", sensitivity, ", threshold.PresenceAbsence=", PRESABS, ", abs.breaks=", absValue, ", pres.breaks=", presValue, ", maptools.boundaries=T, maptools.col='dimgrey')", sep="")) } LOC <- tclvalue(locVariable) == "1" if (LOC == T) { if (methodValue == "suitability" || methodValue == "consensussuitability") { doItAndPrint(paste("points(", presence.focal, "[which(", presence.focal, "[, 1] == '", speciesValue, "'), c(2:3)], pch=21, col='black', bg='chartreuse', cex=1, lwd=1.5)", sep="")) } if (methodValue == "presence" || methodValue == "consensuspresence") { doItAndPrint(paste("points(", presence.focal, "[which(", presence.focal, "[, 1] == '", speciesValue, "'), c(2:3)], pch=21, col='black', bg='orange', cex=1, lwd=1.5)", sep="")) } if (methodValue == "count" || methodValue == "consensuscount" || methodValue == "consensussd") { doItAndPrint(paste("points(", presence.focal, "[which(", presence.focal, 
"[, 1] == '", speciesValue, "'), c(2:3)], pch=21, col='black', bg='blueviolet', cex=1, lwd=1.5)", sep="")) } } } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('ensemble.plot', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="Plot", width="12", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(speciesFrame, text="Species variable"), sticky="w") tkgrid(speciesBox, speciesScroll, sticky="w") tkgrid(speciesFrame, sticky="w") tkgrid(tklabel(methodFrame, text="Suitability method"), sticky="w") tkgrid(methodBox, methodScroll, sticky="w") tkgrid(methodFrame, sticky="w") tkgrid(left1Frame, right1Frame, sticky="w") tkgrid(firstFrame, sticky="w") tkgrid(tklabel(stackFrame, text="Stack"), sticky="w") tkgrid(stackBox, stackScroll, sticky="w") tkgrid(stackFrame, sticky="w") tkgrid(tklabel(right2Frame, text="Select threshold method"), sticky="w") tkgrid(subsetBox, subsetScroll, sticky="w") tkgrid(varFrame, sticky="w") tkgrid(left2Frame, right2Frame, sticky="w") tkgrid(secondFrame, sticky="w") tkgrid(tklabel(filterFrame, text="Filter"), sticky="w") tkgrid(filterBox, filterScroll, sticky="w") tkgrid(filterFrame, sticky="w") tkgrid(tklabel(right31Frame, text="threshold value", width=20), fixedThresholdEntry, sticky="w") tkgrid(tklabel(right31Frame, text="threshold.sensitivity", width=20), sensitivityEntry, sticky="w") tkgrid(PRESABSCheckBox, tklabel(right32Frame, text="PresenceAbsence package"), sticky="w") tkgrid(tklabel(right33Frame, text="breaks for absence", width=20), absEntry, sticky="w") tkgrid(tklabel(right33Frame, text="breaks for presence", width=20), presEntry, sticky="w") tkgrid(locCheckBox, tklabel(right34Frame, text="Add presence locations"), sticky="w") tkgrid(right31Frame, sticky="w") tkgrid(right32Frame, sticky="w") tkgrid(right33Frame, sticky="w") tkgrid(right34Frame, sticky="w") tkgrid(left3Frame, right3Frame, sticky="w") tkgrid(thirdFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(speciesScroll, sticky="ns") tkgrid.configure(stackScroll, sticky="ns") tkgrid.configure(methodScroll, sticky="ns") tkgrid.configure(filterScroll, sticky="ns") tkgrid.configure(subsetScroll, sticky="ns") tkselection.set(speciesBox, 0) tkselection.set(stackBox, 0) tkselection.set(methodBox, 0) tkselection.set(filterBox, 0) tkselection.set(subsetBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(speciesBox) tkwait.window(top) } model.select.menu <- function(){ logger(paste("Select models", sep="")) models.dir <- normalizePath(paste(getwd(), "//models", sep=""), mustWork=F) assign("models.default", models.dir, envir=.GlobalEnv) doItAndPrint(paste("models.file <- choose.files(default=models.default, filters='*models', multi=F)", sep="")) models.file <- normalizePath(models.file, mustWork=F) if (file.exists(models.file) == T) { assign("ensmodels.file", models.file, envir=.GlobalEnv) load(ensmodels.file) assign("focal.ensemble.object", ensemble.models, envir=.GlobalEnv) logger(paste("Focal ensemble models (object focal.ensemble.object) 
loaded from: ", models.file, sep="")) } Rcmdr::putRcmdr("dialog.values", list()) activateMenus() } eval.strip.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Evaluation strips") update.stacklist() s.variables <- stack.list stackFrame <- tkframe(top, relief="groove", borderwidth=2) stackBox <- tklistbox(stackFrame, width=65, height=8, selectmode="single", background="white", exportselection="FALSE") stackScroll <- tkscrollbar(stackFrame, repeatinterval=5, command=function(...) tkyview(stackBox, ...)) tkconfigure(stackBox, yscrollcommand=function(...) tkset(stackScroll, ...)) for (x in s.variables) tkinsert(stackBox, "end", x) mvariables <- eval(parse(text=paste("names(focal.ensemble.object$output.weights)[focal.ensemble.object$output.weights>0]", sep="")), envir=.GlobalEnv) mvariables <- c("ENSEMBLE", mvariables) modelFrame <- tkframe(top, relief="groove", borderwidth=2) modelBox <- tklistbox(modelFrame, width=65, height=8, selectmode="single", background="white", exportselection="FALSE") modelScroll <- tkscrollbar(modelFrame, repeatinterval=5, command=function(...) tkyview(modelBox, ...)) tkconfigure(modelBox, yscrollcommand=function(...) tkset(modelScroll, ...)) for (x in mvariables) tkinsert(modelBox, "end", x) vvariables <- eval(parse(text=paste("focal.ensemble.object$var.names", sep="")), envir=.GlobalEnv) variableFrame <- tkframe(top, relief="groove", borderwidth=2) variableBox <- tklistbox(variableFrame, width=65, height=8, selectmode="single", background="white", exportselection="FALSE") variableScroll <- tkscrollbar(variableFrame, repeatinterval=5, command=function(...) tkyview(variableBox, ...)) tkconfigure(variableBox, yscrollcommand=function(...) tkset(variableScroll, ...)) for (x in vvariables) tkinsert(variableBox, "end", x) onOK <- function(){ stackValue <- s.variables[as.numeric(tkcurselection(stackBox))+1] command <- paste("evaluation.strip.data(xn=", stackValue, ", models.list=focal.ensemble.object, steps=200)", sep="") logger(paste("strip.data <- ", command, sep="")) assign("strip.data", justDoIt(command), envir=.GlobalEnv) } onplot <- function(){ model.focal <- mvariables[as.numeric(tkcurselection(modelBox))+1] doItAndPrint(paste("evaluation.strip.plot(data=strip.data$plot.data, TrainData=strip.data$TrainData, model.focal='", model.focal, "', col='red')", sep="")) } onplot2 <- function(){ variable.focal <- vvariables[as.numeric(tkcurselection(variableBox))+1] doItAndPrint(paste("evaluation.strip.plot(data=strip.data$plot.data, TrainData=strip.data$TrainData, variable.focal='", variable.focal, "', col='red')", sep="")) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('evaluation.strip.data', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="prepare data", width="15", command=onOK, default="active") plotbutton <- tkbutton(buttonsFrame, text="plot model", width="12", command=onplot, default="active") plot2button <- tkbutton(buttonsFrame, text="plot variable", width="12", command=onplot2, default="active") tkgrid(tklabel(stackFrame, text="Select stack to prepare data"), sticky="w") tkgrid(stackBox, stackScroll, sticky="w") tkgrid(stackFrame, sticky="w") tkgrid(tklabel(modelFrame, text="Select model to plot"), sticky="w") tkgrid(modelBox, modelScroll, sticky="w") tkgrid(modelFrame, sticky="w") tkgrid(tklabel(variableFrame, text="Select variable to plot"), sticky="w") 
tkgrid(variableBox, variableScroll, sticky="w") tkgrid(variableFrame, sticky="w") tkgrid(OKbutton, plotbutton, plot2button, tklabel(buttonsFrame, text=" "), helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(variableScroll, sticky="ns") tkgrid.configure(modelScroll, sticky="ns") tkgrid.configure(stackScroll, sticky="ns") tkselection.set(variableBox, 0) tkselection.set(modelBox, 0) tkselection.set(stackBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(modelBox) tkwait.window(top) } ens.predict.GUI <- function(){ top <- tktoplevel() tkwm.title(top, "Predict ensemble models") update.stacklist() s.variables <- stack.list stackFrame <- tkframe(top, relief="groove", borderwidth=2) stackBox <- tklistbox(stackFrame, width=50, height=8, selectmode="single", background="white", exportselection="FALSE") stackScroll <- tkscrollbar(stackFrame, repeatinterval=5, command=function(...) tkyview(stackBox, ...)) tkconfigure(stackBox, yscrollcommand=function(...) tkset(stackScroll, ...)) for (x in s.variables) tkinsert(stackBox, "end", x) onOK <- function(){ stackValue <- s.variables[as.numeric(tkcurselection(stackBox))+1] doItAndPrint(paste("ensemble.raster(xn=", stackValue, ", models.list=focal.ensemble.object, RASTER.species.name = focal.ensemble.object$species.name)", sep="")) } onCancel <- function() { tkgrab.release(top) tkfocus(CommanderWindow()) tkdestroy(top) } buttonsFrame <- tkframe(top) onHelp <- function() { if (.Platform$OS.type != "windows") tkgrab.release(top) doItAndPrint(paste("help('ensemble.raster', help_type='html')", sep="")) } helpButton <- tkbutton(buttonsFrame, text="Help", width="12", command=onHelp) OKbutton <- tkbutton(buttonsFrame, text="OK", width="15", command=onOK, default="active") cancelButton <- tkbutton(buttonsFrame, text="Cancel", width="12", command=onCancel) tkgrid(tklabel(stackFrame, text="Select stack to predict suitabilities"), sticky="w") tkgrid(stackBox, stackScroll, sticky="w") tkgrid(stackFrame, sticky="w") tkgrid(OKbutton, cancelButton, helpButton) tkgrid(buttonsFrame, sticky="w") tkgrid.configure(stackScroll, sticky="ns") tkselection.set(stackBox, 0) for (row in 0:6) tkgrid.rowconfigure(top, row, weight=0) for (col in 0:0) tkgrid.columnconfigure(top, col, weight=0) .Tcl("update idletasks") tkwm.resizable(top, 0, 0) tkwm.deiconify(top) tkgrab.set(top) tkfocus(stackBox) tkwait.window(top) }
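# Note on the plotting dialog above: onOK never calls ensemble.plot() directly;
# it assembles a command string from the list-box selections and submits it via
# doItAndPrint(). The sketch below (guarded by if (FALSE) so it is never run
# when this file is sourced) shows the shape of a generated call; 'Species_A',
# 'stack1' and the objects presence_data/absence_data are hypothetical
# stand-ins for the user's selections and for the data sets named by
# presence.focal and absence.focal.
if (FALSE) {
    ensemble.plot(RASTER.species.name='Species_A', RASTER.stack.name='stack1',
        plot.method='suitability', positive.filters=c('grd'), threshold=-1,
        p=presence_data, a=absence_data, threshold.method='spec_sens',
        threshold.sensitivity=0.9, threshold.PresenceAbsence=FALSE,
        abs.breaks=6, pres.breaks=6, maptools.boundaries=T, maptools.col='dimgrey')
}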
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/inst/etc/BiodiversityGUI.R
<!-- R Commander Markdown Template -->

Replace with Main Title
=======================

### Your Name

### `r as.character(Sys.Date())`

```{r echo=FALSE}
# include this code chunk as-is to set options
knitr::opts_chunk$set(comment=NA, prompt=TRUE, out.width=750, fig.height=8, fig.width=8)
library(Rcmdr)
library(car)
library(RcmdrMisc)
```
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/inst/etc/Rcmdr-Markdown-Template.Rmd
---
title: "Replace with Main Title"
author: "Your Name"
date: "`r Sys.Date()`" # Uses current date
---

<!-- You can edit this R Markdown document, for example to explain what
you're doing and to draw conclusions from your data analysis. Auto-generated
section titles, typically preceded by ###, can also be edited. It's generally
not a good idea to edit the R code that the R Commander writes, but you can
freely edit between (not within) R "code blocks." Each R code block starts
with ```{r} and ends with ```. -->

```{r echo=FALSE, message=FALSE}
# include this code chunk as-is to set options
knitr::opts_chunk$set(comment=NA, prompt=TRUE)
library(Rcmdr)
library(car)
library(RcmdrMisc)
```
/scratch/gouwar.j/cran-all/cranData/BiodiversityR/inst/etc/Rcmdr-RMarkdown-Template.Rmd
#' \code{Bioi} package
#'
#' PALM/iPALM localization and biological image analysis functions.
#'
#' @docType package
#' @name Bioi
#' @useDynLib Bioi, .registration = TRUE
NULL
/scratch/gouwar.j/cran-all/cranData/Bioi/R/Bioi.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

#' @title Return the group number for each localization.
#'
#' @description
#' Group PALM/iPALM localizations based on their physical separation distance
#'
#' PALM/iPALM data results in a list of spatial coordinates for fluorophore
#' localizations. This function groups nearby localizations if they are within
#' the provided critical distance from each other.
#'
#' @param input A numeric matrix where each row is a localization and each
#' column is a spatial axis.
#' @param critDist The critical distance for which localizations nearer than
#' this distance are deemed part of the same group.
#' @param use_prog_bar A logical indicating whether a progress bar should be
#' used. This must be set to false when running in parallel.
#'
#' @author Zach Colburn
#'
#' @examples
#' # Function call
#' \dontrun{.euclidean_linker_cpp(inputMatrix, critDist)}
#'
#' @import Rcpp
#'
#' @useDynLib Bioi, .registration = TRUE
.euclidean_linker_cpp <- function(input, critDist, use_prog_bar = TRUE) {
    .Call(`_Bioi_euclidean_linker_cpp`, input, critDist, use_prog_bar)
}

#' @title For all points in matrix 1, return the distance to and index of the
#' nearest point in matrix 2.
#'
#' @description
#' Find the shortest distance between each point in one data set and the points
#' in a second set.
#'
#' This function determines the distance between every point in data set 1 and
#' the points in data set 2. Unlike this function's naive counterpart,
#' find_min_dists, this function divides the PALM/iPALM localization data
#' into blocks, operates on the data in each block, and then performs linking
#' operations on neighboring blocks.
#'
#' @param mOne A numeric matrix where each row is a localization and each
#' column is a spatial axis.
#' @param mTwo A numeric matrix with the same number of columns as mOne.
#'
#' @author Zach Colburn
#'
#' @examples
#' \dontrun{
#' set.seed(10)
#'
#' mOne <- as.matrix(data.frame(
#'  x = rnorm(10),
#'  y = rbinom(10, 100, 0.5),
#'  z = runif(10)
#' ))
#'
#' mTwo <- as.matrix(data.frame(
#'  x = rnorm(20),
#'  y = rbinom(20, 100, 0.5),
#'  z = runif(20)
#' ))
#'
#' .find_min_dists_cpp(mOne, mTwo)
#'}
#'
#' @import Rcpp
#'
#' @useDynLib Bioi, .registration = TRUE
.find_min_dists_cpp <- function(mOne, mTwo) {
    .Call(`_Bioi_find_min_dists_cpp`, mOne, mTwo)
}
/scratch/gouwar.j/cran-all/cranData/Bioi/R/RcppExports.R
#' Group PALM/iPALM localizations based on their physical separation distance
#'
#' PALM/iPALM data results in a list of spatial coordinates for fluorophore
#' localizations. This function groups nearby localizations if they are within
#' the provided critical distance from each other.
#'
#' @title Return the group number for each localization.
#'
#' @param input A numeric matrix where each row is a localization and each
#' column is a spatial axis.
#' @param critDist The critical distance for which localizations nearer than
#' this distance are deemed part of the same group.
#' @param use_prog_bar TRUE/FALSE indicating whether a progress bar should be
#' used. This is only available when run_parallel is FALSE.
#' @param run_parallel TRUE/FALSE indicating whether operations should be
#' performed in parallel. This is only valid if partitioning is performed.
#' @param num_cores The number of cores to use if running in parallel.
#' @param partition_req The minimum number of points required to create a new
#' partition.
#' @param parallel_call_depth The number of levels of partitioning that should
#' be performed before terminating calls to run operations in parallel. The
#' number of threads opened when running in parallel is equal to
#' 2^(parallel_call_depth)*num_cores.
#' @param ... Additional parameters passed to euclidean_linker (i.e.
#' find_blobs).
#'
#' @author Zach Colburn
#'
#' @examples
#' # Generate random data.
#' #set.seed(10)
#' #input <- as.matrix(data.frame(x=rnorm(10),y=rnorm(10)))
#'
#' # Perform linking.
#' #euclidean_linker(input, 0.4)
#'
#' @export
#'
#' @importFrom assertthat assert_that is.number is.flag
#' @importFrom parallel parLapply makeCluster detectCores
euclidean_linker <- function(
  input,
  critDist,
  use_prog_bar = TRUE,
  run_parallel = FALSE,
  num_cores = NULL,
  partition_req = 5000,
  parallel_call_depth = 3,
  ...
) {
  assert_that(class(input)[1] == "matrix")
  assert_that(class(input[1])[1] %in% c("integer", "numeric"))
  assert_that(nrow(input) >= 1)
  assert_that(nrow(input) < 2147483646) # The C++ max limit for an int minus 1.
  assert_that(ncol(input) > 0)

  assert_that(is.number(critDist))
  assert_that(critDist > 0)

  assert_that(is.flag(use_prog_bar))
  assert_that(is.flag(run_parallel))
  assert_that(
    is.number(partition_req) &&
      (partition_req >= 100) &&
      (as.integer(partition_req) == partition_req)
  )
  assert_that(
    is.null(num_cores) ||
      is.number(num_cores)
  )

  if(run_parallel){use_prog_bar <- FALSE}

  if(
    is.null(num_cores) &&
    run_parallel
  ){
    num_cores <- detectCores()
  }else if(run_parallel){
    assert_that(
      (as.integer(num_cores) == num_cores) &&
        (num_cores >= 2) &&
        (num_cores <= detectCores())
    )
  }

  finding_blobs <- FALSE
  if("find_blobs" %in% names(match.call())){
    assert_that(is.flag(match.call()[["find_blobs"]]))
    finding_blobs <- match.call()[["find_blobs"]]
  }

  if(finding_blobs){
    min_gap <- 6
  }else{
    min_gap <- 6*critDist
  }

  # Return 1 if there is only a single input point.
  if(nrow(input) == 1){
    return(1)
  }

  # If partitioning:
  if(nrow(input) >= partition_req){
    # This value is used as an identifier of ungrouped points. It is used
    # in ".euclidean_linker_cpp" and ".perform_grouping" as well. The value
    # should not be changed.
    no_group <- -1

    groups <- .perform_partitioning(
      input,
      critDist = critDist,
      use_prog_bar = FALSE,
      run_parallel = run_parallel,
      num_cores = num_cores,
      partition_req = partition_req,
      parallel_call_depth = parallel_call_depth,
      min_gap = min_gap
    )

    output <- as.numeric(factor(groups))

    # Ensure memory from any parallelized threads is retrieved.
    gc()

    return(output)
  }

  # If not partitioning:
  output <- .perform_grouping(
    input,
    critDist,
    use_prog_bar = use_prog_bar
  )

  # Return the output.
  output
}
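# A minimal usage sketch, guarded by if (FALSE) so it never runs on source.
# The serial call mirrors the commented roxygen example above; the second call
# lowers partition_req below the point count to force the partitioning branch
# and exercises the parallel path with two cores.
if (FALSE) {
  set.seed(10)
  input <- as.matrix(data.frame(x = rnorm(500), y = rnorm(500)))

  # Serial linking with a progress bar.
  groups <- euclidean_linker(input, critDist = 0.4)

  # Partitioned, parallel linking.
  groups_par <- euclidean_linker(
    input, critDist = 0.4,
    run_parallel = TRUE, num_cores = 2, partition_req = 100
  )
}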
/scratch/gouwar.j/cran-all/cranData/Bioi/R/euclidean_linker.R
#' @title Assign all neighboring pixels the same group number.
#'
#' @description
#' Perform connected-component labeling to group continuous, thresholded
#' objects in 3-dimensional arrays.
#'
#' This function takes a vector, matrix, or 3-dimensional array where each
#' element is TRUE if it corresponds to an object-positive index or FALSE if it
#' corresponds to a background index. An object of the same dimension as the
#' input is returned. All connected object indices take the value of their
#' group number and all background indices take the value NA.
#'
#' @param arr A vector, matrix, or 3-dimensional array where object-positive
#' elements are denoted by the value TRUE and background elements are denoted
#' by the value FALSE.
#' @param use_prog_bar TRUE/FALSE indicating whether a progress bar should be
#' used. This is only available when run_parallel is FALSE.
#' @param run_parallel TRUE/FALSE indicating whether operations should be
#' performed in parallel. This is only valid if partitioning is performed.
#' @param num_cores The number of cores to use if running in parallel.
#' @param partition_req The minimum number of points required to create a new
#' partition.
#' @param parallel_call_depth The number of levels of partitioning that should
#' be performed before terminating calls to run operations in parallel. The
#' number of threads opened when running in parallel is equal to
#' 2^(parallel_call_depth)*num_cores.
#'
#' @author Zach Colburn
#'
#' @examples
#' # Generate a random matrix.
#' set.seed(10)
#' mat <- matrix(runif(70), nrow = 7)
#'
#' # Arbitrarily say that everything below 0.8 is background.
#' logical_mat <- mat > 0.8
#'
#' # Find blobs.
#' find_blobs(logical_mat)
#'
#' @export
#'
#' @importFrom assertthat assert_that noNA is.number
find_blobs <- function(
  arr,
  use_prog_bar = TRUE,
  run_parallel = FALSE,
  num_cores = NULL,
  partition_req = NULL,
  parallel_call_depth = 3
) {
  # Perform type checking.
  assert_that(is.vector(arr) || is.matrix(arr) || is.array(arr))
  assert_that(length(arr) >= 1)
  assert_that(class(arr[1])[1] == "logical")
  assert_that(noNA(arr))
  if(is.null(partition_req)){partition_req <- 1000000}
  assert_that(
    is.number(partition_req) &&
      (as.integer(partition_req) == partition_req) &&
      (partition_req > 0)
  )

  # Get object class and attributes.
  initial_class <- class(arr)[1]
  arr_attributes <- attributes(arr)

  # Convert arr to an array and store object indices in "input".
  arr <- as.array(arr)
  input <- as.matrix(which(arr, arr.ind = TRUE))

  # Initialize the output object.
  output <- array(NA, dim = dim(arr), dimnames = dimnames(arr))

  # If there are no object indices then return output without performing
  # any more operations.
  if(nrow(input) == 0){
    print("There are no objects to link!")
    return(output)
  }

  # Use the euclidean_linker_cpp function to link neighboring object indices.
  # Since the critical distance is sqrt(3), every neighboring object index
  # (both horizontally/vertically and diagonally) will be joined in 1, 2, or 3
  # dimensions. "links" is a vector of group numbers.
  links <- euclidean_linker(
    input,
    critDist = sqrt(3),
    use_prog_bar = use_prog_bar,
    run_parallel = run_parallel,
    num_cores = num_cores,
    partition_req = partition_req,
    parallel_call_depth = parallel_call_depth,
    find_blobs = TRUE
  )

  # Assign indices in the output object their respective group number.
  output[input] <- links

  # Convert the output object to the class of the original arr object.
  if(initial_class == "logical"){
    output <- as.vector(output)
  }else if(initial_class == "matrix"){
    output <- as.matrix(output)
  }

  # Restore the object's attributes. This is mainly for restoring vector
  # element names and matrix row/column names.
  attributes(output) <- arr_attributes

  # Return the output object.
  return(output)
}

#' @title Assign all neighboring pixels the same group number.
#'
#' @description
#' This function is deprecated. It now calls the more efficient find_blobs
#' method.
#'
#' This function takes a matrix corresponding to a thresholded image and
#' returns a matrix of the same size, where all adjacent, thresholded pixels
#' are the same integer corresponding to that object's cluster number.
#'
#' @param img A thresholded matrix (where non-object pixels are assigned a
#' value of 0).
#' @param pixRange This parameter is now obsolete. Previously, the parameter
#' denoted an integer number of pixels to specify a search region. Execution
#' was faster when this value was small. However, the value needed to be larger
#' than the diameter of the largest continuous object in the image.
#'
#' @author Zach Colburn
#'
#' @examples
#' # Generate a random matrix.
#' set.seed(10)
#' mat <- matrix(runif(70), nrow = 7)
#'
#' # Arbitrarily say that everything below 0.8 is background.
#' mat[mat < 0.8] <- 0
#'
#' # Find blobs.
#' identify_thresholded_objects(mat)
#'
#' @export
#'
#' @importFrom assertthat assert_that is.number
identify_thresholded_objects <- function(img, pixRange = 50){
  # Perform type checking. This function was meant to receive different inputs
  # than the function find_blobs which replaces it. Type checking is performed
  # to ensure object inputs are backwards compatible.
  assert_that(class(img)[1] == "matrix")
  assert_that(length(img) >= 1)
  assert_that(class(img[1]) %in% c("integer", "numeric"))
  assert_that(nrow(img) >= 1)
  assert_that(ncol(img) > 0)

  # If pixRange has changed, then inform the user that its use is deprecated.
  assert_that(is.number(pixRange))
  if(pixRange != 50){
    print("The use of pixRange is deprecated.")
  }

  # Convert the input to a logical matrix.
  img <- img != 0

  # Perform connected component labeling.
  find_blobs(img)
}
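# A short sketch (never run at source time) of the 3-dimensional case, which
# the roxygen examples above do not cover: voxels touching face-to-face or
# diagonally receive the same group number, since critDist = sqrt(3) spans the
# diagonal of a unit cube.
if (FALSE) {
  set.seed(10)
  vol <- array(runif(3 * 4 * 5) > 0.7, dim = c(3, 4, 5))
  find_blobs(vol)
}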
/scratch/gouwar.j/cran-all/cranData/Bioi/R/find_blobs.R
#' @title For all points in matrix 1, return the distance to and index of the
#' nearest point in matrix 2.
#'
#' @description
#' Find the shortest distance between each point in one data set and the points
#' in a second set.
#'
#' This function determines the distance between every point in mOne and the
#' nearest point in mTwo.
#'
#' @param mOne A numeric matrix where each row is a localization and each
#' column is a spatial axis.
#' @param mTwo A numeric matrix with the same number of columns as mOne.
#'
#' @author Zach Colburn
#'
#' @examples
#' # Generate random data.
#' set.seed(10)
#'
#' mOne <- as.matrix(data.frame(
#'  x = rnorm(10),
#'  y = rbinom(10, 100, 0.5),
#'  z = runif(10)
#' ))
#'
#' mTwo <- as.matrix(data.frame(
#'  x = rnorm(20),
#'  y = rbinom(20, 100, 0.5),
#'  z = runif(20)
#' ))
#'
#' # Find the minimum distance between each point in mOne and the points in
#' # mTwo.
#' find_min_dists(mOne, mTwo)
#'
#' @export
#'
#' @importFrom assertthat assert_that
find_min_dists <- function(mOne, mTwo) {
  # Perform type checking.
  assert_that(class(mOne)[1] == "matrix")
  assert_that(length(mOne) >= 1)
  assert_that(class(mOne[1]) %in% c("integer", "numeric"))
  assert_that(nrow(mOne) >= 1)
  assert_that(ncol(mOne) > 0)

  assert_that(class(mTwo)[1] == "matrix")
  assert_that(length(mTwo) >= 1)
  assert_that(class(mTwo[1]) %in% c("integer", "numeric"))
  assert_that(nrow(mTwo) >= 1)
  assert_that(ncol(mTwo) > 0)

  # Find the minimum distance between each point in mOne and the points in
  # mTwo.
  .find_min_dists_cpp(mOne, mTwo)
}
/scratch/gouwar.j/cran-all/cranData/Bioi/R/find_min_dists.R
#' Group PALM/iPALM localizations based on their physical separation distance
#'
#' PALM/iPALM data results in a list of spatial coordinates for fluorophore
#' localizations. This function groups nearby localizations if they are within
#' the provided critical distance from each other.
#'
#' @title Return the group number for each localization.
#'
#' @param input A numeric matrix where each row is a localization and each
#' column is a spatial axis.
#' @param critDist The critical distance for which localizations nearer than
#' this distance are deemed part of the same group.
#' @param use_prog_bar TRUE/FALSE indicating whether a progress bar should be
#' used. This is only available when run_parallel is FALSE.
#'
#' @author Zach Colburn
#'
#' @export
#'
#' @importFrom assertthat assert_that
#' @importFrom stats var
.perform_grouping <- function(
  input,
  critDist,
  use_prog_bar = TRUE
) {
  # Complete type checking was performed at the start of "euclidean_linker"
  # which calls this function. As such, only critical type checks are
  # performed. This is meant mainly to ensure that data passed from the
  # function ".perform_partitioning" which is called by "euclidean_linker" is
  # in the correct format.
  assert_that(class(input)[1] == "matrix")
  assert_that(class(input[1]) %in% c("integer", "numeric"))
  assert_that(nrow(input) >= 1)

  # Return 1 if there is only a single input point.
  if(nrow(input) == 1){
    return(1)
  }

  # Reorder the columns such that the first column accounts for the most
  # variance in position and the last column accounts for the least variance
  # in position.
  column_variances <- apply(input, MARGIN = 2, FUN = var)
  new_column_order <- order(column_variances, decreasing = TRUE)
  input <- input[, new_column_order, drop = FALSE]

  # Order elements in input.
  ordered_elements <- do.call(order, as.data.frame(input))

  # Arrange the rows in input based on the ordering performed above.
  input <- input[ordered_elements,,drop = FALSE]

  # Identify groups.
  groups <- .euclidean_linker_cpp(
    input,
    critDist,
    use_prog_bar = use_prog_bar
  )

  # Change the order of groups in the output to undo the sorting performed
  # above.
  output <- vector(mode = "numeric", length = length(groups))
  output[ordered_elements] <- groups

  # Change group numbers such that they range from 1 to number of groups.
  output <- as.numeric(factor(output))

  # Return the output.
  output
}
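# Design note (an inference from the code above, not package documentation):
# ordering the rows along the highest-variance axis appears intended to give
# the C++ linker a nearly linear spatial scan, so neighbour candidates tend to
# sit in adjacent rows. A tiny illustration, guarded so it never runs on
# source:
if (FALSE) {
  input <- as.matrix(data.frame(x = c(5, 1, 3), y = c(0, 0, 0)))
  # Rows are internally visited in x order (1, 3, 5) before the original
  # ordering is restored for the returned group vector.
  .perform_grouping(input, critDist = 2.5)
}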
/scratch/gouwar.j/cran-all/cranData/Bioi/R/perform_grouping.R
#' Group PALM/iPALM localizations based on their physical separation distance
#'
#' PALM/iPALM data results in a list of spatial coordinates for fluorophore
#' localizations. This function groups nearby localizations if they are within
#' the provided critical distance from each other.
#'
#' @title Return the group number for each localization.
#'
#' @param input A numeric matrix where each row is a localization and each
#' column is a spatial axis.
#' @param critDist The critical distance for which localizations nearer than
#' this distance are deemed part of the same group.
#' @param use_prog_bar TRUE/FALSE indicating whether a progress bar should be
#' used. This is only available when run_parallel is FALSE.
#' @param run_parallel TRUE/FALSE indicating whether operations should be
#' performed in parallel. This is only valid if partitioning is performed.
#' @param num_cores The number of cores to use if running in parallel.
#' @param partition_req The minimum number of points required to create a new
#' partition.
#' @param parallel_call_depth The number of levels of partitioning that should
#' be performed before terminating calls to run operations in parallel. The
#' number of threads opened when running in parallel is equal to
#' 2^(parallel_call_depth)*num_cores.
#' @param min_gap The minimum width of any dimension created during
#' partitioning.
#'
#' @author Zach Colburn
#'
#' @export
#'
#' @importFrom assertthat assert_that is.number is.flag
#' @importFrom parallel parLapply makeCluster detectCores
#' @importFrom igraph components graph.data.frame
#' @importFrom dplyr mutate_all
.perform_partitioning <- function(
  input,
  critDist,
  use_prog_bar = TRUE,
  run_parallel = FALSE,
  num_cores = NULL,
  partition_req = 5000,
  parallel_call_depth = 3,
  min_gap = NULL
) {
  # Perform essential type checking.
  assert_that((class(input)[1] == "matrix"))
  assert_that(is.number(min_gap))

  # Define the ungrouped value. Don't change this.
  no_group <- -1

  # Check whether parallel_call_depth is 1. If so set run_parallel to FALSE.
  if(parallel_call_depth == 1){
    run_parallel <- FALSE
  }

  # Handle the case of only a single input point.
  if(nrow(input) == 1){
    return(1)
  }

  # Specify partitions.
  use_prog_bar <- FALSE
  ranges <- apply(input, 2, function(item){max(item) - min(item)})
  dim_to_split <- which.max(ranges)

  # If the partition is too small, then run euclidean linker on this data set
  # without performing partitioning, regardless of how many points are in the
  # partition. Also, if the number of points in this partition is less than
  # the partition requirement, then perform grouping.
  if(
    (min(ranges) <= min_gap) ||
    (nrow(input) <= partition_req)
  ){
    groups <- .perform_grouping(
      input,
      critDist,
      use_prog_bar = use_prog_bar
    )
    return(groups)
  }

  # Determine partitioning bounds.
  max_pos <- max(input[,dim_to_split])
  min_pos <- min(input[,dim_to_split])
  mid_pos <- min_pos + (max_pos - min_pos)/2

  # Assign points to partitions and stem group.
  partition <- rep(1, nrow(input))
  partition[input[,dim_to_split] > mid_pos] <- 2
  stem <- (input[,dim_to_split] >= (mid_pos - min_gap/2)) &
    (input[,dim_to_split] < (mid_pos + min_gap/2))

  # If the stem and both partitions don't all have points then group now.
  if(!(
    (sum(stem) >= 1) &&
    (sum(partition == 1) >= 1) &&
    (sum(partition == 2) >= 1)
  )){
    groups <- .perform_grouping(
      input,
      critDist,
      use_prog_bar = use_prog_bar
    )
    groups <- as.numeric(factor(groups))
    return(groups)
  }

  # Perform grouping.
  if(!run_parallel){
    groups <- lapply(
      list(
        input[partition == 1,, drop = FALSE],
        input[partition == 2,, drop = FALSE],
        input[stem,, drop = FALSE]
      ),
      function(item){
        .perform_partitioning(
          item,
          critDist,
          use_prog_bar = FALSE,
          run_parallel = run_parallel,
          num_cores = NULL,
          partition_req = partition_req,
          min_gap = min_gap
        )
      }
    )
  }else{
    # Perform operations in parallel.
    groups <- parLapply(
      makeCluster(num_cores),
      list(
        input[partition == 1,, drop = FALSE],
        input[partition == 2,, drop = FALSE],
        input[stem,, drop = FALSE]
      ),
      function(
        item,
        critDist,
        run_parallel,
        num_cores,
        partition_req,
        parallel_call_depth,
        .perform_partitioning,
        min_gap
      ){
        .perform_partitioning(
          item,
          critDist,
          use_prog_bar = FALSE,
          run_parallel = run_parallel,
          num_cores = num_cores,
          partition_req = partition_req,
          parallel_call_depth = parallel_call_depth - 1,
          min_gap = min_gap
        )
      },
      critDist,
      run_parallel,
      num_cores,
      partition_req,
      parallel_call_depth,
      .perform_partitioning,
      min_gap
    )
  }

  # Shift up par 2 group numbers so they don't overlap with par 1 group
  # numbers.
  groups[[2]] <- groups[[2]] + max(groups[[1]])

  # Create a matrix of these values.
  mat <- matrix(no_group, nrow = length(partition), ncol = 3)
  mat[partition == 1, 1] <- groups[[1]]
  mat[partition == 2, 2] <- groups[[2]]
  mat[stem, 3] <- groups[[3]]

  # Set the values in the stem column to be larger than the values in the
  # other columns.
  mat[mat[,3] != -1, 3] <- mat[mat[,3] != -1, 3] + max(mat[, c(1,2)])

  # If there are no points in the stem, then retrieve the final group numbers.
  if(length(unique(mat[mat[,3] != no_group, 3])) == 0){
    mat <- mat[, 1:2, drop = FALSE]
    groups <- t(mat)
    groups <- groups[groups != no_group]
    groups <- as.numeric(factor(groups))
    return(groups)
  }

  # Structure incoming data.
  colnames(mat) <- c("pOne", "pTwo", "stem")
  mat <- as.data.frame(mat)

  # Link pOne to stem and update group numbers accordingly.
  ## Select pOne and stem.
  pOne <- mat[,c("pOne", "stem"), drop = FALSE]
  ## Determine the largest group number.
  maxGroup <- max(apply(mat, 2, max))
  ## Create a data.frame of the links to be made.
  pOne_dict <- pOne[
    (pOne[,"pOne"] != no_group) & (pOne[,"stem"] != no_group),
    ,
    drop = FALSE
  ]
  ## Convert those values to factors.
  pOne_dict <- mutate_all(pOne_dict, as.factor)
  ## Perform linking.
  members <- components(
    graph.data.frame(
      pOne_dict,
      directed = FALSE
    ),
    mode = "weak"
  )$membership
  ## Convert mat to a matrix in order to then update all group numbers
  ## accordingly.
  mat <- as.matrix(mat)
  logicalVector <- (mat != no_group) & (as.character(mat) %in% names(members))
  mat[logicalVector] <- members[as.character(mat[logicalVector])] + maxGroup
  mat <- as.data.frame(mat)

  # Link pTwo to stem and update group numbers accordingly.
  ## Select pTwo and stem.
  pTwo <- mat[,c("pTwo", "stem"), drop = FALSE]
  ## Determine the largest group number.
  maxGroup <- max(apply(mat, 2, max))
  ## Create a data.frame of the links to be made.
  pTwo_dict <- pTwo[
    (pTwo[,"pTwo"] != no_group) & (pTwo[,"stem"] != no_group),
    ,
    drop = FALSE
  ]
  ## Convert those values to factors.
  pTwo_dict <- mutate_all(pTwo_dict, as.factor)
  ## Perform linking.
  members <- components(
    graph.data.frame(
      pTwo_dict,
      directed = FALSE
    ),
    mode = "weak"
  )$membership
  ## Convert mat to a matrix then update all group numbers accordingly.
  mat <- as.matrix(mat)
  logicalVector <- (mat != no_group) & (as.character(mat) %in% names(members))
  mat[logicalVector] <- members[as.character(mat[logicalVector])] + maxGroup

  # Select the pOne and pTwo columns.
  mat <- mat[,c("pOne", "pTwo"), drop = FALSE]

  # Transpose the matrix and then remove all no_group values.
  mat <- t(mat)
  groups <- mat[mat != no_group]

  gc()

  as.numeric(factor(groups))
}
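# Schematic of the split performed above, for one partitioning step along the
# widest axis (descriptive comments only; exact group numbers depend on the
# recursion):
#
#   partition 1               stem                partition 2
#   x <= mid     [mid - gap/2, mid + gap/2)       x > mid
#
# Stem points are grouped twice, once in the stem set and once inside their
# partition, and the igraph weak-component steps then merge any partition
# group that shares a stem group, relinking clusters that straddle the split.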
/scratch/gouwar.j/cran-all/cranData/Bioi/R/perform_partitioning.R
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## -----------------------------------------------------------------------------
library(Bioi)

# Generate two data sets to simulate 3D PALM data.
set.seed(10)

mOne <- as.matrix(data.frame(
  x = rnorm(10),
  y = rbinom(10, 100, 0.5),
  z = runif(10)
))

mTwo <- as.matrix(data.frame(
  x = rnorm(20),
  y = rbinom(20, 100, 0.5),
  z = runif(20)
))

# Get separation distances.
find_min_dists(mOne, mTwo)

## -----------------------------------------------------------------------------
# Generate random data.
set.seed(10)
input <- as.matrix(data.frame(x=rnorm(10),y=rnorm(10)))

# Perform clustering.
groups <- euclidean_linker(input, 0.8)
print(groups)

## ----eval=FALSE---------------------------------------------------------------
#  library(ggplot2)
#  input <- as.data.frame(input)
#  input$group <- as.character(groups)
#  ggplot(input, aes(x, y, colour = group)) + geom_point(size = 3)

## -----------------------------------------------------------------------------
# Generate a random matrix.
set.seed(10)
mat <- matrix(runif(70), nrow = 7)

# Arbitrarily say that everything below 0.8 is background.
logical_mat <- mat > 0.8

# Row names and column names are preserved in the output of find_blobs
rownames(logical_mat) <- letters[1:7]
colnames(logical_mat) <- 1:10

# Find blobs
find_blobs(logical_mat)
/scratch/gouwar.j/cran-all/cranData/Bioi/inst/doc/Bioi.R
---
title: "Bioi"
author: "Zachary Colburn"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Bioi}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r setup, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

Bioi provides functions to perform connected component labeling on 1, 2, or 3
dimensional arrays, single linkage clustering, and identification of the
nearest neighboring point in a second data set.

# Determine the minimum separation distance between two sets of points

Dual channel PALM or iPALM data can represent the localizations of two
distinct fluorophore-labeled proteins. To determine the minimum distance
separating each localization of protein A from protein B, the following code
could be implemented. Each row in the output of find_min_dists corresponds to
the point denoted by the same row in mOne. "dist" is the distance to the
nearest point in mTwo, whose row number in mTwo is given in the "index"
column.

```{r}
library(Bioi)

# Generate two data sets to simulate 3D PALM data.
set.seed(10)

mOne <- as.matrix(data.frame(
  x = rnorm(10),
  y = rbinom(10, 100, 0.5),
  z = runif(10)
))

mTwo <- as.matrix(data.frame(
  x = rnorm(20),
  y = rbinom(20, 100, 0.5),
  z = runif(20)
))

# Get separation distances.
find_min_dists(mOne, mTwo)
```

# Single linkage clustering

PALM/iPALM localizations in very close proximity may belong to the same
superstructure, for example a single focal adhesion. By linking localizations
separated by less than some empirically determined distance, these different
superstructures can be identified. Below, 2D PALM data is simulated and all
points falling within a distance of 0.8 are linked.

```{r}
# Generate random data.
set.seed(10)
input <- as.matrix(data.frame(x=rnorm(10),y=rnorm(10)))

# Perform clustering.
groups <- euclidean_linker(input, 0.8)
print(groups)
```

Clusters can be easily visualized using ggplot2.

```{r,eval=FALSE}
library(ggplot2)
input <- as.data.frame(input)
input$group <- as.character(groups)
ggplot(input, aes(x, y, colour = group)) + geom_point(size = 3)
```

# Label connected components (i.e. find contiguous blobs)

Following background subtraction and thresholding, distinct cellular
structures (focal adhesions for example) can be identified in fluorescent
microscopy images. Array elements that are connected horizontally/vertically
or diagonally are labeled with the same group number. Each contiguous object
is labeled with a different group number. The function find_blobs can be used
to implement this functionality in 1, 2, or 3 dimensions. Below, this
operation is performed on a matrix.

```{r}
# Generate a random matrix.
set.seed(10)
mat <- matrix(runif(70), nrow = 7)

# Arbitrarily say that everything below 0.8 is background.
logical_mat <- mat > 0.8

# Row names and column names are preserved in the output of find_blobs
rownames(logical_mat) <- letters[1:7]
colnames(logical_mat) <- 1:10

# Find blobs
find_blobs(logical_mat)
```
/scratch/gouwar.j/cran-all/cranData/Bioi/inst/doc/Bioi.Rmd
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/

angle2rotamer <- function(dynamic_structure, conversion_file=system.file("rotamer/dynameomics_rotamers.csv", package= "Bios2cor")){
  if (missing(dynamic_structure)){
    stop("A 'dynamic_structure' object is required")
  }

  #Importing rotamer information
  angles <- read.table(conversion_file, header= TRUE, fill= TRUE, sep= ",")

  #Importing torsional data from dynamic_structure object
  pdb <- dynamic_structure$pdb
  trj <- dynamic_structure$trj

  ca.inds <- dynamic_structure$ca.inds
  xyz <- dynamic_structure$xyz

  tor <- dynamic_structure$tor
  nb_torsions <- dynamic_structure$nb_torsions
  nb_frames <- dynamic_structure$nb_frames

  prot.seq <- dynamic_structure$prot.seq

  tor.names <- dynamic_structure$tor.names
  tor.resno <- dynamic_structure$tor.resno
  tor.angle <- dynamic_structure$tor.angle
  tor.seq <- dynamic_structure$tor.seq

  converted_angles <- matrix("", ncol= nb_torsions, nrow= nb_frames)
  colnames(converted_angles) <- tor.names

  #converting dihedral angles into rotamers
  for(i in 1:nb_frames){
    for(j in 1:nb_torsions){
      residue <- tor.seq[j]
      dihedral <- tor.angle[j]
      angle <- tor[i,j]

      #select the rotamers possible for the dihedral angle and residue under consideration
      selected_torsion <- as.matrix(angles[(angles[,1] == residue) & (angles[,2] == dihedral),])
      rotamer_nb <- length(selected_torsion[,1])

      #check if the selected torsion is contained in the conversion file
      if(rotamer_nb != 0){
        for(r in 1:rotamer_nb){
          ang_1 <- selected_torsion[r,4]
          ang_2 <- selected_torsion[r,5]
          if(ang_1 == "0-" | ang_1 == "0+") ang_1 <- "0"
          if(ang_2 == "0-" | ang_2 == "0+") ang_2 <- "0"
          min_angle <- as.numeric(ang_1)
          max_angle <- as.numeric(ang_2)

          special_interval <- FALSE
          #the interval is greater than 180 degrees
          if(min_angle > max_angle) special_interval <- TRUE

          if(special_interval){
            if((min_angle <= angle & angle <= 180) | (-180 <= angle & angle <= max_angle)) converted_angles[i,j] <- selected_torsion[r,3]
          } else {
            if(min_angle <= angle & angle <= max_angle) converted_angles[i,j] <- selected_torsion[r,3]
          }
        }
      } else {
        cat(paste("dihedral angle not found in the conversion file \n"))
        cat(paste("residue = ", residue, "\n"))
        cat(paste("dihedral = ", dihedral, "\n"))
      }
    }
  }
  return (converted_angles)
}
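# A usage sketch, guarded so it never runs when the file is sourced. The pdb
# and dcd paths are hypothetical placeholders; the sketch assumes the
# package's dynamic_structure() constructor for building the
# 'dynamic_structure' object, and the default conversion_file shipped in
# inst/rotamer is used implicitly.
if (FALSE) {
  pdb <- "my_structure.pdb"    # placeholder topology file
  trj <- "my_trajectory.dcd"   # placeholder trajectory file
  wanted_frames <- seq(5, 40, by = 5)
  dyn <- dynamic_structure(pdb, trj, wanted_frames)
  rotamers <- angle2rotamer(dyn)
}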
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/angle2rotamer.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

angles.plot <- function(dynamic_structure, angles=NULL, filepathroot=NULL){
  if (missing(dynamic_structure)) stop("A 'dynamic_structure' object is required")

  if (is.null(angles)){
    angles <- dynamic_structure$tor.names
  }

  if (is.null(filepathroot)) {
    final_filepath <- "ANGLES.pdf"
    final_csv <- "ANGLES.csv"
  } else {
    final_filepath <- paste(filepathroot, "_ANGLES.pdf",sep="")
    final_csv <- paste(filepathroot, "_ANGLES.csv",sep="")
  }

  #Importing torsional and angles data
  tor <- dynamic_structure$tor

  nb_frames <- length(tor[,1])
  nb_angles <- length(angles)

  frames <- dynamic_structure$frames

  #Generating ordered plots
  nb_max_figures <- 35
  nb_col <- 5

  pdf(final_filepath)

  layout(matrix(1:nb_max_figures, ncol= nb_col, byrow= TRUE)) # plot up to 7 rows of 5 figures / page

  #Setting the margins of the graph in inches (mai)
  par(mai= c(0.25, 0.25, 0.2, 0.2))

  #Alphanumerical graph ordering
  graph_order <- sort(unlist(as.integer(lapply(strsplit(angles, "\\."), "[[", 1))), index.return= TRUE)$ix

  for(i in graph_order){
    angle_name <- angles[i]
    dihedral_angles <- tor[, angle_name]
    plot(x= 1:nb_frames, y= dihedral_angles, cex= 0.1, xlab= "frames", ylab="dihedral angles", main= angle_name)
  }

  dev.off()

  tor <- tor[,angles]
  write.csv(cbind(frames,round(tor, digits=2)), file = final_csv)
}
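# A usage sketch (never run at source time), assuming 'dyn' is a
# dynamic_structure object as in angle2rotamer() above: plot every dihedral of
# residue 26 and write the PDF/CSV pair under the chosen file path root.
if (FALSE) {
  sel <- dyn$tor.names[dyn$tor.resno == 26]
  angles.plot(dyn, angles = sel, filepathroot = "res26")
}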
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/angles.plot.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

centered_pca <- function(corr_matrix, filepathroot = NULL, filter = NULL, pc= NULL, dec_val= 5){
  if(missing(corr_matrix) | is.null(corr_matrix)) {
    stop("A correlation matrix is required")
  }
  if(!is.matrix(corr_matrix)){
    stop("The first argument must be a score or Zscore matrix.")
  }

  #Centering the initial matrix
  cor <- corr_matrix
  size <- length(corr_matrix[,1])

  #Positions count
  if(is.null(pc)) {
    pc <- size
    print(paste("pc : ", size))
  }

  #identity matrix
  I <- diag(1, size)

  #matrix of ones
  ONES <- matrix(1, nrow = size, ncol = 1)

  if(is.null(filter)) {
    print("No filter applied")
    #Without filter, the elements have the same mass/weight
    m <- matrix(1/size, nrow= size, ncol= 1)
    mat_names <- colnames(corr_matrix) # names of elements in corr_matrix
  } else {
    print("Filter applied")
    mat_names <- colnames(corr_matrix) #names of elements in corr_matrix
    #filter is limited to the elements present in corr_matrix (may differ in sequence analysis)
    m <- filter[mat_names]
    SUM <- sum(m)
    m <- m/SUM
  }

  #m vector of mass
  BigI <- I-(ONES%*%t(m))

  #compute cross-product matrix
  S <- BigI %*% cor %*% t(BigI)

  #Diagonalizing the centered matrix
  res <- list()
  eigen <- eigen(S)

  res$eigen <- round(eigen$values[1:pc], 3)

  eigen.perc <- (abs(eigen$values) * 100) / sum(eigen$values[eigen$values>0])
  res$eigen.perc <- round(eigen.perc[1:pc], 3)

  all_eigen_values <- eigen$values
  nb_eigenvalues <- length(all_eigen_values)

  #only positive eigenvalues are kept
  eigen$vectors <- eigen$vectors[, eigen$values > 0]
  eigen$values <- eigen$values[eigen$values > 0]
  nb_positiv_eigenvalues <- length(eigen$values)

  #Storing and plotting eigen values
  if(is.null(filepathroot)){
    eigen_csv <- "EIGEN.csv"
    eigen_png <- "EIGEN.png"
  } else {
    eigen_csv <- paste(filepathroot, "_EIGEN.csv", sep="")
    eigen_png <- paste(filepathroot, "_EIGEN.png", sep="")
  }

  positiv_ev <- sum(eigen$values)
  perc_positiv_ev <- unlist(lapply(eigen$values, function(x){x*100/positiv_ev}))

  png(eigen_png)
  plot(1:nb_eigenvalues, all_eigen_values, main= basename(eigen_png))
  abline(h= 0, lty= 2)
  dev.off()

  all_eigen_values <- round(all_eigen_values,digits = 3)
  perc_positiv_ev <- round(perc_positiv_ev, digits= 3)

  csv_tab <- data.frame("eigen$values"= all_eigen_values, "positiv_percent"= c(perc_positiv_ev, rep(0, nb_eigenvalues-nb_positiv_eigenvalues)))
  write.table(csv_tab, row.names= FALSE, file= eigen_csv)

  res$source <- list()
  res$source$cor <- cor
  res$source$m <- m

  #check principal components
  if (pc < 2) pc <- 3
  if (pc > length(eigen$values)) pc <- length(eigen$values)

  #compute the matrix of factor scores
  F <- diag(as.vector(m)^(-0.5)) %*% eigen$vectors %*% diag(eigen$values^0.5)

  coord <- data.frame(F[, 1:pc])
  rownames(coord) <- rownames(corr_matrix)
  colnames(coord) <- paste ("PC", (1:pc), sep = "")

  res$coord = round(coord, dec_val)

  class (res) <- c("pca")
  return (res)
}
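# A usage sketch (never run at source time), assuming 'dyn' is a
# dynamic_structure object: project a dihedral correlation matrix from
# dynamic_circular() onto its first three principal components; pca$coord then
# holds one row of PC coordinates per dihedral.
if (FALSE) {
  corr <- dynamic_circular(dyn)
  pca <- centered_pca(corr$Zscore, filepathroot = "circ", pc = 3)
  head(pca$coord)
}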
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/centered_pca.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

delta_filter <- function(entropy, Smin= 0, Smax= 1){
  nb_pos <- length(entropy)
  res_names <- names(entropy)

  # 1 for positions whose entropy lies within [Smin, Smax], 0 otherwise
  delta <- unlist(lapply(1:nb_pos, function(pos){
    if(entropy[pos] <= Smax & entropy[pos] >= Smin) 1 else 0
  }))

  res <- delta
  names(res) <- res_names

  return (res)
}
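# A usage sketch (never run at source time), assuming 'dyn' and 'corr' as in
# the other functions of this package: keep only dihedrals of intermediate
# mobility by filtering on the score from dynamic_entropy(), then pass the 0/1
# weights to centered_pca().
if (FALSE) {
  rotamers <- angle2rotamer(dyn)
  mobility <- dynamic_entropy(rotamers)
  weights <- delta_filter(mobility, Smin = 0.1, Smax = 0.9)
  pca <- centered_pca(corr$Zscore, filter = weights)
}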
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/delta_filter.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/

dynamic_circular <- function(dynamic_structure, res_selection = c("C","I","L","M","V","R","H","K","D","E","N","Q","F","Y","W","T","S","P")){
    if(missing(dynamic_structure)){
        stop("A 'dynamic_structure' object is required")
    }

    # Importing torsional information
    tor <- dynamic_structure$tor
    nb_torsions <- length(tor[1,])

    # Torsional angles
    sidechain.tor <- tor[, 1:nb_torsions]
    dihed_names <- colnames(sidechain.tor)

    # Transform the torsion matrix into a circular object (circular package)
    dihed <- circular(sidechain.tor, units = "degrees", type = "angles", modulo = "2pi")

    # Compute circular correlation
    dihed_corr <- cor.circular(dihed)
    colnames(dihed_corr) <- dihed_names
    rownames(dihed_corr) <- dihed_names
    diag(dihed_corr) <- 0

    # Square the correlations because only their absolute value matters
    dihed_corr <- dihed_corr^2

    # Default angle names and count (updated below if a residue selection is applied)
    nb_angles <- length(dihed_corr[1,])
    selected_dihed_names <- colnames(dihed_corr)

    if(!is.null(res_selection)){
        # Position selection
        tor.seq <- dynamic_structure$tor.seq
        residue_selection.inds <- which(tor.seq %in% res_selection)
        dihed_corr <- dihed_corr[, residue_selection.inds]
        dihed_corr <- dihed_corr[residue_selection.inds, ]
        nb_angles <- length(dihed_corr[1,])
        selected_dihed_names <- colnames(dihed_corr)
    }

    COV2 <- dihed_corr

    # Removing "Inf" and "NA" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    res <- list()

    # Save the matrix of scores
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV3 <- (COV2 - mean_up)/stdev
    diag(COV3) <- 0
    res$Zscore <- COV3

    # Save matrices of scores and Z-scores without auto-correlation
    # (dihedral angles within the same residue)
    COV5 <- COV2
    res_num <- unlist(lapply(selected_dihed_names, function(x){strsplit(x, "[.]")[[1]][1]}))

    # Create a matrix with NA values that will be used for the Z-scores
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- NA
                COV5[j,i] <- NA
            }
        }
    }

    # Save the matrix of scores with 0 for auto-correlation
    COV4 <- COV5
    COV4[is.na(COV4)] <- 0
    res$score_noauto <- COV4

    # Compute and save the matrix of Z-scores without auto-correlation
    # Mean and stdev are calculated on the non-NA elements of the upper triangle
    mean_noauto <- mean(COV5[upper.tri(COV5)], na.rm = TRUE)
    stdev_noauto <- sd(COV5[upper.tri(COV5)], na.rm = TRUE)
    COV5[is.na(COV5)] <- 0
    COV5 <- (COV5 - mean_noauto)/stdev_noauto

    # Correct diagonal and auto-correlated elements
    diag(COV5) <- 0
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- 0
                COV5[j,i] <- 0
            }
        }
    }
    res$Zscore_noauto <- COV5

    return(res)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/dynamic_circular.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

dynamic_entropy <- function(rotamers){
    if(missing(rotamers)){
        stop("A 'rotamers' matrix is required")
    }
    if(!is.matrix(rotamers)){
        stop("The argument is not a matrix")
    }

    nb_rotamers <- length(rotamers[1,])
    nb_frames <- length(rotamers[,1])
    max_changes <- nb_frames - 1

    dihed_names <- colnames(rotamers)

    # The "entropy" of a dihedral is the fraction of frame-to-frame
    # transitions where its rotameric state changes
    entropy <- matrix(0, ncol = nb_rotamers, nrow = 1)
    for(i in 1:nb_rotamers){
        changes <- 0
        for(j in 1:(nb_frames-1)){
            if(rotamers[j,i] != rotamers[j+1, i]) changes <- changes + 1
        }
        entropy[i] <- changes/max_changes
    }
    names(entropy) <- dihed_names

    return(entropy)
}
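# Usage sketch (illustrative, not part of the original source): a toy
# rotamer matrix (3 frames x 2 dihedrals). In real use, rotamers come from
# a trajectory-derived rotamer assignment with one column per dihedral.
rot_toy <- matrix(c("g+", "g+", "t",
                    "t",  "t",  "t"),
                  nrow = 3, ncol = 2,
                  dimnames = list(NULL, c("10.chi1", "11.chi1")))
dynamic_entropy(rot_toy)
# 10.chi1 changes once over 2 transitions -> 0.5; 11.chi1 never changes -> 0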
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/dynamic_entropy.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

dynamic_mi <- function(dynamic_structure, rotamers, res_selection = c("C","I","L","M","V","R","H","K","D","E","N","Q","F","Y","W","T","S","P")){
    if(missing(dynamic_structure)){
        stop("An object of class 'structure' is required as first argument")
    }
    if(missing(rotamers)){
        stop("A matrix of type 'rotamers' is required as second argument")
    }

    nb_angles <- length(rotamers[1,])
    nb_frames <- length(rotamers[,1])
    pos_names <- colnames(rotamers)

    names <- unique(as.vector(rotamers))
    nb_rotamers <- length(names)

    # Binary matrix indicating which rotamer is present or not at dihedral position i for each trajectory frame
    ROT <- lapply(1:nb_angles, function(i){
        t(table(c(rotamers[,i], names), row.names = c(1:(nb_frames+nb_rotamers))))[1:nb_frames,]
    })

    MI <- matrix(0, ncol = nb_angles, nrow = nb_angles)
    freq_ij <- matrix(0, ncol = nb_rotamers, nrow = nb_rotamers, dimnames = list(names, names))

    # Calculating the MI score for each pair of positions
    for(i in 1:nb_angles){
        mat_i <- ROT[[i]] # matrix nb_frames*nb_rotamers
        cat(paste("i : ", i, "\n"))

        # Rotamer frequency at dihedral position i in the trajectory
        freq_i <- colSums(mat_i)/nb_frames

        for(j in i:nb_angles){
            mat_j <- ROT[[j]] # matrix nb_frames*nb_rotamers

            # Rotamer frequency at dihedral position j in the trajectory
            freq_j <- colSums(mat_j)/nb_frames # matrix 1*nb_rotamers

            # Rotamer frequency at dihedral positions i AND j in the trajectory
            freq_p <- ((t(mat_i))%*%mat_j)/nb_frames # matrix nb_rotamers*nb_rotamers

            for(k in names){
                for(l in names){
                    freq_ij[k,l] <- freq_i[k]*freq_j[l]
                }
            }

            LOG <- (log(freq_p, base = 9) - log(freq_ij, base = 9))
            LOG <- replace(LOG, which(LOG == "-Inf"), 0)
            LOG <- replace(LOG, which(LOG == "Inf"), 0)
            LOG <- replace(LOG, which(LOG == "NaN"), 0)

            MI[i, j] <- sum((freq_p)*(LOG))
        }
    }

    # MI final value
    COV2 <- MI

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    COV2 <- COV2 + t(COV2) # complete the second triangular part of the matrix
    diag(COV2) <- 0

    # Removing "Inf" and "NA" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    selected_pos_names <- colnames(COV2)

    if(!is.null(res_selection)){
        # Position selection
        tor.seq <- dynamic_structure$tor.seq
        residue_selection.inds <- which(tor.seq %in% res_selection)
        COV2 <- COV2[, residue_selection.inds]
        COV2 <- COV2[residue_selection.inds, ]
        nb_angles <- length(COV2[1,])
        selected_pos_names <- colnames(COV2)
    }

    res <- list()

    # Save the matrix of scores
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV3 <- (COV2 - mean_up)/stdev
    diag(COV3) <- 0
    res$Zscore <- COV3

    # Save matrices of scores and Z-scores without auto-correlation
    # (dihedral angles within the same residue)
    COV5 <- COV2
    res_num <- unlist(lapply(selected_pos_names, function(x){strsplit(x, "[.]")[[1]][1]}))

    # Create a matrix with NA values that will be used for the Z-scores
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- NA
                COV5[j,i] <- NA
            }
        }
    }

    # Save the matrix of scores with 0 for auto-correlation
    COV4 <- COV5
    COV4[is.na(COV4)] <- 0
    res$score_noauto <- COV4

    # Compute and save the matrix of Z-scores without auto-correlation
    # Mean and stdev are calculated on the non-NA elements of the upper triangle
    mean_noauto <- mean(COV5[upper.tri(COV5)], na.rm = TRUE)
    stdev_noauto <- sd(COV5[upper.tri(COV5)], na.rm = TRUE)
    COV5[is.na(COV5)] <- 0
    COV5 <- (COV5 - mean_noauto)/stdev_noauto

    # Correct diagonal and auto-correlated elements
    diag(COV5) <- 0
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- 0
                COV5[j,i] <- 0
            }
        }
    }
    res$Zscore_noauto <- COV5

    return(res)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/dynamic_mi.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

dynamic_mip <- function(dynamic_structure, rotamers, res_selection = c("C","I","L","M","V","R","H","K","D","E","N","Q","F","Y","W","T","S","P")){
    if(missing(dynamic_structure)){
        stop("An object of class 'structure' is required as first argument")
    }
    if(missing(rotamers)){
        stop("A matrix of type 'rotamers' is required as second argument")
    }

    nb_angles <- length(rotamers[1,])
    nb_frames <- length(rotamers[,1])
    pos_names <- colnames(rotamers)

    names <- unique(as.vector(rotamers))
    nb_rotamers <- length(names)

    # Binary matrix indicating which rotamer is present or not at dihedral position i for each trajectory frame
    ROT <- lapply(1:nb_angles, function(i){
        t(table(c(rotamers[,i], names), row.names = c(1:(nb_frames+nb_rotamers))))[1:nb_frames,]
    })

    MI <- matrix(0, ncol = nb_angles, nrow = nb_angles)
    freq_ij <- matrix(0, ncol = nb_rotamers, nrow = nb_rotamers, dimnames = list(names, names))

    # Calculating the MI score for each pair of positions
    for(i in 1:nb_angles){
        mat_i <- ROT[[i]] # matrix nb_frames*nb_rotamers
        cat(paste("i : ", i, "\n"))

        # Rotamer frequency at dihedral position i in the trajectory
        freq_i <- colSums(mat_i)/nb_frames

        for(j in i:nb_angles){
            mat_j <- ROT[[j]] # matrix nb_frames*nb_rotamers

            # Rotamer frequency at dihedral position j in the trajectory
            freq_j <- colSums(mat_j)/nb_frames # matrix 1*nb_rotamers

            # Rotamer frequency at dihedral positions i AND j in the trajectory
            freq_p <- ((t(mat_i))%*%mat_j)/nb_frames # matrix nb_rotamers*nb_rotamers

            for(k in names){
                for(l in names){
                    freq_ij[k,l] <- freq_i[k]*freq_j[l]
                }
            }

            LOG <- (log(freq_p, base = 9) - log(freq_ij, base = 9))
            LOG <- replace(LOG, which(LOG == "-Inf"), 0)
            LOG <- replace(LOG, which(LOG == "Inf"), 0)
            LOG <- replace(LOG, which(LOG == "NaN"), 0)

            MI[i, j] <- sum((freq_p)*(LOG))
        }
    }

    # Product correction P
    P <- matrix(0, ncol = nb_angles, nrow = nb_angles)
    mean_cov <- sum(MI)/(length(which(MI != 0)))
    for(i in 1:(nb_angles-1)){
        mean_i <- (sum(MI[i,]) + sum(MI[,i]))/(nb_angles-1)
        for(k in (i+1):nb_angles){
            mean_k <- (sum(MI[k,]) + sum(MI[,k]))/(nb_angles-1)
            P[i, k] <- ((mean_i*mean_k))/(mean_cov)
        }
    }
    diag(P) <- 0

    # MIP final value
    COV2 <- MI - P

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    COV2 <- COV2 + t(COV2) # complete the second triangular part of the matrix
    diag(COV2) <- 0

    # Removing "Inf" and "NA" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    selected_pos_names <- colnames(COV2)

    if(!is.null(res_selection)){
        # Position selection
        tor.seq <- dynamic_structure$tor.seq
        residue_selection.inds <- which(tor.seq %in% res_selection)
        COV2 <- COV2[, residue_selection.inds]
        COV2 <- COV2[residue_selection.inds, ]
        nb_angles <- length(COV2[1,])
        selected_pos_names <- colnames(COV2)
    }

    res <- list()

    # Save the matrix of scores
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV3 <- (COV2 - mean_up)/stdev
    diag(COV3) <- 0
    res$Zscore <- COV3

    # Save matrices of scores and Z-scores without auto-correlation
    # (dihedral angles within the same residue)
    COV5 <- COV2
    res_num <- unlist(lapply(selected_pos_names, function(x){strsplit(x, "[.]")[[1]][1]}))

    # Create a matrix with NA values that will be used for the Z-scores
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- NA
                COV5[j,i] <- NA
            }
        }
    }

    # Save the matrix of scores with 0 for auto-correlation
    COV4 <- COV5
    COV4[is.na(COV4)] <- 0
    res$score_noauto <- COV4

    # Compute and save the matrix of Z-scores without auto-correlation
    # Mean and stdev are calculated on the non-NA elements of the upper triangle
    mean_noauto <- mean(COV5[upper.tri(COV5)], na.rm = TRUE)
    stdev_noauto <- sd(COV5[upper.tri(COV5)], na.rm = TRUE)
    COV5[is.na(COV5)] <- 0
    COV5 <- (COV5 - mean_noauto)/stdev_noauto

    # Correct diagonal and auto-correlated elements
    diag(COV5) <- 0
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- 0
                COV5[j,i] <- 0
            }
        }
    }
    res$Zscore_noauto <- COV5

    return(res)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/dynamic_mip.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/

dynamic_omes <- function(dynamic_structure, rotamers, res_selection = c("C","I","L","M","V","R","H","K","D","E","N","Q","F","Y","W","T","S","P")){
    if(missing(dynamic_structure)){
        stop("An object of class 'structure' is required as first argument")
    }
    if(missing(rotamers)){
        stop("A matrix of type 'rotamers' is required as second argument")
    }

    nb_angles <- length(rotamers[1,])
    nb_frames <- length(rotamers[,1])
    pos_names <- colnames(rotamers)

    names <- unique(as.vector(rotamers))
    nb_rotamers <- length(names)

    # Binary matrix indicating which rotamer is present or not at dihedral position i for each trajectory frame
    ROT <- lapply(1:nb_angles, function(i){
        t(table(c(rotamers[,i], names), row.names = c(1:(nb_frames+nb_rotamers))))[1:nb_frames,]
    })

    COV2 <- matrix(0, ncol = nb_angles, nrow = nb_angles)

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    # Calculating the OMES score for each pair of positions
    for(i in 1:nb_angles){
        mat_i <- ROT[[i]] # matrix nb_frames*nb_rotamers
        cat(paste("pos_i : ", i, "\n"))
        for(j in i:nb_angles){
            mat_j <- ROT[[j]] # matrix nb_frames*nb_rotamers

            # Frames with a defined rotamer pair at positions i and j
            Valid <- which(ROT[[i]]%*%matrix(1, nrow = nb_rotamers)*ROT[[j]]%*%matrix(1, nrow = nb_rotamers) != 0)
            n <- length(Valid)

            Ex <- matrix(as.vector(t(ROT[[i]])%*%matrix(1, nrow = nb_frames)), ncol = nb_rotamers, nrow = nb_rotamers)*t(matrix(as.vector(t(ROT[[j]])%*%matrix(1, nrow = nb_frames)), ncol = nb_rotamers, nrow = nb_rotamers))/n
            Obs <- t(ROT[[i]])%*%ROT[[j]]

            COV2[i, j] <- sum((Obs-Ex)*(Obs-Ex)/n)
        }
    }

    COV2 <- COV2 + t(COV2) # complete the second triangular part of the matrix
    diag(COV2) <- 0

    # Removing "Inf" and "NA" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    selected_pos_names <- colnames(COV2)

    if(!is.null(res_selection)){
        # Position selection
        tor.seq <- dynamic_structure$tor.seq
        residue_selection.inds <- which(tor.seq %in% res_selection)
        COV2 <- COV2[, residue_selection.inds]
        COV2 <- COV2[residue_selection.inds, ]
        nb_angles <- length(COV2[1,])
        selected_pos_names <- colnames(COV2)
    }

    res <- list()

    # Save the matrix of scores
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV3 <- (COV2 - mean_up)/stdev
    diag(COV3) <- 0
    res$Zscore <- COV3

    # Save matrices of scores and Z-scores without auto-correlation
    # (dihedral angles within the same residue)
    COV5 <- COV2
    res_num <- unlist(lapply(selected_pos_names, function(x){strsplit(x, "[.]")[[1]][1]}))

    # Create a matrix with NA values that will be used for the Z-scores
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- NA
                COV5[j,i] <- NA
            }
        }
    }

    # Save the matrix of scores with 0 for auto-correlation
    COV4 <- COV5
    COV4[is.na(COV4)] <- 0
    res$score_noauto <- COV4

    # Compute and save the matrix of Z-scores without auto-correlation
    # Mean and stdev are calculated on the non-NA elements of the upper triangle
    mean_noauto <- mean(COV5[upper.tri(COV5)], na.rm = TRUE)
    stdev_noauto <- sd(COV5[upper.tri(COV5)], na.rm = TRUE)
    COV5[is.na(COV5)] <- 0
    COV5 <- (COV5 - mean_noauto)/stdev_noauto

    # Correct diagonal and auto-correlated elements
    diag(COV5) <- 0
    for(i in 1:nb_angles){
        for(j in i:nb_angles){
            res_i <- res_num[i]
            res_j <- res_num[j]
            if(res_i == res_j){
                COV5[i,j] <- 0
                COV5[j,i] <- 0
            }
        }
    }
    res$Zscore_noauto <- COV5

    return(res)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/dynamic_omes.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/

dynamic_structure <- function(pdb, trj, frames = NULL){
    if(missing(pdb)){
        stop("Missing pdb file")
    }
    if(missing(trj)){
        stop("Missing trajectory file")
    }

    ## Transform the xyz trajectory into a dihedral trajectory

    # Reading the pdb and trajectory files
    pdb <- read.pdb(pdb)
    trj <- read.dcd(trj)

    # Selecting only "CA" atoms for superposition
    ca.inds <- atom.select(pdb, elety = "CA")

    # Getting xyz coordinates using fit.xyz from bio3d
    xyz <- fit.xyz(fixed = pdb$xyz, mobile = trj, fixed.inds = ca.inds$xyz, mobile.inds = ca.inds$xyz)
    nb_frames <- length(xyz[,1])

    # Creating the torsion object using the xyz2torsion function from bio3d
    if(is.null(frames)) {
        tor <- xyz2torsion(pdb, xyz, tbl = "sidechain", ncore = 1)
    } else {
        tor <- xyz2torsion(pdb, xyz[frames,], tbl = "sidechain", ncore = 1)
    }
    nb_torsions <- length(tor[1,])
    nb_frames <- length(tor[,1])

    # Associating the one-letter AA code to each residue, using the pdbseq function from bio3d
    prot.seq <- pdbseq(pdb)
    tor.names <- colnames(tor)
    tor.resno <- sub("\\..*$", "", tor.names)
    tor.angle <- sub("^[0-9]*\\.", "", tor.names)
    tor.seq <- prot.seq[tor.resno]

    # Filling the torsional structure
    res <- list()
    res$pdb <- pdb
    res$trj <- trj
    res$ca.inds <- ca.inds
    res$xyz <- xyz
    res$nb_frames <- nb_frames
    if(!is.null(frames)) {
        res$frames <- frames
    } else {
        res$frames <- c(1:nb_frames)
    }
    res$tor <- tor
    res$nb_torsions <- nb_torsions
    res$prot.seq <- prot.seq
    res$tor.names <- tor.names
    res$tor.resno <- tor.resno
    res$tor.angle <- tor.angle
    res$tor.seq <- tor.seq

    class(res) <- c("structure")
    return(res)
}
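# Usage sketch (illustrative, not part of the original source). The file
# names below are an assumption about the example data shipped with bio3d;
# substitute your own matching PDB/DCD pair.
# pdb_file <- system.file("examples/hivp.pdb", package = "bio3d")
# dcd_file <- system.file("examples/hivp.dcd", package = "bio3d")
# traj <- dynamic_structure(pdb_file, dcd_file, frames = seq(1, 100, by = 10))
# traj$nb_torsions   # number of side-chain dihedrals followed along the trajectory
# circ <- dynamic_circular(traj)   # squared circular correlations between dihedrals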
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/dynamic_structure.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

elsc <- function(align, gap_ratio = 0.2){
    if((gap_ratio < 0) | (gap_ratio > 1)) {
        stop("Error in elsc argument: gap_ratio must be in the [0,1] range")
    }

    diag <- 0

    msa <- align
    MSA <- matrix(as.vector(unlist(msa)), ncol = length(msa[[1]]), byrow = TRUE)

    nb_pos <- length(MSA[1,]) # number of positions in the alignment
    nb_seq <- length(MSA[,1]) # number of sequences in the alignment

    colnames(MSA) <- c(1:nb_pos)
    pos_names <- colnames(MSA)

    gap <- 1 - gap_ratio # gap value indicates the minimal ratio of aa to nb_seq in the MSA
    if (gap < 1/nb_seq) {
        gap <- 1/nb_seq # positions must have at least ONE aa to be taken into account (removes all-gap columns)
    }

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","-")

    AA <- lapply(1:nb_pos, function(i){
        t(table(c(MSA[,i], names), row.names = c(1:(nb_seq+21))))[1:nb_seq, -1]
    })

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    nb_aa <- length(names)

    COV2 <- matrix(0, ncol = nb_pos, nrow = nb_pos)

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    # Determining valid positions with a gap ratio under the (1 - gap) limit
    Valid_pos <- c()
    for(i in 1:nb_pos){
        mat_i <- AA[[i]] # matrix nb_seq*nb_aa
        S_i <- colSums(AA[[i]])
        Tot_i <- sum(S_i)
        if (Tot_i/nb_seq >= gap) {
            Valid_pos <- c(Valid_pos, i)
        }
    }
    nb_Valid_pos <- length(Valid_pos)

    # Calculating the ELSC score for each pair of valid positions
    for(i in 1:nb_Valid_pos){
        pos_i <- Valid_pos[i] # current valid position
        cat(paste("pos_i : ", pos_i, "\n"))
        for(j in i:nb_Valid_pos){
            pos_j <- Valid_pos[j]

            aln_tot_i <- AA[[pos_i]]
            aln_tot_i[is.na(aln_tot_i)] <- 0
            aln_tot_j <- AA[[pos_j]]
            aln_tot_j[is.na(aln_tot_j)] <- 0

            Nyj <- colSums(aln_tot_j) # number of amino acids y at position j in the full alignment

            v <- sort(colSums(aln_tot_i), decreasing = TRUE)
            v <- unique(c(which(aln_tot_i[, names(v[1])] == 1))) # sequences carrying the most frequent amino acid at position i

            ss_aln_i <- aln_tot_i[v,]
            nb_seq_ss_aln_i <- length(ss_aln_i[,1])
            ss_aln_j <- aln_tot_j[v,]
            nb_seq_ss_aln_j <- length(ss_aln_j[,1])

            n <- colSums(ss_aln_j) # number of amino acids y at position j in the sub-alignment

            mf <- (Nyj/nb_seq)*nb_seq_ss_aln_j

            calc <- matrix(0, ncol = 2, nrow = nb_aa, dimnames = list(names, c("r", "m")))
            calc[,2] <- trunc(mf)       # mf rounded down
            calc[,1] <- (mf - calc[,2]) # remainder
            calc <- calc[order(calc[,1], rownames(calc), decreasing = T),] # order m by decreasing remainder, then by reverse alphabetical order of aa

            # The sum of the "m" column must be equal to the sum of "n"
            if(sum(calc[,2]) > sum(n)){
                z <- nb_aa
                while((sum(calc[,2])) != (sum(n))){
                    calc[z,2] <- (calc[z,2] - 1)
                    if(z == 1) z <- nb_aa else z <- z-1
                }
            } else {
                z <- 1
                while((sum(calc[,2])) != (sum(n))){
                    calc[z,2] <- (calc[z,2] + 1)
                    if(z == nb_aa) z <- 1 else z <- z+1
                }
            }
            m <- calc[order(rownames(calc)), 2]

            COV2[pos_i, pos_j] <- (-log(prod(choose(Nyj, n)/choose(Nyj, m)))) # asymmetric matrix - actual scores obtained
        }
    }

    # Complete the second triangular part of the matrix
    COV2 <- COV2 + t(COV2)
    diag(COV2) <- diag

    # Reducing the final correlation matrix to the valid positions
    COV2 <- COV2[Valid_pos,]
    COV2 <- COV2[,Valid_pos]

    # Removing "Inf" and "NaN" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    res <- list()

    # Save the matrix of scores
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV2 <- (COV2 - mean_up)/stdev
    diag(COV2) <- diag
    res$Zscore <- COV2

    return(res)
}
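# Usage sketch (illustrative, not part of the original source): ELSC on a
# small random alignment; real analyses would start from import.fasta() or
# import.msf(). No covariation signal is expected from random data.
set.seed(42)
msa <- random.msa(nb.seq = 50, nb.pos = 10, aa.strict = TRUE)
elsc_res <- elsc(msa)
elsc_res$Zscore[1:3, 1:3]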
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/elsc.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

entropy <- function(align, gap_ratio = 0.2){
    if((gap_ratio < 0) | (gap_ratio > 1)) {
        stop("Error in entropy argument: gap_ratio must be in the [0,1] range.")
    }

    MSA <- matrix(as.vector(unlist(align)), ncol = length(align[[1]]), byrow = TRUE)
    nb_pos <- length(MSA[1,]) # number of positions in the alignment
    nb_seq <- length(MSA[,1]) # number of sequences in the alignment
    colnames(MSA) <- c(1:nb_pos)

    gap <- 1 - gap_ratio # gap value indicates the minimal ratio of aa to nb_seq in the MSA
    if (gap < 1/nb_seq) {
        gap <- 1/nb_seq # positions must have at least ONE aa to be taken into account (removes all-gap columns)
    }

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","-")

    # Binary matrix indicating which amino acid is present or not at position i in sequence j
    AA <- lapply(1:nb_pos, function(i){
        t(table(c(MSA[,i], names), row.names = c(1:(nb_seq+21))))[1:nb_seq, -1]
    })

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    nb_aa <- length(names)

    entropy <- matrix(0, ncol = nb_pos, nrow = 1)

    # Calculating validity and entropy for each position
    entropy <- unlist(lapply(1:nb_pos, function(i){
        mat_i <- AA[[i]] # matrix nb_seq*nb_aa
        S_i <- colSums(AA[[i]])
        Tot_i <- sum(S_i)
        if (Tot_i/nb_seq >= gap) {
            percent_i <- S_i/Tot_i
            entropy_i <- 0
            for(j in 1:nb_aa){
                if (S_i[[j]] == 0) {
                    entropy_i <- entropy_i
                } else {
                    entropy_i <- entropy_i - percent_i[[j]]*log(percent_i[[j]], base = 20)
                }
            }
            entropy[i] <- entropy_i
        } else {
            entropy[i] <- ''
        }
    }))
    names(entropy) <- c(1:nb_pos)

    return(entropy)
}
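# Usage sketch (illustrative, not part of the original source): per-column
# entropy of a random alignment. With near-uniform amino acid usage the
# values sit close to the log-base-20 maximum of 1.
set.seed(42)
msa <- random.msa(nb.seq = 100, nb.pos = 20, aa.strict = TRUE)
round(head(entropy(msa)), 3)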
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/entropy.R
import.fasta <- function(file, aa.to.upper = TRUE, gap.to.dash = TRUE, log.file = NULL) {
    if(missing(file)) {
        stop("file is missing")
    }

    # Read as a vector of lines
    lines <- readLines(file)

    # Localize sequence identifiers and check fasta format
    loc <- grep(">", lines)
    if (length(loc) == 0){
        if(!is.null(log.file)) write("file is not in fasta format", log.file)
        stop("file is not in fasta format")
    }

    # Get sequence identifiers
    id <- sub("^>(\\S+).*$", "\\1", lines[loc])
    nb.seq <- length(id)

    # Localize sequence pieces for each identifier
    start <- loc + 1
    end <- loc - 1
    end <- c(end[-1], length(lines))

    seq <- sapply(seq_len(nb.seq), function(i) {paste(lines[start[i]:end[i]], collapse = "")})
    seq <- gsub("\\s", "", seq)

    # Turn aa into upper case
    if (aa.to.upper)
        seq <- toupper(seq)

    # Give a list of split sequences
    seq <- strsplit(seq, split = "")
    names(seq) <- id

    # Turn gap into dash character
    if (gap.to.dash)
        seq <- lapply(seq, function(i) {i[is.gap(i)] <- "-"; return(i)})

    class(seq) <- c("align")
    return(seq)
}
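# Usage sketch (illustrative, not part of the original source): write a tiny
# FASTA file to a temporary location and read it back as an 'align' object
# (a named list of one-letter vectors). is.gap() comes from bio3d, which
# this package loads.
fasta_file <- tempfile(fileext = ".fa")
writeLines(c(">seq1 first sequence", "MKT-LL", ">seq2", "MRTALL"), fasta_file)
aln <- import.fasta(fasta_file)
aln$seq1   # "M" "K" "T" "-" "L" "L"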
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/import.fasta.R
import.msf <- function(file, aa.to.upper = TRUE, gap.to.dash = TRUE, log.file = NULL) {
    if(missing(file)) {
        stop("file is missing")
    }

    # Read as a vector of lines
    lines <- readLines(file)

    # Check msf format
    check1 <- grep("MSF:.*Type:.*Check:.*", lines)
    check2 <- grep("Name:.*Len:.*Check:.*Weight:.*", lines)
    limit <- grep("^//", lines)
    if (length(check1) == 0 || length(check2) == 0 || length(limit) == 0) {
        if(!is.null(log.file)) write("file is not in msf format", log.file)
        stop("file is not in msf format")
    }

    # Get sequence identifiers from the header
    id.head <- sub("^\\s*Name:\\s+(\\S+).*$", "\\1", lines[check2])
    nb.seq <- length(id.head)

    # Check for duplicated identifiers in the header
    if(any(duplicated(id.head))){
        if(!is.null(log.file)) write("duplicated identifiers in header", log.file)
        stop("duplicated identifiers in header")
    }

    # Get sequence identifiers from the alignment
    align <- grep("^\\s*\\S+\\s+[^1-9]+$", lines[limit:length(lines)], value = TRUE)
    id.align <- sub("^\\s*(\\S+)\\s+[^1-9]+$", "\\1", align)

    # Localize sequence pieces for each identifier
    loc <- lapply(seq_len(nb.seq), function(i) {which(id.align == id.head[i])})

    # Paste and clean sequences
    seq <- sapply(loc, function(i) {paste(sub("^\\s*\\S+\\s+([^1-9]+)$", "\\1", align[i]), collapse = "")})
    seq <- gsub("\\s", "", seq)

    # Turn aa into upper case
    if (aa.to.upper)
        seq <- toupper(seq)

    # Give a list of split sequences
    seq <- strsplit(seq, split = "")
    names(seq) <- id.head

    # Turn gap into dash character
    if (gap.to.dash)
        seq <- lapply(seq, function(i) {i[is.gap(i)] <- "-"; return(i)})

    class(seq) <- c("align")
    return(seq)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/import.msf.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

mcbasc <- function(align, gap_ratio = 0.2) {
    if((gap_ratio < 0) | (gap_ratio > 1)) {
        stop("gap_ratio must be in the [0,1] range.")
    }

    diag <- 0

    msa <- align
    MSA <- matrix(as.vector(unlist(msa)), ncol = length(msa[[1]]), byrow = TRUE)

    nb_pos <- length(MSA[1,]) # number of positions in the alignment
    nb_seq <- length(MSA[,1]) # number of sequences in the alignment

    colnames(MSA) <- c(1:nb_pos)
    pos_names <- colnames(MSA)

    gap <- 1 - gap_ratio # gap value indicates the minimal ratio of aa to nb_seq in the MSA
    if (gap < 1/nb_seq) {
        gap <- 1/nb_seq # positions must have at least ONE aa to be taken into account (removes all-gap columns)
    }

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","-")

    # Binary matrix indicating which amino acid is present or not at position i in sequence j
    AA <- lapply(1:nb_pos, function(i){
        t(table(c(MSA[,i], names), row.names = c(1:(nb_seq+21))))[1:nb_seq, -1]
    })

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    nb_aa <- length(names)

    # Determining valid positions with a correct gap ratio (at least equal to 1 - gap_ratio)
    Valid_pos <- c()
    for(i in 1:nb_pos){
        mat_i <- AA[[i]] # matrix nb_seq*nb_aa
        S_i <- colSums(AA[[i]])
        Tot_i <- sum(S_i)
        if (Tot_i/nb_seq >= gap) {
            Valid_pos <- c(Valid_pos, i)
        }
    }
    nb_Valid_pos <- length(Valid_pos)

    # McLachlan substitution matrix (symmetric, 20x20)
    matrice_mclachlan <- matrix(c(
        8,1,3,4,1,3,3,2,3,2,3,3,4,3,2,4,3,3,1,1,
        1,9,1,0,0,1,3,1,0,0,3,1,0,0,1,2,2,1,2,1,
        3,1,8,5,1,3,4,0,3,1,2,5,3,4,1,3,3,1,0,1,
        4,0,5,8,0,3,2,1,4,1,1,4,4,5,3,4,4,2,1,2,
        1,0,1,0,9,0,4,3,0,5,5,0,1,0,1,2,1,3,6,6,
        3,1,3,3,0,8,2,1,3,1,1,3,3,2,3,3,2,2,1,0,
        3,3,4,2,4,2,8,2,4,2,3,4,3,4,5,3,4,2,3,4,
        2,1,0,1,3,1,2,8,1,5,5,1,1,0,1,2,3,5,3,3,
        3,0,3,4,0,3,4,1,8,2,1,4,3,4,5,3,3,2,1,1,
        2,0,1,1,5,1,2,5,2,8,6,1,1,3,2,2,3,5,3,3,
        3,3,2,1,5,1,3,5,1,6,8,2,1,3,1,2,3,4,1,2,
        3,1,5,4,0,3,4,1,4,1,2,8,1,4,3,5,3,1,0,2,
        4,0,3,4,1,3,3,1,3,1,1,1,8,3,3,3,3,2,0,0,
        3,0,4,5,0,2,4,0,4,3,3,4,3,8,5,4,3,2,2,1,
        2,1,1,3,1,3,5,1,5,2,1,3,3,5,8,4,3,2,3,2,
        4,2,3,4,2,3,3,2,3,2,2,5,3,4,4,8,5,2,3,3,
        3,2,3,4,1,2,4,3,3,3,3,3,3,3,3,5,8,3,2,1,
        3,1,1,2,3,2,2,5,2,5,4,1,2,2,2,2,3,8,2,3,
        1,2,0,1,6,1,3,3,1,3,1,0,0,2,3,3,2,2,9,6,
        1,1,1,2,6,0,4,3,1,3,2,2,0,1,2,3,1,3,6,9), nrow = 20)

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    rownames(matrice_mclachlan) <- names
    colnames(matrice_mclachlan) <- names

    # For each position, a nb_seq*nb_seq matrix of McLachlan similarities
    # between the residues observed in the different sequences
    A <- lapply(1:nb_pos, function(i){
        matrix(0, ncol = nb_seq, nrow = nb_seq, dimnames = list(c(MSA[,i]), c(MSA[,i])))
    })

    for (k in 1:nb_pos) {
        mat <- A[[k]]
        for (i in names) {
            for (j in names) {
                mat[rownames(mat) == i, colnames(mat) == j] <- matrice_mclachlan[rownames(matrice_mclachlan) == i, colnames(matrice_mclachlan) == j]
            }
        }
        A[[k]] <- mat
    }

    COV2 <- matrix(0, ncol = nb_pos, nrow = nb_pos)

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    # Calculating the McBASC score for each pair of valid positions
    for(i in 1:nb_Valid_pos){
        pos_i <- Valid_pos[i] # current valid position
        mat_i <- A[[pos_i]]   # matrix nb_seq*nb_seq
        cat(paste("pos_i : ", pos_i, "\n"))
        for(j in i:nb_Valid_pos){
            pos_j <- Valid_pos[j]
            mat_j <- A[[pos_j]] # matrix nb_seq*nb_seq
            SUM <- sum((mat_i - mean(mat_i))*(mat_j - mean(mat_j)))
            COV2[pos_i, pos_j] <- (1/((nb_seq)^2))*(SUM/(sd(mat_i)*sd(mat_j)))
        }
    }

    # Complete the second triangular part of the matrix
    COV2 <- COV2 + t(COV2)
    diag(COV2) <- diag

    # Reducing the final correlation matrix to the valid positions
    COV2 <- COV2[Valid_pos,]
    COV2 <- COV2[,Valid_pos]

    # Removing "Inf" and "NaN" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    res <- list()
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV2 <- (COV2 - mean_up)/stdev
    diag(COV2) <- diag
    res$Zscore <- COV2

    return(res)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/mcbasc.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/

mi <- function(align, gap_ratio = 0.2) {
    if((gap_ratio < 0) | (gap_ratio > 1)) {
        stop("gap_ratio must be in the [0,1] range.")
    }

    diag <- 0

    msa <- align
    MSA <- matrix(as.vector(unlist(msa)), ncol = length(msa[[1]]), byrow = TRUE)

    nb_pos <- length(MSA[1,]) # number of positions in the alignment
    nb_seq <- length(MSA[,1]) # number of sequences in the alignment

    colnames(MSA) <- c(1:nb_pos)
    pos_names <- colnames(MSA)

    gap <- 1 - gap_ratio # gap value indicates the minimal ratio of aa to nb_seq in the MSA
    if (gap < 1/nb_seq) {
        gap <- 1/nb_seq # positions must have at least ONE aa to be taken into account (removes all-gap columns)
    }

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","-")

    # Binary matrix indicating which amino acid is present or not at position i in sequence j
    AA <- lapply(1:nb_pos, function(i){
        t(table(c(MSA[,i], names), row.names = c(1:(nb_seq+21))))[1:nb_seq, -1]
    })

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    nb_aa <- length(names)

    # Determining valid positions with a correct gap ratio (at least equal to 1 - gap_ratio)
    Valid_pos <- c()
    for(i in 1:nb_pos){
        mat_i <- AA[[i]] # matrix nb_seq*nb_aa
        S_i <- colSums(AA[[i]])
        Tot_i <- sum(S_i)
        if (Tot_i/nb_seq >= gap) {
            Valid_pos <- c(Valid_pos, i)
        }
    }
    nb_Valid_pos <- length(Valid_pos)

    MI <- matrix(0, ncol = nb_pos, nrow = nb_pos)
    freq_ij <- matrix(0, ncol = nb_aa, nrow = nb_aa, dimnames = list(names, names))

    # Calculating the MI score for each pair of valid positions
    for(i in 1:nb_Valid_pos){
        pos_i <- Valid_pos[i] # current valid position
        mat_i <- AA[[pos_i]]  # matrix nb_seq*nb_aa
        cat(paste("pos_i : ", pos_i, "\n"))

        # Amino acid frequencies at position i in the alignment
        freq_i <- colSums(mat_i)/nb_seq

        for(j in i:nb_Valid_pos){
            pos_j <- Valid_pos[j]
            mat_j <- AA[[pos_j]] # matrix nb_seq*nb_aa

            # Amino acid frequencies at position j in the alignment
            freq_j <- colSums(mat_j)/nb_seq # matrix 1*nb_aa

            # Amino acid frequencies at positions i AND j in the alignment
            freq_p <- ((t(mat_i))%*%mat_j)/nb_seq # matrix nb_aa*nb_aa

            for(k in names){
                for(l in names){
                    freq_ij[k,l] <- freq_i[k]*freq_j[l]
                }
            }

            LOG <- (log(freq_p, base = 400) - log(freq_ij, base = 400))
            LOG <- replace(LOG, which(LOG == "-Inf"), 0)
            LOG <- replace(LOG, which(LOG == "NaN"), 0)

            MI[pos_i, pos_j] <- sum((freq_p)*(LOG))
        }
    }

    # MI final value
    COV2 <- MI

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    COV2 <- COV2 + t(COV2) # complete the second triangular part of the matrix
    diag(COV2) <- diag

    # Reducing the final correlation matrix to the valid positions
    COV2 <- COV2[Valid_pos,]
    COV2 <- COV2[,Valid_pos]

    # Removing "Inf" and "NaN" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    res <- list()
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV2 <- (COV2 - mean_up)/stdev
    diag(COV2) <- diag
    res$Zscore <- COV2

    return(res)
}
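# Usage sketch (illustrative, not part of the original source): mutual
# information on a random alignment; score and Zscore are returned as
# position x position matrices restricted to the valid (low-gap) columns.
set.seed(42)
msa <- random.msa(nb.seq = 50, nb.pos = 15, aa.strict = TRUE)
mi_res <- mi(msa)
dim(mi_res$Zscore)   # 15 x 15: every column is valid in a gap-free alignment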
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/mi.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/

mip <- function(align, gap_ratio = 0.2) {
    if((gap_ratio < 0) | (gap_ratio > 1)) {
        stop("gap_ratio must be in the [0,1] range.")
    }

    diag <- 0

    msa <- align
    MSA <- matrix(as.vector(unlist(msa)), ncol = length(msa[[1]]), byrow = TRUE)

    nb_pos <- length(MSA[1,]) # number of positions in the alignment
    nb_seq <- length(MSA[,1]) # number of sequences in the alignment

    colnames(MSA) <- c(1:nb_pos)
    pos_names <- colnames(MSA)

    gap <- 1 - gap_ratio # gap value indicates the minimal ratio of aa to nb_seq in the MSA
    if (gap < 1/nb_seq) {
        gap <- 1/nb_seq # positions must have at least ONE aa to be taken into account (removes all-gap columns)
    }

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","-")

    # Binary matrix indicating which amino acid is present or not at position i in sequence j
    AA <- lapply(1:nb_pos, function(i){
        t(table(c(MSA[,i], names), row.names = c(1:(nb_seq+21))))[1:nb_seq, -1]
    })

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    nb_aa <- length(names)

    # Determining valid positions with a correct gap ratio (at least equal to 1 - gap_ratio)
    Valid_pos <- c()
    for(i in 1:nb_pos){
        mat_i <- AA[[i]] # matrix nb_seq*nb_aa
        S_i <- colSums(AA[[i]])
        Tot_i <- sum(S_i)
        if (Tot_i/nb_seq >= gap) {
            Valid_pos <- c(Valid_pos, i)
        }
    }
    nb_Valid_pos <- length(Valid_pos)

    MI <- matrix(0, ncol = nb_pos, nrow = nb_pos)
    freq_ij <- matrix(0, ncol = nb_aa, nrow = nb_aa, dimnames = list(names, names))

    # Calculating the MI score for each pair of valid positions
    for(i in 1:nb_Valid_pos){
        pos_i <- Valid_pos[i] # current valid position
        mat_i <- AA[[pos_i]]  # matrix nb_seq*nb_aa
        cat(paste("pos_i : ", pos_i, "\n"))

        # Amino acid frequencies at position i in the alignment
        freq_i <- colSums(mat_i)/nb_seq

        for(j in i:nb_Valid_pos){
            pos_j <- Valid_pos[j]
            mat_j <- AA[[pos_j]] # matrix nb_seq*nb_aa

            # Amino acid frequencies at position j in the alignment
            freq_j <- colSums(mat_j)/nb_seq # matrix 1*nb_aa

            # Amino acid frequencies at positions i AND j in the alignment
            freq_p <- ((t(mat_i))%*%mat_j)/nb_seq # matrix nb_aa*nb_aa

            for(k in names){
                for(l in names){
                    freq_ij[k,l] <- freq_i[k]*freq_j[l]
                }
            }

            LOG <- (log(freq_p, base = 400) - log(freq_ij, base = 400))
            LOG <- replace(LOG, which(LOG == "-Inf"), 0)
            LOG <- replace(LOG, which(LOG == "NaN"), 0)

            MI[pos_i, pos_j] <- sum((freq_p)*(LOG))
        }
    }

    # Product correction P
    P <- matrix(0, ncol = nb_pos, nrow = nb_pos)
    mean_cov <- sum(MI)/(length(which(MI != 0)))
    for(i in 1:(nb_Valid_pos-1)){
        pos_i <- Valid_pos[i] # current valid position
        mean_i <- (sum(MI[pos_i,]) + sum(MI[,pos_i]))/(nb_pos-1)
        for(k in (i+1):nb_Valid_pos){
            pos_k <- Valid_pos[k] # current valid position
            mean_k <- (sum(MI[pos_k,]) + sum(MI[,pos_k]))/(nb_pos-1)
            P[pos_i, pos_k] <- ((mean_i*mean_k))/(mean_cov)
        }
    }
    diag(P) <- 0

    # MIP final value
    COV2 <- MI - P

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    COV2 <- COV2 + t(COV2) # complete the second triangular part of the matrix
    diag(COV2) <- diag

    # Reducing the final correlation matrix to the valid positions
    COV2 <- COV2[Valid_pos,]
    COV2 <- COV2[,Valid_pos]

    # Removing "Inf" and "NaN" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    res <- list()
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV2 <- (COV2 - mean_up)/stdev
    diag(COV2) <- diag
    res$Zscore <- COV2

    return(res)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/mip.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

network.plot <- function(top_pairs, filepathroot = NULL){
    pairs_i <- top_pairs$pairs_i
    pairs_j <- top_pairs$pairs_j

    if (is.null(filepathroot)) {
        filename <- "NETWORK.pdf"
    } else {
        filename <- paste(filepathroot, "_NETWORK.pdf", sep = "")
    }
    pdf(filename)

    # Create the data: one edge per top pair
    links <- data.frame(source = pairs_i, target = pairs_j)

    # Turn it into an igraph object
    network <- graph_from_data_frame(d = links, directed = F)

    # Count the degree of each node
    deg <- degree(network, mode = "all")

    # Plot, scaling each vertex by its degree
    plot(network, vertex.size = deg*6, vertex.color = rgb(0.1, 0.7, 0.8, 0.5))

    dev.off()
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/network.plot.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

omes <- function(align, gap_ratio = 0.2) {
    if((gap_ratio < 0) | (gap_ratio > 1)) {
        stop("gap_ratio must be in the [0,1] range.")
    }

    diag <- 0

    msa <- align
    MSA <- matrix(as.vector(unlist(msa)), ncol = length(msa[[1]]), byrow = TRUE)

    nb_pos <- length(MSA[1,]) # number of positions in the alignment
    nb_seq <- length(MSA[,1]) # number of sequences in the alignment

    colnames(MSA) <- c(1:nb_pos)
    pos_names <- colnames(MSA)

    gap <- 1 - gap_ratio # gap value indicates the minimal ratio of aa to nb_seq in the MSA
    if (gap < 1/nb_seq) {
        gap <- 1/nb_seq # positions must have at least ONE aa to be taken into account (removes all-gap columns)
    }

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y","-")

    # Binary matrix indicating which amino acid is present or not at position i in sequence j
    AA <- lapply(1:nb_pos, function(i){
        t(table(c(MSA[,i], names), row.names = c(1:(nb_seq+21))))[1:nb_seq, -1]
    })

    names <- c("A","C","D","E","F","G","H","I","K","L","M","N","P","Q","R","S","T","V","W","Y")
    nb_aa <- length(names)

    COV2 <- matrix(0, ncol = nb_pos, nrow = nb_pos)

    # Setting column and row names before matrix reduction
    rownames(COV2) <- pos_names
    colnames(COV2) <- pos_names

    # Determining valid positions with a correct gap ratio (at least equal to 1 - gap_ratio)
    Valid_pos <- c()
    for(i in 1:nb_pos){
        mat_i <- AA[[i]] # matrix nb_seq*nb_aa
        S_i <- colSums(AA[[i]])
        Tot_i <- sum(S_i)
        if (Tot_i/nb_seq >= gap) {
            Valid_pos <- c(Valid_pos, i)
        }
    }
    nb_Valid_pos <- length(Valid_pos)

    # Calculating the OMES score for each pair of valid positions
    for(i in 1:nb_Valid_pos){
        pos_i <- Valid_pos[i] # current valid position
        mat_i <- AA[[pos_i]]  # matrix nb_seq*nb_aa
        cat(paste("pos_i : ", pos_i, "\n"))
        for(j in i:nb_Valid_pos){
            pos_j <- Valid_pos[j]
            mat_j <- AA[[pos_j]] # matrix nb_seq*nb_aa

            # Sequences without a gapped residue at positions i and j (no "-" at either position)
            Valid <- which(AA[[pos_i]]%*%matrix(1, nrow = nb_aa)*AA[[pos_j]]%*%matrix(1, nrow = nb_aa) != 0)

            # Number of sequences without a gapped residue at positions i and j
            n <- length(Valid)

            Ex <- matrix(as.vector(t(AA[[pos_i]])%*%matrix(1, nrow = nb_seq)), ncol = nb_aa, nrow = nb_aa)*t(matrix(as.vector(t(AA[[pos_j]])%*%matrix(1, nrow = nb_seq)), ncol = nb_aa, nrow = nb_aa))/n
            Obs <- t(AA[[pos_i]])%*%AA[[pos_j]]

            COV2[pos_i, pos_j] <- sum((Obs-Ex)*(Obs-Ex)/n)
        }
    }

    # Complete the second triangular part of the matrix
    COV2 <- COV2 + t(COV2)
    diag(COV2) <- diag

    # Reducing the final correlation matrix to the valid positions
    COV2 <- COV2[Valid_pos,]
    COV2 <- COV2[,Valid_pos]

    # Removing "Inf" and "NA" values
    COV2[is.infinite(COV2)] <- 0
    COV2[is.na(COV2)] <- 0

    res <- list()

    # Save the matrix of scores
    res$score <- COV2

    # Compute and save the matrix of Z-scores
    # Mean and stdev must be calculated on off-diagonal elements
    mean_up <- mean(COV2[upper.tri(COV2)])
    stdev <- sd(COV2[upper.tri(COV2)])
    COV2 <- (COV2 - mean_up)/stdev
    diag(COV2) <- diag
    res$Zscore <- COV2

    return(res)
}
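# Usage sketch (illustrative, not part of the original source): OMES on a
# random alignment, then the strongest off-diagonal Z-score. Random data,
# so no real covariation is expected.
set.seed(42)
msa <- random.msa(nb.seq = 50, nb.pos = 15, aa.strict = TRUE)
omes_res <- omes(msa)
max(omes_res$Zscore[upper.tri(omes_res$Zscore)])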
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/omes.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

pca_2d <- function(pca_struct, abs = 1, ord = 2, filepathroot = NULL){
    pca_coord <- pca_struct$coord
    pca_abs <- pca_coord[, abs]
    pca_ord <- pca_coord[, ord]

    pca_x <- paste("PCA", abs, sep = "")
    pca_y <- paste("PCA", ord, sep = "")

    if(is.null(filepathroot)){
        filename <- paste("PCA_", abs, "_", ord, ".png", sep = "")
    } else {
        filename <- paste(filepathroot, "_PCA_", abs, "_", ord, ".png", sep = "")
    }

    png(filename)
    plot(pca_abs, pca_ord, xlab = pca_x, ylab = pca_y, main = basename(filename))
    dev.off()
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/pca_2d.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

pca_screeplot <- function(pca_struct, filepathroot = NULL){
    pca_coord <- pca_struct$coord
    nb_component <- length(pca_coord[1,])
    pca_positions <- rownames(pca_coord)

    components <- 1:nb_component
    variances <- unlist(lapply(1:nb_component, function(x){var(pca_coord[,x])}))

    if(is.null(filepathroot)) {
        filename <- "SCREEPLOT.png"
    } else {
        filename <- paste(filepathroot, "_SCREEPLOT.png", sep = "")
    }

    png(filename, width = 600, height = 600, units = "px")
    plot(components, variances, main = basename(filename))
    lines(components, variances)
    dev.off()
}
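# Usage sketch (illustrative, not part of the original source): scree plot
# and 2D projection of a PCA built from a coevolution matrix; all output
# files are written to temporary paths.
set.seed(42)
msa <- random.msa(nb.seq = 50, nb.pos = 15, aa.strict = TRUE)
pca <- centered_pca(omes(msa)$Zscore, filepathroot = tempfile())
pca_screeplot(pca, filepathroot = tempfile())
pca_2d(pca, abs = 1, ord = 2, filepathroot = tempfile())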
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/pca_screeplot.R
# Package: Bios2cor
# This file is part of the Bios2cor and Bios2mds R packages.
# Bios2cor and Bios2mds are free software: you can redistribute them and/or modify
# them under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

random.msa <- function(nb.seq = 100, id = "SEQ", nb.pos = 100, gap = FALSE, aa.strict = FALSE, align = NULL, align.replace = TRUE) {
    # One-letter codes for amino acids
    aa <- c("A", "R", "N", "D", "C", "Q", "E", "G", "H", "I", "L", "K",
            "M", "F", "P", "S", "T", "W", "Y", "V", "B", "Z", "J", "X")
    replace <- TRUE

    # Remove ambiguous amino acids
    if (aa.strict)
        aa <- aa[1:20]

    if (gap)
        aa <- c(aa, "-")

    if(!is.null(align)){
        if (!inherits(align, "align"))
            stop("align is not an 'align' object")
        aa <- as.vector(unlist(align))
        replace <- align.replace
    }

    msa <- lapply(seq_len(nb.seq), function(i) {sample(aa, nb.pos, replace = replace)})
    msa.names <- paste(id, seq_len(nb.seq), sep = "")
    names(msa) <- msa.names

    return(msa)
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/random.msa.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

scores.boxplot <- function(corr_matrix_list, name_list, filepathroot = NULL, elite = 25, high = 275){
    if (is.null(filepathroot)) {
        filename <- "BOXPLOT.png"
    } else {
        filename <- paste(filepathroot, "_BOXPLOT.png", sep = "")
    }

    corr_tab <- sapply(corr_matrix_list, function(x){as.vector(x)})
    nb_objects <- length(corr_matrix_list)
    print(paste("Boxplot elements :", nb_objects))

    png(filename, width = 600, height = 400, units = "px", pointsize = 12)
    boxplot(corr_tab, names = name_list, xlab = "Z-score", cex.lab = 1.5, cex.axis = 0.90,
            col = "grey", outcol = "grey", horizontal = T, las = 1)

    for(i in 1:nb_objects){
        increasing_corr <- sort(corr_tab[,i])
        decreasing_corr <- sort(corr_tab[,i], decreasing = TRUE)

        # Each off-diagonal score appears twice in the vectorized symmetric matrix,
        # so taking every second index keeps one copy per pair
        top_high <- decreasing_corr[(1:high)*2]
        points(top_high, rep(i, length(top_high)), pch = 16, col = "dodgerblue")
        top_elite <- decreasing_corr[(1:elite)*2]
        points(top_elite, rep(i, length(top_elite)), pch = 16, col = "blue")

        bottom_high <- increasing_corr[(1:high)*2]
        points(bottom_high, rep(i, length(bottom_high)), pch = 16, col = "pink")
        bottom_elite <- increasing_corr[(1:elite)*2]
        points(bottom_elite, rep(i, length(bottom_elite)), pch = 16, col = "red")
    }

    # Draw a dashed vertical line at Z-score = 4
    abline(v = 4, lty = 2)

    # Box with a thicker line width
    box(lwd = 3)

    dev.off()
}
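# Usage sketch (illustrative, not part of the original source): compare the
# Z-score distributions of two methods side by side. elite/high are scaled
# down because this toy 15x15 matrix only holds 225 values.
set.seed(42)
msa <- random.msa(nb.seq = 50, nb.pos = 15, aa.strict = TRUE)
zs <- list(omes(msa)$Zscore, mi(msa)$Zscore)
scores.boxplot(zs, c("OMES", "MI"), filepathroot = tempfile(), elite = 5, high = 25)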
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/scores.boxplot.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

scores_entropy.plot <- function(entropy, corr_matrix, filepathroot = NULL, elite = 25, high = 275, filter = NULL){
    if(missing(entropy)) stop("An 'entropy' object is required")
    if(missing(corr_matrix)) stop("A correlation matrix is required")
    if(!is.matrix(corr_matrix)) stop("corr_matrix must be a matrix. Select a score or Zscore matrix")

    corr_score <- corr_matrix
    names_c <- colnames(corr_matrix)
    size_matrix <- length(names_c)
    names_s <- names(entropy)

    if(!is.null(filter)){
        for(i in 1:size_matrix){
            for(j in 1:size_matrix){
                name_i <- names_c[i]
                name_j <- names_c[j]
                ponderation_i <- filter[name_i]
                ponderation_j <- filter[name_j]
                corr_score[i,j] <- corr_score[i,j]*ponderation_i*ponderation_j
            }
        }
    }

    # Checking the numbering used in the entropy object and in the correlation matrix
    numbering1 <- grep(".", names_s, fixed = TRUE)
    numbering2 <- grep(".", names_c, fixed = TRUE)

    # The analysis is possible only if the same numbering is used for entropy and correlation
    if(((length(numbering1) == 0) && (length(numbering2) == 0)) || ((length(numbering1) != 0) && (length(numbering2) != 0))){

        entropy_i <- c()
        entropy_j <- c()
        score <- c()
        k <- 0
        for(i in 1:(size_matrix-1)){
            for(j in (i+1):size_matrix){
                posi <- names_c[i]
                posj <- names_c[j]
                # Find the entropy of each position of the pair
                if(posi %in% names_s && posj %in% names_s){
                    k <- k+1
                    entropy_i[k] <- entropy[posi]
                    entropy_j[k] <- entropy[posj]
                    score[k] <- corr_score[posi, posj]
                } else {
                    next
                }
            }
        }

        results <- cbind(score, entropy_i, entropy_j)
        result_ordered <- results[order(as.numeric(results[,1]), decreasing = TRUE),]

        x <- result_ordered[,1]
        i <- result_ordered[,2]
        j <- result_ordered[,3]

        nb_pairs <- length(x)
        bottom_elite <- nb_pairs - elite
        bottom_high <- nb_pairs - high
        stop_high <- bottom_elite - 1
        start_high <- elite + 1

        if (is.null(filepathroot)){
            filename <- "ei_ej.pdf"
        } else {
            filename <- paste(filepathroot, "_ei_ej.pdf", sep = "")
        }

        pdf(file = filename)
        plot(i, j, main = filepathroot, xlab = "S[i]", ylab = "S[j]", pch = ".",
             col = "gray80", cex.axis = 1.4, cex.lab = 1.4)

        if(is.null(filter)) {
            points(i[bottom_high:stop_high], j[bottom_high:stop_high], col = "lightpink1", pch = 20)
            points(i[bottom_elite:nb_pairs], j[bottom_elite:nb_pairs], col = "red", pch = 20)
            points(i[start_high:high], j[start_high:high], col = "skyblue2", pch = 20)
            points(i[1:elite], j[1:elite], col = "blue", pch = 20)
        } else {
            points(i[start_high:high], j[start_high:high], col = "skyblue2", pch = 20)
            points(i[1:elite], j[1:elite], col = "blue", pch = 20)
        }
        dev.off()
    } else {
        print("Mismatch in the notation used for the correlation and entropy files. Please verify your files!")
    }
}
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/scores_entropy.plot.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

top_pairs_analysis <- function(corr_matrix, filepathroot = NULL, top = 25, entropy = NULL, filter = NULL){
  if(missing(corr_matrix)){
    stop("A correlation matrix is required")
  }
  if(!is.matrix(corr_matrix)){
    stop("The first argument must be a score or Zscore matrix.")
  }

  if(is.null(filepathroot)){
    filename_contact <- paste("TOP", top, "_CONTACTS.csv", sep = "")
    filename_score <- paste("TOP", top, "_SCORES.csv", sep = "")
  } else {
    filename_contact <- paste(filepathroot, "_TOP", top, "_CONTACTS.csv", sep = "")
    filename_score <- paste(filepathroot, "_TOP", top, "_SCORES.csv", sep = "")
  }

  # Keep only the upper triangle of the matrix
  corr_matrix[lower.tri(corr_matrix)] <- 0
  corr_score <- corr_matrix
  names <- colnames(corr_score)
  size_matrix <- length(names)

  # Taking into account the delta filter
  if(!is.null(filter)){
    for(i in 1:size_matrix){
      for(j in 1:size_matrix){
        name_i <- names[i]
        name_j <- names[j]
        ponderation_i <- filter[name_i]
        ponderation_j <- filter[name_j]
        # score equal to zero when at least one of the two positions is out of the allowed entropy range
        corr_score[i, j] <- corr_score[i, j] * ponderation_i * ponderation_j
      }
    }
  }

  # Extracting top value positions
  x <- which(corr_score >= sort(corr_score, decreasing = TRUE)[top], arr.ind = TRUE)
  nb_pairs <- length(x[, 1])
  positions <- unique(c(x[, 1], x[, 2]))
  nb_positions <- length(positions)

  # Count the number of top pairs ("contacts") each position is involved in
  contacts <- c()
  for(i in 1:nb_positions){
    pos <- positions[i]
    nb_contacts <- 0
    for(j in 1:nb_pairs){
      node_1 <- x[j, 1]
      node_2 <- x[j, 2]
      if((pos == node_1) | (pos == node_2)){
        nb_contacts <- nb_contacts + 1
      }
    }
    contacts[i] <- nb_contacts
  }

  # Writing results
  positions <- names[positions]

  if(is.null(entropy)){
    # Writing list of top positions with number of contacts
    head <- paste("position", "contact")
    write(head, filename_contact, append = FALSE)
    for(k in 1:nb_positions){
      position <- positions[k]
      contact <- contacts[k]
      current_line <- paste(position, contact)
      write(current_line, filename_contact, append = TRUE)
    }
  } else {
    head <- paste("position", "contact", "entropy")
    write(head, filename_contact, append = FALSE)
    for(k in 1:nb_positions){
      position <- positions[k]
      contact <- contacts[k]
      entropy_k <- format(as.numeric(entropy[position]), digits = 3, nsmall = 3)
      current_line <- paste(position, contact, entropy_k)
      write(current_line, filename_contact, append = TRUE)
    }
  }

  pairs_i <- x[, 1] # pair_i
  pairs_j <- x[, 2] # pair_j

  res <- list()
  res$pairs_i <- names[pairs_i]
  res$pairs_j <- names[pairs_j]
  res$positions <- positions
  res$contacts <- contacts

  ## Writing top score positions
  score <- unlist(lapply(1:nb_pairs, function(a){ corr_score[x[a, 1], x[a, 2]] }))
  index <- sort(score, decreasing = TRUE, index.return = TRUE)$ix
  ordered_x <- x[index, ]

  write("pair_i pair_j score_ij", filename_score, append = FALSE)
  for(i in 1:nb_pairs){
    pair_i <- names[ordered_x[i, 1]]
    pair_j <- names[ordered_x[i, 2]]
    score_ij <- score[index[i]]
    score_ij <- format(score_ij, digits = 3, nsmall = 3)
    score_line <- paste(pair_i, pair_j, score_ij)
    write(score_line, filename_score, append = TRUE)
  }

  return(res)
}
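# Usage sketch (illustrative only: the matrix below is made-up data, not a real
# correlation object from this package). Build a small symmetric score matrix,
# then extract the top 5 pairs; demo_TOP5_CONTACTS.csv and demo_TOP5_SCORES.csv
# would be written with the "demo" file path root.
#
#   set.seed(7)
#   m <- matrix(runif(100), 10, 10,
#               dimnames = list(paste0("p", 1:10), paste0("p", 1:10)))
#   m[lower.tri(m)] <- t(m)[lower.tri(m)]   # make the matrix symmetric
#   res <- top_pairs_analysis(m, filepathroot = "demo", top = 5)
#   res$pairs_i
#   res$contacts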
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/top_pairs_analysis.R
#
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

write.entropy <- function(entropy, filepathroot = NULL){
  if (missing(entropy)) {
    stop("A matrix of type 'entropy' is required.")
  }

  if (is.null(filepathroot)) {
    filename <- "ENTROPY.csv"
  } else {
    filename <- paste(filepathroot, "_ENTROPY.csv", sep = "")
  }

  nb_pos <- length(entropy)
  positions <- names(entropy)

  head <- paste("position", "entropy")
  write(head, file = filename, append = FALSE)

  for(i in 1:nb_pos){
    pos <- positions[i]
    if (entropy[i] == '' | is.na(entropy[i])){
      val <- ''
      current_line <- paste(pos, val)
      write(current_line, file = filename, append = TRUE)
    } else {
      val <- as.numeric(entropy[i])
      val <- format(val, digits = 3, nsmall = 3)
      current_line <- paste(pos, val)
      write(current_line, file = filename, append = TRUE)
    }
  }

  # Drop missing values before drawing the histogram
  entropy <- as.numeric(na.omit(entropy))

  if (is.null(filepathroot)) {
    filename <- "ENTROPY_HIST.png"
  } else {
    filename <- paste(filepathroot, "_ENTROPY_HIST.png", sep = "")
  }

  png(filename)
  hist(entropy)
  dev.off()
}
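# Usage sketch (hypothetical values; in practice the vector would come from the
# package's entropy calculation): a named numeric vector, with one NA to show
# how missing values are passed through to the CSV and dropped from the histogram.
#
#   ent <- c("10" = 1.25, "11" = 0.80, "12" = NA)
#   write.entropy(ent, filepathroot = "demo")
#   # -> demo_ENTROPY.csv and demo_ENTROPY_HIST.png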
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/write.entropy.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

write.pca <- function(corr_pca, filepathroot = NULL, pc = NULL, entropy = NULL){
  if (missing(corr_pca)) {
    stop("A PCA object created by the centered_pca function is required")
  }

  if (is.null(filepathroot)) {
    filename <- "PCA_COORD.csv"
  } else {
    filename <- paste(filepathroot, "_PCA_COORD.csv", sep = "")
  }

  pca_coord <- corr_pca$coord
  pca_positions <- rownames(pca_coord)
  pca_size <- length(pca_coord[, 1])

  if (is.null(pc)) {
    pca_dim <- length(pca_coord[1, ])
  } else {
    pca_dim <- pc
  }

  # Build the header line: position, optional entropy, then one PCA column per component
  head <- "position"
  if(!is.null(entropy)) {
    head <- paste(head, "entropy")
  }
  lapply(1:pca_dim, function(dim){
    head <<- paste(head, paste("PCA", dim, sep = ""))
  })
  write(head, file = filename, append = FALSE)

  if(!is.null(entropy)) {
    for(pos in 1:pca_size){
      pos_line <- pca_coord[pos, ]
      position <- pca_positions[pos]
      entropy_val <- format(as.numeric(entropy[position]), digits = 3, nsmall = 3)
      # Ignoring possible NaN values
      if(sum(is.na(pos_line)) <= 0){
        coord_tmp <- paste(pca_coord[pos, 1:pca_dim], collapse = " ")
        current_line <- paste(position, entropy_val, coord_tmp)
        write(current_line, file = filename, append = TRUE)
      }
    }
  } else {
    for(pos in 1:pca_size){
      pos_line <- pca_coord[pos, ]
      position <- pca_positions[pos]
      # Ignoring possible NaN values
      if(sum(is.na(pos_line)) <= 0){
        coords <- ""
        for(dim in 1:pca_dim){
          coords <- paste(coords, pca_coord[pos, dim])
        }
        current_line <- paste(position, coords, sep = "")
        write(current_line, file = filename, append = TRUE)
      }
    }
  }
}
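# Usage sketch (a mocked-up PCA object; normally corr_pca would come from
# centered_pca). Only the $coord element is used here.
#
#   set.seed(1)
#   corr_pca <- list(coord = matrix(rnorm(30), nrow = 10,
#                                   dimnames = list(as.character(1:10), NULL)))
#   write.pca(corr_pca, filepathroot = "demo", pc = 3)
#   # -> demo_PCA_COORD.csv, one row per position with three component columns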
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/write.pca.R
# Package: Bios2cor
# This file is part of Bios2cor R package.
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

write.pca.pdb <- function(corr_pca, filepathroot = NULL, trio_comp = c(1:3)){
  if (missing(corr_pca)) {
    stop("A PCA object created by the centered_pca function is required")
  }

  pca_coord <- corr_pca$coord

  pca_1 <- trio_comp[1]
  pca_2 <- trio_comp[2]
  pca_3 <- trio_comp[3]

  if (is.null(filepathroot)) {
    pdb_filepath <- paste("PCA_", pca_1, "_", pca_2, "_", pca_3, ".pdb", sep = "")
    pml_filepath <- paste("PCA_", pca_1, "_", pca_2, "_", pca_3, ".pml", sep = "")
  } else {
    pdb_filepath <- paste(filepathroot, "_PCA_", pca_1, "_", pca_2, "_", pca_3, ".pdb", sep = "")
    pml_filepath <- paste(filepathroot, "_PCA_", pca_1, "_", pca_2, "_", pca_3, ".pml", sep = "")
  }

  pca_positions <- rownames(pca_coord)
  pca_size <- length(pca_positions)

  # check whether data are from a MSA or a trajectory
  numbering <- grep(".", pca_positions, fixed = TRUE)

  alpha <- c("A", "B", "C", "D")
  colors <- c("blue", "red", "green", "orange", "white")

  background_color <- paste("bg_color", colors[5])
  write(background_color, file = pml_filepath, append = FALSE)

  for(i in 1:4){
    chain_coloration <- paste("color", colors[i], ", chain", alpha[i])
    write(chain_coloration, file = pml_filepath, append = TRUE)
  }

  # Configuring elements
  as_sphere <- "as sphere"
  sphere_scale <- "set sphere_scale, 1"
  write(as_sphere, file = pml_filepath, append = TRUE)
  write(sphere_scale, file = pml_filepath, append = TRUE)

  ## Writing PDB file
  pdb_line <- "REMARK PDB file created for visualization of PCA analysis"
  write(pdb_line, file = pdb_filepath, append = FALSE)

  ## fields required in each line
  # fields without change
  head <- "HETATM"
  atom_name <- "O"
  res_type <- "HOH"
  occupation <- "1.00"
  B_factor <- 100.00
  atom_type <- "O"

  format <- "%6s%5s %-4s%3s %1s%4s %8s%8s%8s %4s%6s %4s"

  # fields with changes
  if(length(numbering) == 0){
    for(pos in 1:pca_size){
      x <- pca_coord[pos, pca_1]
      y <- pca_coord[pos, pca_2]
      z <- pca_coord[pos, pca_3]
      if (!is.na(x) & !is.na(y) & !is.na(z)) {
        residue_number <- pca_positions[pos]
        chain_ID <- "A"
        pdb_line <- sprintf(format, head, pos, atom_name, res_type, chain_ID, residue_number,
                            round(x, digits = 3), round(y, digits = 3), round(z, digits = 3),
                            occupation, B_factor, atom_type)
        write(pdb_line, file = pdb_filepath, append = TRUE)
      }
    }
    last_number <- as.numeric(residue_number)
  } else {
    resno <- sub("\\..*$", "", pca_positions)
    angle <- sub("^[0-9]*\\.", "", pca_positions)
    for(pos in 1:pca_size){
      x <- pca_coord[pos, pca_1]
      y <- pca_coord[pos, pca_2]
      z <- pca_coord[pos, pca_3]
      if (!is.na(x) & !is.na(y) & !is.na(z)) {
        residue_number <- resno[pos]
        angle_ID <- angle[pos]
        if (angle_ID == "chi1") {chain_ID <- "A"}
        if (angle_ID == "chi2") {chain_ID <- "B"}
        if (angle_ID == "chi3") {chain_ID <- "C"}
        if (angle_ID == "chi4") {chain_ID <- "D"}
        pdb_line <- sprintf(format, head, pos, atom_name, res_type, chain_ID, residue_number,
                            round(x, digits = 3), round(y, digits = 3), round(z, digits = 3),
                            occupation, B_factor, atom_type)
        write(pdb_line, file = pdb_filepath, append = TRUE)
      }
    }
    last_number <- as.numeric(residue_number)
  }

  # Creating elements used to create axis
  max_x <- max(abs(na.omit(pca_coord[, 1])))
  norm <- round(max_x * 2, digits = 3)

  axis_fake_atoms <- c("O", "C", "N", "Pb")
  axis_fake_chain_ID <- "Z"

  x_pos <- pca_size + 1
  y_pos <- pca_size + 2
  z_pos <- pca_size + 3
  origin_pos <- pca_size + 4

  x_residue <- last_number + 1
  y_residue <- last_number + 2
  z_residue <- last_number + 3
  origin_residue <- last_number + 4

  x_name <- "XXX"
  y_name <- "YYY"
  z_name <- "ZZZ"
  origin_name <- "000"

  x_axis <- sprintf(format, head, x_residue, axis_fake_atoms[1], x_name, axis_fake_chain_ID, x_residue,
                    norm, 0, 0, occupation, B_factor, axis_fake_atoms[1])
  y_axis <- sprintf(format, head, y_residue, axis_fake_atoms[2], y_name, axis_fake_chain_ID, y_residue,
                    0, norm, 0, occupation, B_factor, axis_fake_atoms[2])
  z_axis <- sprintf(format, head, z_residue, axis_fake_atoms[3], z_name, axis_fake_chain_ID, z_residue,
                    0, 0, norm, occupation, B_factor, axis_fake_atoms[3])
  origin <- sprintf(format, head, origin_residue, axis_fake_atoms[4], origin_name, axis_fake_chain_ID, origin_residue,
                    0, 0, 0, occupation, B_factor, axis_fake_atoms[4])

  write(x_axis, file = pdb_filepath, append = TRUE)
  write(y_axis, file = pdb_filepath, append = TRUE)
  write(z_axis, file = pdb_filepath, append = TRUE)
  write(origin, file = pdb_filepath, append = TRUE)

  # Connecting x, y and z to origin
  connect_x <- paste("CONECT", x_residue, origin_residue)
  connect_y <- paste("CONECT", y_residue, origin_residue)
  connect_z <- paste("CONECT", z_residue, origin_residue)

  write(connect_x, file = pdb_filepath, append = TRUE)
  write(connect_y, file = pdb_filepath, append = TRUE)
  write(connect_z, file = pdb_filepath, append = TRUE)

  write("END", file = pdb_filepath, append = TRUE)

  # Display CONECT elements when loading pml file
  enable_connections <- "show lines"
  write(enable_connections, file = pml_filepath, append = TRUE)

  hidden_spheres <- "hide spheres, chain Z"
  write(hidden_spheres, file = pml_filepath, append = TRUE)
}
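# Usage sketch (mocked-up input; corr_pca would normally come from centered_pca).
# Plain numeric rownames take the MSA branch; rownames like "27.chi1" would take
# the trajectory branch and be split over chains A-D by dihedral.
#
#   set.seed(1)
#   corr_pca <- list(coord = matrix(rnorm(30), nrow = 10,
#                                   dimnames = list(as.character(1:10), NULL)))
#   write.pca.pdb(corr_pca, filepathroot = "demo")
#   # -> demo_PCA_1_2_3.pdb and demo_PCA_1_2_3.pml, for viewing in PyMOL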
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/write.pca.pdb.R
# Bios2cor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the GNU General Public License at:
# http://www.gnu.org/licenses/
#

write.scores <- function(correlation, entropy = NULL, filepathroot = NULL){
  if (missing(correlation)) {
    stop("A correlation object is required.")
  }

  if (is.null(filepathroot)) {
    filename <- "CORR_SCORES.csv"
  } else {
    filename <- paste(filepathroot, "_CORR_SCORES.csv", sep = "")
  }

  corr_score <- correlation$score
  corr_Zscore <- correlation$Zscore

  names_c <- colnames(corr_score)
  nb_pos <- length(names_c)

  numbering1 <- grep(".", names_c, fixed = TRUE)

  if (!is.null(entropy)){
    names_s <- names(entropy)
    numbering2 <- grep(".", names_s, fixed = TRUE)
    # Check that both entropy and correlation objects are from a MSA or a trajectory
    if(((length(numbering1) == 0) && (length(numbering2) != 0)) || ((length(numbering1) != 0) && (length(numbering2) == 0))){
      print("Mismatch in the notation used for the correlation and entropy files. Please verify your files!")
    }
  }

  if(length(numbering1) != 0){
    # Read the score_noauto and the Zscore_noauto matrices
    corr_score_noauto <- correlation$score_noauto
    corr_Zscore_noauto <- correlation$Zscore_noauto

    if (!is.null(entropy)){
      head <- paste("pos_i", "pos_j", "score", "Zscore", "score_noauto", "Zscore_noauto", "entropy_i", "entropy_j")
      write(head, file = filename, append = FALSE)
      for(i in 1:nb_pos){
        for(j in i:nb_pos){
          pos_i <- names_c[i]
          pos_j <- names_c[j]
          if(pos_i != pos_j){
            entropy_i <- format(as.numeric(entropy[pos_i]), digits = 3, nsmall = 3)
            entropy_j <- format(as.numeric(entropy[pos_j]), digits = 3, nsmall = 3)
            score <- format(as.numeric(corr_score[pos_i, pos_j]), digits = 3, nsmall = 3)
            Zscore <- format(as.numeric(corr_Zscore[pos_i, pos_j]), digits = 3, nsmall = 3)
            score_noauto <- format(as.numeric(corr_score_noauto[pos_i, pos_j]), digits = 3, nsmall = 3)
            Zscore_noauto <- format(as.numeric(corr_Zscore_noauto[pos_i, pos_j]), digits = 3, nsmall = 3)
            # Create the line to insert to the file
            current_line <- paste(pos_i, pos_j, score, Zscore, score_noauto, Zscore_noauto, entropy_i, entropy_j)
            write(current_line, file = filename, append = TRUE)
          }
        }
      }
    } else {
      head <- paste("pos_i", "pos_j", "score", "Zscore", "score_noauto", "Zscore_noauto")
      write(head, file = filename, append = FALSE)
      for(i in 1:nb_pos){
        for(j in i:nb_pos){
          pos_i <- names_c[i]
          pos_j <- names_c[j]
          if(pos_i != pos_j){
            score <- format(as.numeric(corr_score[pos_i, pos_j]), digits = 3, nsmall = 3)
            Zscore <- format(as.numeric(corr_Zscore[pos_i, pos_j]), digits = 3, nsmall = 3)
            score_noauto <- format(as.numeric(corr_score_noauto[pos_i, pos_j]), digits = 3, nsmall = 3)
            Zscore_noauto <- format(as.numeric(corr_Zscore_noauto[pos_i, pos_j]), digits = 3, nsmall = 3)
            # Create the line to insert to the file
            current_line <- paste(pos_i, pos_j, score, Zscore, score_noauto, Zscore_noauto)
            write(current_line, file = filename, append = TRUE)
          }
        }
      }
    }
  } else {
    if (!is.null(entropy)){
      head <- paste("pos_i", "pos_j", "score", "Zscore", "entropy_i", "entropy_j")
      write(head, file = filename, append = FALSE)
      for(i in 1:nb_pos){
        for(j in i:nb_pos){
          pos_i <- names_c[i]
          pos_j <- names_c[j]
          if(pos_i != pos_j){
            entropy_i <- entropy[pos_i]
            entropy_j <- entropy[pos_j]
            score <- format(as.numeric(corr_score[pos_i, pos_j]), digits = 3, nsmall = 3)
            Zscore <- format(as.numeric(corr_Zscore[pos_i, pos_j]), digits = 3, nsmall = 3)
            if (entropy_i == '') {
              entropy_i <- 'ND'
            } else {
              entropy_i <- format(as.numeric(entropy_i), digits = 3, nsmall = 3)
            }
            if (entropy_j == '') {
              entropy_j <- 'ND'
            } else {
              entropy_j <- format(as.numeric(entropy_j), digits = 3, nsmall = 3)
            }
            # Creating the line to insert to the file
            current_line <- paste(pos_i, pos_j, score, Zscore, entropy_i, entropy_j)
            write(current_line, file = filename, append = TRUE)
          }
        }
      }
    } else {
      head <- paste("pos_i", "pos_j", "score", "Zscore")
      write(head, file = filename, append = FALSE)
      for(i in 1:nb_pos){
        for(j in i:nb_pos){
          pos_i <- names_c[i]
          pos_j <- names_c[j]
          if(pos_i != pos_j){
            score <- format(as.numeric(corr_score[pos_i, pos_j]), digits = 3, nsmall = 3)
            Zscore <- format(as.numeric(corr_Zscore[pos_i, pos_j]), digits = 3, nsmall = 3)
            # Creating the line to insert to the file
            current_line <- paste(pos_i, pos_j, score, Zscore)
            write(current_line, file = filename, append = TRUE)
          }
        }
      }
    }
  }
}
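# Usage sketch (a hand-built stand-in for a correlation object; real ones come
# from the package's correlation functions and, for trajectory data with names
# like "27.chi1", also carry score_noauto/Zscore_noauto matrices):
#
#   set.seed(42)
#   m <- matrix(rnorm(25), 5, 5,
#               dimnames = list(paste0("p", 1:5), paste0("p", 1:5)))
#   corr <- list(score = m, Zscore = (m - mean(m)) / sd(m))
#   write.scores(corr, filepathroot = "demo")
#   # -> demo_CORR_SCORES.csv with one line per position pair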
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/write.scores.R
"xyz2torsion" <- function(pdb, xyz, tbl = c("basic", "mainchain", "sidechain", "all", "phi", "psi", paste("chi", 1:5, sep="")), ncore = NULL) { if(length(pdb$xyz) != ncol(xyz)) stop("Number of atoms in PDB doesn't match xyz") ncore <- setup.ncore(ncore, bigmem = TRUE) tor.names <- c("phi", "psi", paste("chi", 1:5, sep="")) tbl <- match.arg(tbl) tbl <- switch(tbl, "basic" = tor.names[1:3], "mainchain" = tor.names[1:2], "sidechain" = tor.names[3:7], "all" = tor.names, tbl ) tor <- torsion.pdb(pdb) fill <- !is.na(t(tor$tbl[, tbl])) resno <- rep(pdb$atom[pdb$calpha, "resno"], each = length(tbl)) cname <- paste(resno[fill], rep(tbl, ncol(fill))[fill], sep=".") n <- sum(fill) pb <- txtProgressBar(min=0, max=nrow(xyz), style=3) if(ncore > 1) { tor <- big.matrix(nrow(xyz), n, init=NA, type="double") iipb <- big.matrix(1, nrow(xyz), init=NA) mclapply(1:nrow(xyz), function(i) { pdb$xyz <- xyz[i, ] tor[i, ] <- t(torsion.pdb(pdb)$tbl[, tbl])[fill] iipb[1, i] <- 1 setTxtProgressBar(pb, sum(!is.na(iipb[1,]))) return() } ) tor <- tor[,]; gc() } else { tor <- t( sapply(1:nrow(xyz), function(i) { pdb$xyz <- xyz[i, ] tor <- t(torsion.pdb(pdb)$tbl[, tbl])[fill] setTxtProgressBar(pb, i) return(tor) } ) ) } close(pb) colnames(tor) <- cname return(tor) }
/scratch/gouwar.j/cran-all/cranData/Bios2cor/R/xyz2torsion.R
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ----setup, echo = FALSE------------------------------------------------------
library(Biostatistics)
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/doc/Biostatistics.R
--- title: "Biostatistics vignette" author: "Rob Knell" date: "January 2021" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Biostatistics vignette} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, echo = FALSE} library(Biostatistics) ``` # The Biostatistics Package This package consists of a series of learnr tutorials for use in teaching statistics to biologists. They were written for use in undergraduate and postgraduate teaching in the UK but they could also be used for individual, self-directed learning. The subjects covered range from basic data visualistion and description through to reasonably advanced linear modelling. There are obviously many subjects which are not currently covered such as generalised linear models, mixed effects models and multivariate statistics and it is hoped that these will be incorporated in the future. There is a strong emphasis throughout the tutorial on analysing real data sets. This is much better for learning statistics than using synthetic example data because with real data comes all of the issues and uncertainty associated with real science. The data used here have mostly been made publicly available by the authors of papers published in the biological literature, mostly via the [Dryad data repository](https://datadryad.org/stash), and I would like to thank all of them for this. The tutorials are written for the [learnr](https://rstudio.github.io/learnr/) package which uses an [rmarkdown](https://rmarkdown.rstudio.com/) framework to render tutorials into [shiny](https://shiny.rstudio.com/) webapps. The rmarkdown files for all of the tutorials are available on the [author's github page](https://github.com/rjknell). ## Running tutorials There are two ways of running these tutorials. The easy way assumes you are using a recent version of RStudio. If this is the case then once you have installed the package the tutorials will show up in the 'Tutorial' tab in the RStudio pane that also includes the Environment and History tabs. Click the "Start Tutorial" button and the tutorial will render, which can take a few seconds, and then appear in the Tutorial tab. You'll probably want to maximise the pane within your RStudio window. If you want to finish the tutorial click on the 'Stop' sign button at the top left of the tab. If you would rather run your tutorial in a separate browser window then you can use the `run_tutorial()` function from the learnr package. You need to specify the name of the tutorial and the package, so `learnr::run_tutorial("02_Descriptive_statistics", package = "Biostatistics")` will run the Descriptive Statistics tutorial and `learnr::run_tutorial("17_Multiple_Regression", package = "Biostatistics")` will run the Multiple Regression tutorial. In my experience the first method, with the Tutorial pane, seems more stable and sometimes tutorials won't render using `run_tutorial()` for reasons that are not clear. ## List of tutorials The tutorials currently in the package are: 00_Introduction 01_Frequency_histograms 02_Descriptive_statistics 03_Boxplots 04_Scatterplots 05_Sampling_distributions 06_Standard_errors 07_Confidence_intervals 08_CIs_comparing_two_means 09_Paired_sample_t_tests 10_Two_sample_t_tests 11_Chi_square_tests 12_Correlation 13_Single_factor_ANOVA 14_Linear_Regression 15_Model_assumptions 16_Multi_factor_ANOVA 17_Multiple_regression 18_Factors_and_continuous_variables 19_Model_selection
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/doc/Biostatistics.Rmd
--- title: "Descriptive and Exploratory Statistics: Introduction" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Introduction to the biostatistics tutorials. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = FALSE) ``` ## Introduction to the biostatistics tutorials Welcome to the biostatistics tutorials. These are a set of interactive tutorials developed for teaching statistics to biologists at both undergraduate and postgraduate levels. The tutorials cover a range of subjects from basic data visualisation and descriptive statistics through the fundamentals of statistical testing to advanced linear model fitting. There is a strong emphasis throughout on analysing real datasets in order to give experience with the messiness and uncertainty that can arise when deling with real data. The concepts covered in these tutorials here are explained with both text and, in many cases, embedded video. There are interactive code exercises which are written for people with only a basic understanding of R, and there are quizzes that you can use to test your knowledge and understanding. It is hoped in future to extend these tutorials to cover further subjects such as multivariate statistics, mixed effects models and generalised linear models. Please note that the current release of RStudio will include some tutorials by defualt in the tutorials pane which are not a part of this package. These are: Data basics, Filter observations, Create new variables, Set Up and Hello, Tutorial! There is not currently an easy way to remove these. These tutorials were written by Rob Knell, School of Biological and Chemical Sciences, Queen Mary University of London. <a href="mailto:[email protected]">Send Email</a> ## License This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license ## Interactive coding exercises *Here's an example of a simple coding exercise with an empty box provided for entering the answer. Note that there is a "hints" button. Most of the exercises will have either this or a "solution" button. These are here to help you if you get stuck. If there is more than one hint then there will be a "next hint" button visible when you bring the first hint up. When you want to run the code click the "Run code" button on the right.* Write the R code required to add two plus five: ```{r two-plus-two, exercise=TRUE} ``` ```{r two-plus-two-hint-1} # You can just use the number keys and the # "+" symbol ``` ```{r two-plus-two-hint-2} # This is the solution: 2 + 5 ``` *Some exercises come with code already in the box which you will need to edit.* This code will draw a scatterplot of two sets of random numbers. Change the plot symbol colour from "darkgreen" to "steelblue". ```{r plot1, exercise=TRUE, exercise.lines = 5} plot(rnorm(20) ~ runif(20), pch = 16, col = "darkgreen") ``` ```{r plot1-hint-1} # The argument that sets the colour is "col = " # You just need to replace "darkgreen" with "steelblue" # Don't forget the quote marks ``` ```{r plot1-hint-2} # This is the solution: plot(rnorm(20) ~ runif(20), pch = 16, col = "steelblue") ``` ## Multiple choice quizzes These are largely self explanatory. Some quizzes are4 "single best answer", meaning that only one answer per set is correct. Some have more than one answer that is correct and if this is the case you'll be told it in the text. 
## Tutorial tips If you're running a tutorial in the tutorial pane of RStudio don't forget about the three buttons in the top left. The home button takes you back to the list of tutorials, the stop button ends the tutorial and the button in the middle labelled "Show in new window" opens the tutorial in a pop-up window. If you are using a small screen then the pop-up window can be a really good option. Once a tutorial is finished it should be self evident, but if in doubt when you come to the license information then you've got to the end. The tutorials don't automatically follow each other so you need to close the one you've finished and then if you wish open the next one. ##
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/00_Introduction/Introduction.Rmd
--- title: "Descriptive and Exploratory Statistics 1: Frequency Histograms" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Learn how to draw and use frequency histograms in R to understand the distribution of your data. Become familiar with the more common kinds of distribution. --- ```{r setup, include=FALSE} library(learnr) require(plotrix) knitr::opts_chunk$set(echo = TRUE) #Load longevity dataset load("longevity.rda") # Load parrots2 dataset load("parrots2.rda") ``` ## What is a frequency histogram? A frequency histogram is a way of displaying the data in a single variable which shows you many of the important features of the dataset: it shows where the centre of the data (the *central tendency*) is located, it shows how much *spread* or *dispersion* there is in the data and, importantly, it shows the shape of the dataset, giving you an indication of what the *frequency distribution* of the data might be. It also allows you to check your data for problems such as problem data points that might be the result of errors and which might show up as having much, much larger or smaller values than the rest, or which might have impossible values such as a negative count when only positive ones are possible. Before you start this tutorial, you're recommended to watch this video which explains some of the basics. ![](https://youtu.be/bQQsbmeungc) The simplest frequency histograms are those where the data are organised on an *ordinal* scale: one with discrete values that have a natural order to them. One example might be data which are collected in a survey and which are scored on a *Likert Scale* --- this is a scale where a respondent is asked to express how much the agree or disagree with a particular statement, usually on a five or seven point scale. If you've spent more than 30 seconds as a student you've most likely been asked to fill one or more of these in. Here are some data from a fictional survey of attitudes towards conservation: <br><br> **Statement 1:** Conservation work can only succeed if it brings benefits to the people living in the area to be conserved | Response | Strongly disagree | Disagree | Neutral | Agree | Strongly Agree | |:--------------------|:------------------|:---------|:--------|:-------|:----------------| | Number of responses | 12 | 20 | 29 | 61 | 41 | <br><br> **Statement 2:** Conservation can only succeed in the long term if it generates profits for business | Response | Strongly disagree | Disagree | Neutral | Agree | Strongly Agree | |:--------------------|:------------------|:---------|:--------|:-------|:----------------| | Number of responses | 44 | 27 | 15 | 53 | 24 | <br><br> You can look at those data and you can see some patterns without much thought, but it's easier to see if we plot the data as histograms. 
```{r, fig.height = 3.5, fig.width = 5, echo = FALSE, fig.cap = "**Figure 1** Frequency histograms for responses to two statements about conservation"}
par(mfrow = c(1,2))

responses <- rep(
  c("Strongly disagree", "Disagree", "Neutral", "Agree", "Strongly Agree"),
  times = c(12, 20, 29, 61, 41)
)

responses_table <- table(responses)
responses_table <- responses_table[c(5, 2, 3, 1, 4)]

par(mar = c(8, 4, 2, 2))

barplot(responses_table,
        space = 0,
        las = 2,
        col = "darkorange3",
        main = "Statement 1",
        ylab = "Frequency",
        cex.main = 0.7,
        cex.axis = 0.7,
        cex.names = 0.7)

responses <- rep(
  c("Strongly disagree", "Disagree", "Neutral", "Agree", "Strongly Agree"),
  times = c(44, 27, 15, 53, 24)
)

responses_table <- table(responses)
responses_table <- responses_table[c(5, 2, 3, 1, 4)]

par(mar = c(8, 4, 2, 2))

barplot(responses_table,
        space = 0,
        las = 2,
        col = "darkorange3",
        main = "Statement 2",
        cex.main = 0.7,
        cex.axis = 0.7,
        cex.names = 0.7)
```

```{r echo = FALSE}
par(mar = c(5,4,2,2) + 0.1)
par(mfrow = c(1,1))
```

For statement 1, the most common response is "Agree" and the histogram shows a fairly sharp peak, with relatively few people disagreeing with the statement. For statement 2, however, even though the most common response is the same as statement 1 ("Agree"), the shape of the dataset is different, with a substantial second peak corresponding to "Strongly disagree" and relatively few neutral responses --- we call this a *bimodal* distribution. While the first statement seems to be something that most people tend to agree on, it looks like the second one is something that people are divided on, with strong opinions both for and against this idea. By plotting the whole dataset as a frequency histogram we have quickly visualised the important patterns in the data and reached a richer understanding than we might have done if we just looked at summary statistics such as the most common response.

## Frequency histograms for continuous data

A lot of the time we are dealing with data that doesn't fit itself nicely into a small number of convenient categories, of course. You might have measurements of size, weight, or time, you might have counts of cells, immunogold staining densities, expression data from a transcriptomics study or estimates of viral load from qPCR --- the kinds of data that biologists deal with are endless. Here's one example from a paper published in 2014 which examined how ecology is related to maximum lifespan in 1368 species of birds and mammals^1^. I've extracted the data for just the mammalian order Artiodactyla (the even-toed ungulates) for which there are 101 different species with known maximum life spans, and this data frame is loaded as `longevity`.
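Before we pull out the artiodactyls it can be worth a quick look at the data frame itself. This step isn't part of the analysis; it just confirms which variables are present and what type they are (the variable names are the ones used in the code later in this tutorial):

```{r}
str(longevity)
```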
Here are the first ten of these species with their maximum lifespans in years:

<br>

| Species | Common name | Maximum lifespan (years) |
|:-----------------------|:------------------|:-------------------------|
| *Elaphodus cephalophus* | Tufted deer | 22.7 |
| *Eudorcas thomsonii* | Thomson's gazelle | 20.0 |
| *Gazella dorcas* | Dorcas gazelle | 23.7 |
| *Gazella gazella* | Mountain gazelle | 18.3 |
| *Gazella leptoceros* | Rhim gazelle | 14.6 |
| *Gazella subgutturosa* | Goitered gazelle | 16.3 |
| *Giraffa camelopardalis* | Giraffe | 39.5 |
| *Addax nasomaculatus* | Addax | 28.0 |
| *Aepyceros melampus* | Impala | 25.6 |
| *Alcelaphus buselaphus* | Hartebeest | 22.5 |

<br>

Our data on maximum lifespan don't fall into easy classes like our earlier example with survey data --- there are 77 unique values in our 101 species longevities, so simply counting the number of datapoints with each value would not be useful. What we do with data like this is to divide it into a series of "bins" each of which has a range of values, and then count the number of data points that fall into each bin. If we do this for our data from the artiodactyls we get this:

<br>

| Bin | Count |
|:--------|:------|
| 0-5 | 0 |
| 5.1-10 | 1 |
| 10.1-15 | 2 |
| 15.1-20 | 26 |
| 20.1-25 | 42 |
| 25.1-30 | 19 |
| 30.1-35 | 6 |
| 35.1-40 | 3 |
| 40.1-45 | 1 |
| 45.1-50 | 0 |
| 50.1-55 | 0 |
| 55.1-60 | 0 |
| 60.1-65 | 1 |

<br>

You can see, for example, that there are two species with maximum lifespans between 10.1 and 15 years, 19 with maximum lifespans between 25.1 and 30 years and none with maximum lifespans between 50.1 and 55 years. Using R we don't have to generate a table like this or count numbers because the `hist()` function will do this for us, so it's very easy to make a histogram. First we have to load the data. Note that we're using `subset()` to generate a new data frame with only the artiodactyl data from the big `longevity` data frame.

```{r}
# Make order a factor
longevity$order <- as.factor(longevity$order)

# Subset out the artiodactyls
artiodactyls <- subset(longevity, order == "Artiodactyla")
```

Now we can generate our frequency histogram.

```{r fig.height = 4, fig.width = 5, fig.cap = "**Figure 2** Frequency histogram of maximum lifespans for 101 species of artiodactyl"}
# Generate the histogram
hist(artiodactyls$maximum_lifespan_yr,
     # Nice label for the x-axis
     xlab = "Maximum lifespan in years",
     main = "Artiodactyls", # Graph title
     col = "darkorange3" # Tasteful colour
)
```

This gives a very nice visualisation of the shape of the dataset and we can get lots of information from looking at this. Firstly, we can see that these data have a *central tendency* --- the data are clustered around a maximum lifespan of between 20 and 25 years. Secondly, there is not a lot of *dispersion* in these data: the great majority of the values fall between 15 and 30. Thirdly, the shape of the histogram is not quite symmetrical. There is something of a tail extending towards the upper values, indicating that the *frequency distribution* of these data shows some degree of *positive skew*. This means that while most of the values are close to the centre of the distribution, there are some rather larger values than we would expect from data that conformed to something like a *normal distribution* --- more on this in a bit.
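`hist()` does this binning for you, but you can reproduce the counts in the table above yourself. As a quick sketch, `cut()` assigns each value to a bin and `table()` counts how many fall in each one, here using the same five-year bins:

```{r}
bins <- cut(artiodactyls$maximum_lifespan_yr, breaks = seq(0, 65, by = 5))

table(bins)
```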
Finally, there is one value that seems rather unexpectedly high even given the positive skew in the data. When you see a data point like this you need to ask whether it's just an extreme value or whether it might be an error: a species that's been misclassified into the wrong order, for example, or a mistake in recording the data. Let's find out which species that large value represents.

```{r}
artiodactyls$species[which(artiodactyls$maximum_lifespan_yr > 60)]
```

Aha, it's the hippopotamus. This makes sense because hippos are quite unique animals, being in their own family within the artiodactyls, and are very different from other artiodactyls in lots of other ways, including being very much bigger than other animals in the order, with an average weight of between one and one and a half metric tonnes and occasionally reaching more than four tonnes. Rather heavier than the average gazelle.

<br><br><hr>

1. Healy, K., *et al.* (2014) Ecology and mode-of-life explain lifespan variation in birds and mammals. Proceedings. Biological sciences, 281, 20140298

## Exercise: draw your own frequency histogram

You've seen that R makes it easy to generate frequency histograms with the `hist()` function. See if you can draw a frequency histogram like the one for artiodactyls but this time for a completely different order of animals, the Psittaciformes, otherwise known as the parrots. The longevity dataset has data on maximum life span for 72 species of parrot, and we can generate a new data frame just for these birds using `subset()`, just like we did with the artiodactyls. Psittaciformes is a bit much to type so we'll just call this one "parrots".

```{r prepare-histograms}
# Make order a factor
longevity$order <- as.factor(longevity$order)

parrots <- subset(longevity, order == "Psittaciformes")
```

Now try to draw a frequency histogram. You can use the code we used above but you'll need to modify it by changing the variable to be drawn and the main title. If you want you can try changing the colour --- try `steelblue`, `grey20` or if you're feeling adventurous `hotpink`.

```{r histogram1, exercise = TRUE, exercise.lines = 6, exercise.setup = "prepare-histograms"}

```

```{r histogram1-hint-1}
#Remember that you need to specify the parrots
#dataframe and give the name of the maximum_lifespan_yr variable
#with the two separated by a dollar symbol
```

```{r histogram1-hint-2}
#Use the main = argument to specify the title.
#The text for the title needs to be in quote marks.
#The col = "darkorange3" can be pasted in as is or you can change the
#colour name as suggested above
```

```{r histogram1-hint-3}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r histogram1-hint-4}
#This is the solution:

hist(parrots$maximum_lifespan_yr, #Generate the histogram
     xlab = "Maximum lifespan in years", #Nice label for the x-axis
     main = "Psittaciformes", #Graph title
     col = "hotpink" #Seriously tasteful colour
)
```

Now that you've drawn your histogram, have a think about what it's showing you and try to answer these questions.

```{r quiz, echo = FALSE}
quiz(
  question("Which of the following are true? More than one answer can be correct.",
    answer("There are more than 50 species of parrot recorded in this dataset with a maximum lifespan of less than 20 years"),
    answer("The most frequent maximum lifespan for parrots is between 20 and 25 years", correct = TRUE),
    answer("The frequency distribution for maximum lifespan for parrots shows some positive skew", correct = TRUE),
    answer("The greatest maximum lifespan for a parrot species in this dataset is between 75 and 80 years", correct = TRUE),
    answer("The frequency distribution for parrot maximum lifespan is normal")
  )
)
```

Here are the histograms for parrots and artiodactyls together.

```{r echo = FALSE, fig.height = 6, fig.width = 4}
par(mfrow = c(2,1))

parrots <- subset(longevity, order == "Psittaciformes")

hist(parrots$maximum_lifespan_yr, #Generate the histogram
     xlab = "Maximum lifespan in years", #Nice label for the x-axis
     main = "Psittaciformes", #Graph title
     col = "steelblue"
)

hist(artiodactyls$maximum_lifespan_yr,
     #Nice label for the x-axis
     xlab = "Maximum lifespan in years",
     main = "Artiodactyls", #Graph title
     col = "darkorange3" #Tasteful colour
)
```

```{r hist_quiz, echo = FALSE}
quiz(
  question("Compare your histogram for parrots with the histogram for artiodactyls. Which of the following are true? More than one answer can be correct.",
    answer("There are many more parrot species recorded as having maximum lifespans between 30 and 50 years than there are artiodactyl species", correct = TRUE),
    answer("The most common maximum lifespan for artiodactyls is the same as that for parrots", correct = TRUE),
    answer("The shortest-lived artiodactyl species has a maximum lifespan shorter than the shortest-lived parrot"),
    answer("Because there are no obvious outlying data points we can be confident that all the data were recorded correctly"),
    answer("The longest-living parrot species has a longer maximum lifespan than the hippopotamus", correct = TRUE)
  )
)
```

## Refining the histogram

One thing you might have noticed is that `hist()` drew the two histograms slightly differently: whereas the artiodactyl one used five-year bins, so the number of data points between 5.1 and 10 or 10.1 and 15 years were counted, the parrots one used ten-year bins, so the numbers between 0 and 10 or 10.1 and 20 were counted. `hist()` selects a bin size on the basis of what will make a nice looking histogram, but sometimes we want to change this. In this case, if we want to compare our two histograms we might want to draw them with the same bin size. We can do this by using the `breaks =` argument, which will take a vector of the maximum value for each bin. If we wanted 20 year bins, for example, we could use the argument `breaks = c(0,20,40,60,80,100)` as part of our `hist()` function call.

See if you can redraw your histogram, but this time using 5-year bins, with a maximum of 80 and a minimum of zero.

```{r histogram2, exercise = TRUE, exercise.lines = 10, exercise.setup = "prepare-histograms"}

```

```{r histogram2-hint-1}
#You just need to use the code from before but add
#an extra argument specifying the breaks.
```

```{r histogram2-hint-2}
#You don't need a zero in your vector of breaks,
#it can start at 5
```

```{r histogram2-hint-3}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r histogram2-hint-4}
#This is what you need to add:
#breaks = c(5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80)
#with commas in the right places, so before and after unless
#it's the last argument
```

```{r histogram2-hint-5}
#This is the solution:

#Generate the histogram
hist(parrots$maximum_lifespan_yr,
     #Nice label for the x-axis
     xlab = "Maximum lifespan in years",
     main = "Psittaciformes", #Graph title
     col = "steelblue", #More restrained colour
     breaks = c(5,10,15,20,25,30,35,40,45,
                50,55,60,65,70,75,80)
)
```

That's interesting, and gives a rather different pattern to the one with the wider bins that we looked at before. There seems to be rather more structure to the dataset than was visible previously, and in particular there seem to be two peaks: one at around 25 to 30 years and a second, lower one at around 40 to 50 years. When we see a *bimodal* dataset like this it often indicates that there is some underlying structure in the data, with some group or groups that are somewhat different from the rest. In this case the obvious place to look is in the taxonomic structure of the order: the parrots are a diverse group of birds and it might be that the second peak we see in the histogram is associated with variability within the order.

Here's a "stacked histogram" with the three big families of the Psittaciformes shown in different colours. I'm importing an edited version of the parrots data with the families added here, and I'm using a function called `histStack()` from the `plotrix` package to draw this graph.

```{r fig.height = 4, fig.width = 5, fig.cap = "**Figure 3** Stacked histogram of maximum ages for the parrots showing the three main families"}
library(plotrix)

parrots2$family <- factor(parrots2$family,
                          levels = c("Psittacidae", "Psittaculidae", "Cacatuidae"))

histStack(
  maximum_lifespan_yr ~ family,
  data = parrots2,
  breaks = 16,
  xlab = "Maximum lifespan (years)",
  col = c("plum4", "aquamarine4", "darkorange"),
  xlim = c(0, 80)
)

legend("topright",
       legend = c("Psittacidae", "Psittaculidae", "Cacatuidae"),
       fill = c("plum4", "aquamarine4", "darkorange")
)
```

The [Psittaculidae](https://en.wikipedia.org/wiki/Psittaculidae) is a group that includes many of the smaller parrots such as budgerigars, lorikeets and parakeets and these are mostly short lived, with no examples here with a maximum lifespan greater than 35 years. The [Cacatuidae](https://en.wikipedia.org/wiki/Cockatoo) are better known as the cockatoos, and these are all remarkably long-lived, with no example here with a maximum lifespan of less than 25 years. The [Psittacidae](https://en.wikipedia.org/wiki/Psittacidae) are the classic parrots such as African Grey parrots and Macaws, and these have intermediate maximum life expectancies, with some species being quite short lived, whereas some such as the African Grey parrots are contributing to the second peak at 40-50 years.

Overall then, by looking at a carefully chosen frequency histogram we've exposed some of the underlying structure in this set of data: not everything we can see is explained by differences between families, but certainly we now know that the lower peak is associated with the Psittaculidae and the upper one with Cacatuidae. This will inform any further analysis we do on these data: it is tremendously important to be aware of whether your data has structure like this.
As an example, if the Cacatuidae are generally heavy birds and the Psittaculidae are generally light (which is indeed the case) then we might see a positive relationship between weight and maximum lifespan in the parrots. Drawing any kind of conclusion about this would be very difficult however, because we wouldn't know whether this represented a causal relationship or whether the reason that the Cacatuidae live longer than the Psittaculidae is unrelated to their weight but determined by some other aspect of their biology which they share because they are closely related --- a statistician would say that there is a *confounding* variable here which would be relatedness.

To put this slightly more formally, one of the fundamental things we assume when doing statistical analysis is that our datapoints are *independent* - in other words each one is a separate measure of the phenomenon of interest and that no pair of data points are likely to be more similar than we would expect by chance. Here, quite clearly, one cockatoo is more likely to be similar to another cockatoo than it is to a lorikeet, and we would need to take this into account in our analysis.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/01_Frequency_histograms/Frequency_histograms.Rmd
--- title: "Descriptive and Exploratory Statistics 2: Descriptive statistics" output: learnr::tutorial: theme: default css: "http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css" runtime: shiny_prerendered author: Rob Knell description: > This tutorial takes you through the most important descriptive statistics, covering the mean and median as well as the variance, standard deviation and inter-quartile range. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE) load("height_immunity.rda") ``` ## What is the purpose of descriptive statistics? Figure 1 below is a frequency histogram showing measures of lysozyme activity for 194 men and women, from a study of the relationship between height and immunity in humans published in 2017^1^. Lysozyme is an enzyme that causes bacteria to lyse (hence the name) and its activity here was measured by incubating blood serum with a suspension of bacterial cells and comparing the optical absorption after 20 minutes with a control sample with no serum. The data are loaded as a data frame called `height_immunity` ```{r fig.cap = "**Figure 1** Frequency histogram of lysozyme activity for 194 people", fig.height = 4, fig.width = 4} # Draw the histogram hist(height_immunity$lysozyme, col = "aquamarine4", main = "", xlab = "Lysozyme activity" ) ``` When we visualise these data using a frequency histogram we can see that these values are centred around a value of roughly 0.35-0.4, with the majority of people in the sample having activity between 0.3 and 0.5 and almost all of them having values between 0.2 and 0.55. We can also see that the distribution of data is roughly symmetrical and approximately normally distributed, although there does seem to be some indication of slight negative skew --- the lower tail of the distribution looks rather fatter than the upper tail, and the lowest value is further from the centre of the distribution than the highest value is. We can see these things from just looking at the histogram, but if we want to go further we need to quantify these observations. If we want to know whether for example,some treatment we might use causes a change in lysozyme activity we need to calculate where the centre of the distribution is, and if we suspect that our treatment causes a change in the amount of variability in the data then we need to quantify the amount of spread, or *dispersion* there is in these data. Knowing this can also let us assess how likely or unlikely particular values might be: for example, if we had a person with an activity measured at 0.9 we might want to ask whether that high value was something that might be expected given the distribution of data, or whether that value might indicate an anomaly where perhaps there was something different about that person that might cause higher lysozyme activity, such as a genetic difference, a recent infection or something similar. We'll start with the ways that we can describe the centre of the distribution of data. <br><br><hr> ^1^ Pawłowski, B., Nowak, J., Borkowska, B., Augustyniak, D. & Drulis-Kawa, Z. (2017) Body height and immune efficacy: testing body stature as a signal of biological quality. Proceedings. 
Biological sciences / The Royal Society, 284: 0171372 ## The three **M**s: Mean, Median and Mode as Measures of Central Tendency Before starting on this section, you might like to watch this short video explaining means and medians: ![](https://youtu.be/alqrNPS7MYw) ### Mean The most common statistic used to describe where the middle of a dataset is is the *arithmetic mean*, usually just called the *mean*. This is also the figure that most people are thinking of when they say the *average*. It's dead simple to calculate: you add all of the numbers up, and then you divide that figure by the number of datapoints. To put this more formally, $$ \large{\bar{x} = \frac{\Sigma x}{n}}$$ Here, $\bar{x}$ represents the sample mean, the $\Sigma$ in the equation is a capital sigma and is used to mean "the sum of", $x$ refers to our data and $n$ is the sample size. So this is just saying "to get the mean, add all the data together and divide my the sample size" but in statisticalese. If we had only sampled lysozyme activity from 7 people then our data might look like this: <br> $$0.37, 0.22, 0.50, 0.53, 0.38, 0.35, 0.23$$ <br> and we could calculate the mean of these data as follows: <br> $$ \bar{x} = \frac{0.37 + 0.22 + 0.50+ 0.53+ 0.38+ 0.35+ 0.23}{7} = \frac{2.58}{7} = 0.37 $$ <br> We end up with a value for $\bar{x}$ of just under 0.4, which concides nicely with the peak of the distribution. Since our $n$ is actually 194 it would be a little cumbersome to write out the calculation for the mean of all the data, but fortunately R has a built in function to calculate the mean called, surprisingly, `mean()`. ```{r} mean(height_immunity$lysozyme) ``` Giving us a value for $\bar{x}$ which is surprisingly close to the one we got from just seven values. This is more by accident than design. ### Median The next most common statistic that you'll see used to describe the centre of a distribution is the *median*. This is, quite simply, the number in the middle of the distribution. You *rank* all your data from low to high (or *vice-versa*), and the number in the middle is the median. So, returning to our seven number subset from before: $$0.37, 0.22, 0.50, 0.53, 0.38, 0.35, 0.23$$ we can rank these, starting with the lowest: $$0.22, 0.23, 0.35, \underline{0.37}, 0.38, 0.50, 0.53.$$ The underlined value, 0.37, is the one in the middle and so is the median. This turns out to be exactly the same as the mean, and again this is more by accident than anything else. You can see that the median divides the data set into two equal halves, so half of the data have values greater than the median and half have values less than the median. If you have an even number of data, of course there isn't a value in the middle when you rank your data. What you do in this case to calculate the median is to take the two numbers that are in the middle and calculate the mean of those two to get your median. For the median of our overall lysozyme variable, again R can very simply calculate this for you using the `median()` function. ```{r} median(height_immunity$lysozyme) ``` This returns 0.38 so fractionally higher than the 0.37 for the mean. ### Mode The mode is the most common value in a dataset. It's not used as much as the mean or the median, mainly because when you have continuous data which can take any value the mode doesn't really have much meaning. Our lysozyme variable, for example, has 133 unique values out of 194 so asking which value is the most common isn't really going to tell us a lot. 
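If you ever do need the most common value in a vector you can get it from a frequency table. Here's a minimal sketch (`stat_mode()` is just a name made up for this example, not a base R function):

```{r}
# Tabulate the values and return the most frequent one
stat_mode <- function(x) names(which.max(table(x)))

stat_mode(c(1, 2, 2, 3, 3, 3))
```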
Modes can be valuable when you're looking at categorical data, where each value can only take one of a limited number of values such as juvenile/adult or green/blue/red/yellow or healthy/unwell/incapacitated/dead but in these cases you can usually just look at a frequency histogram. R doesn't even have a function to calculate the mode... somewhat perversely it does have a function called `mode()` but that tells you what the "storage mode" of a variable is:

```{r}
mode(height_immunity$lysozyme)
```

So now you know not to get confused there...

### Central tendency exercises

#### Means versus medians

Here are some more numbers from our lysozyme data:

0.22 0.39 0.36 0.45 0.41

Use the `c()` function to set up a vector of these data called `X1`, and calculate the mean and the median. Remember that because `X1` is saved straight into the workspace and isn't part of a data frame you don't need to use $ or anything like that to refer to it: just use the name.

```{r mean1, exercise = TRUE}

```

```{r mean1-hint-1}
#Don't forget to put commas between each
#number for the vector, and make sure
#you have the brackets done correctly
#for all the functions you use.
#
#Don't forget that R is case sensitive so
#if you called your vector X1 then trying to
#calculate the mean of x1 won't work
#
```

```{r mean1-hint-2}
#For the vector you need to type this:

X1 <- c(0.22, 0.39, 0.36, 0.45, 0.41)
```

```{r mean1-hint-3}
#This is the solution:

X1 <- c(0.22, 0.39, 0.36, 0.45, 0.41)

mean(X1)

median(X1)
```

This should give you a value of 0.366 for the mean and 0.39 for the median.

Now let's look at what happens when we add another datapoint with a very high value. Somehow you've managed to measure someone's lysozyme activity as 20. Generate a vector called X2 with the previous numbers you used and also a sixth value set at 20, and calculate the mean and the median again.

```{r mean2, exercise = TRUE}

```

```{r mean2-hint-1}
#For the vector you need to type this:

X2 <- c(0.22, 0.39, 0.36, 0.45, 0.41, 20)
```

```{r mean2-hint-2}
#This is the solution:

X2 <- c(0.22, 0.39, 0.36, 0.45, 0.41, 20)

mean(X2)

median(X2)
```

Compare the values for the mean and the median for both X1 and X2. What do you notice about how they respond to the addition of an extreme value to the data?

Hopefully what you can see is that while the mean increased by about a factor of 10, from 0.37 to 3.64, the median hardly changed, going from 0.39 to 0.4. This illustrates a very important point about these two measures: the mean is *sensitive to outliers* which means that it responds strongly to very high or very low values, whereas the median does not and we would say that it is *robust to outliers*. In this case when we add the extra value to our data the mean changes to a number which is not really representative of the data at all: we have 5 values which are all well below 1, and 1 value of twenty, and somewhere in between them is the mean. The median on the other hand is still giving you some useful information about your vector of numbers.

#### Summarising a new variable

In the `height_immunity` data frame there is another variable called "phagocytic" which is a measure of the phagocytic response by the leucocytes for each person in the study. Calculate the mean and the median for this variable.

*TIP* the `phagocytic` variable contains some missing data (`NA`). The default option for both the `mean()` and `median()` functions when presented with missing data is just to return `NA` and not calculate anything.
If you want to get the mean or median calculated from the data that are present, you have to add this argument: `na.rm = TRUE` to your function call, so you'd have something like `median(my_variable_name, na.rm=TRUE)`.

```{r mean3, exercise = TRUE}

```

```{r mean3-hint-1}
#As for the lysozyme data, we have to
#tell R to look in the height_immunity data frame
#for the phagocytic vector. You do this by
#typing the name of the data frame, a dollar
#symbol and then the name of the variable.
```

```{r mean3-hint-2}
#Don't forget to put a comma between the
#variable name and the na.rm = TRUE argument
#for both function calls.
#
#Don't forget that R is case sensitive and
#capital and lower case letters have to match
#
#Make sure that all your opening brackets
#have closing brackets
```

```{r mean3-hint-3}
#This is the solution:

mean(height_immunity$phagocytic, na.rm = TRUE)

median(height_immunity$phagocytic, na.rm = TRUE)
```

Compare the values for the mean and the median. Are they roughly the same? If not what might this tell you about this variable?

Let's have a look at the shape of the `phagocytic` variable. Use the `hist()` function to plot a frequency histogram for this variable `phagocytic`. Let's make it a nice plot so choose a suitable main title and set it using `main = "my_title"`, label the x axis "Phagocytic activity" using `xlab = ""` and use `col = ""` to set a colour for the bars. I'll suggest "steelblue" or "sienna3" as giving nice results, or alternatively [pick your own colour name from here](http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf). Have a look at the histograms tutorial if you're really stuck and can't remember anything about how `hist()` works.

```{r mean4, exercise = TRUE, fig.height = 5, fig.width = 4, exercise.lines = 8}

```

```{r mean4-hint-1}
#Don't forget to put a comma between the
#arguments in your hist() function call
#
#Don't forget that R is case sensitive and
#capital and lower case letters have to match
#
#Make sure that all your opening brackets
#have closing brackets
```

```{r mean4-hint-2}
#Remember that phagocytic is a variable in
#the height_immunity data frame, so you have to refer
#to it as height_immunity$phagocytic
#
#The text for main = "", xlab = "" and col = ""
#must all have quote marks at the beginning and the end
```

```{r mean4-hint-3}
#This is the solution:

hist(height_immunity$phagocytic,
     main = "Histogram of phagocytic activity",
     xlab = "Phagocytic activity",
     col = "sienna3"
)
```

You can see that the distribution of these data has some pronounced positive skew, with the main bulk of the data having values between 100 and 200, but some values being as high as 550-600. These high values at the end of the long positive tail in the data are having a bigger effect on the mean than they are on the median, so the value of the mean is pulled up such that it no longer really indicates where the majority of the data are to be found. The median, on the other hand, is once again still giving us a indication of where most of the data are. When dealing with skewed data, therefore, it's often better to use the median than the mean.

## Measures of dispersion

Before starting the section on the variance, here's a video explaining how to calculate the mean and standard deviation which you might find helpful

![](https://youtu.be/ChDRxxLIFMU)

### Variance

Let's look at our mini-vector of data sampled from the lysozyme activity variable again. The numbers in our dataset are:

$$0.37, 0.22, 0.50, 0.53, 0.38, 0.35, 0.23$$

and we know that the mean is 0.37.
What we want to quantify is how much spread there is in these data, or in other words how far from the mean our datapoints tend to be. We can find out how far from the mean each datapoint is by just subtracting the mean from it, and we could then produce a summary of those numbers by adding them together. That's not especially useful, however, because some of the numbers will be negative and some positive, so what we do is square each difference, which will make them all positive, and add the squared differences together.

| Value |$\bar{x}$ |$x-\bar{x}$|$\left(x-\bar{x}\right)^2$|
|:--------|:---------------|:----------|:-------------------------|
| 0.37 | 0.37 |0 |0 |
| 0.22 | 0.37 |-0.15 |0.0225 |
| 0.50 | 0.37 |0.13 |0.0169 |
| 0.53 | 0.37 |0.16 |0.0256 |
| 0.38 | 0.37 |0.01 |0.0001 |
| 0.35 | 0.37 |-0.02 |0.0004 |
| 0.23 | 0.37 |-0.14 |0.0196 |

The sum of all of these squared differences is 0.0851, or to write it out formally:

$$\Sigma \left(x-\bar{x}\right)^2 = 0.0851$$

Remember that $\Sigma$ means "the sum of" and $\bar{x}$ is the symbol for the sample mean. This quantity is called the *Sum of squared deviations from the mean* or more commonly just the *Sum of squares* and it becomes very useful when doing a very important kind of analysis called ANOVA. It's not especially useful for our present purposes, however, because it can't be used to compare how much dispersion there is between datasets. This is because it depends on the sample size: other things being equal, a variable with a sample size of 10 will have roughly double the sum of squares of one with a sample size of 5. To account for this we could divide the sum of squares by the sample size, but in fact we divide it by something called the *degrees of freedom*, which in this case is $n-1$ where $n$ is the sample size. This gives us a quantity called the *variance*, usually indicated by $\sigma^2$, so

$$ \sigma^2 = \frac{\Sigma \left(x-\bar{x}\right)^2}{n-1} = \frac{0.0851}{6} = 0.01418$$

The variance is one of the most commonly used measures of how much spread there is in a data set, and we don't need to calculate it by hand since R has a function to do it for us called `var()`, so we can calculate the variance of our 7-number subsample in R as follows:

```{r}
X1 <- c(0.37, 0.22, 0.50, 0.53, 0.38, 0.35, 0.23)
var(X1)
```

This gratifyingly gives us the same value as when we did the calculation by hand.

### Standard deviation

The variance is a useful measure but because it consists of the sum of the *squared* deviations from the mean it is quite hard to relate back to the original data. Simply taking the square root of the variance, however, gives us a rather more intuitive measure, the *standard deviation*. This is usually represented by $\sigma$. For our data, the standard deviation is:

$$\sigma = \sqrt{\sigma^2} = \sqrt{0.01418} = 0.119$$

As with the variance, there is a built-in function to calculate $\sigma$ which is `sd()`, so

```{r}
sd(X1)
```

The standard deviation is, roughly speaking, the typical distance between a value in our data and the mean: so if you were to pick one of these data points and work out how far it is from the mean, $\left|x - \bar{x}\right|$, the value would typically be around 0.119. As shown in figure 2, $\sigma$ also has some special meaning when we're dealing with normal distributions: if you think of one of those bell-shaped curves, the standard deviation is the distance from the middle to the *inflexion point* on the curve, which is where the slope stops getting steeper and starts getting shallower again.
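As a quick aside before we look at the figure, you can check that `sd()` really is just the square root of the variance formula by calculating it "by hand":

```{r}
# Standard deviation "by hand": the square root of the sum of
# squared deviations divided by the degrees of freedom (n - 1)
sqrt(sum((X1 - mean(X1))^2) / (length(X1) - 1))
```

This gives the same value as `sd(X1)` above.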
Roughly 68% (about 2/3) of all the datapoints in a normally distributed dataset will lie within one standard deviation of the mean. Also important is the fact that if your data are distributed following a normal distribution, 95% of all the datapoints will lie within 1.96 (roughly 2) standard deviations of the mean. That 1.96 is a bit of a magic number in statistics and crops up in all sorts of places. Finally, 99% of all the values in a normally distributed dataset will be within 2.58 standard deviations of the mean.

```{r echo=FALSE, fig.height = 5, fig.width = 6, fig.cap="**Figure 2** Probability density of a standard normal distribution with mean=0 and standard deviation=1 showing the areas defined by the mean plus or minus 1, 1.96 and 2.58 standard deviations. The code for this figure is given at the end of the chapter."}
X1 <- seq(-3, 3, length = 300)
Y1 <- dnorm(X1)

plot(X1, Y1, type = "n", xlab = "x", ylab = "P(x)")
abline(v=0, lwd=0.5,lty=2)

x0 <- min(which(X1 >= -2.58))
x1 <- min(which(X1 >= -1.96))
x2 <- min(which(X1 >= -1))
x3 <- max(which(X1 <= 1))
x4 <- max(which(X1 <= 1.96))
x5 <- max(which(X1 <= 2.58))

polygon(x = c(X1[c(1, 1:x0, x0)]), y = c(0, Y1[1:x0], 0), col = "white", border = NA)
polygon(x = c(X1[c(x0, x0:x1, x1)]), y = c(0, Y1[x0:x1], 0), col = "#deebf7", border = NA)
polygon(x = c(X1[c(x1, x1:x2, x2)]), y = c(0, Y1[x1:x2], 0), col = "#9ecae1", border = NA)
polygon(x = c(X1[c(x2, x2:x3, x3)]), y = c(0, Y1[x2:x3], 0), col = "#3182bd", border = NA)
polygon(x = c(X1[c(x3, x3:x4, x4)]), y = c(0, Y1[x3:x4], 0), col = "#9ecae1", border = NA)
polygon(x = c(X1[c(x4, x4:x5, x5)]), y = c(0, Y1[x4:x5], 0), col = "#deebf7", border = NA)
polygon(x = c(X1[c(x5, x5:300, 300)]), y = c(0, Y1[x5:300], 0), col = "white", border = NA)

points(X1, Y1, type = "l")
abline(v=0, lwd=0.5,lty=2)

text(0, 0.18, "68% of values \n within 1 sd \n of the mean", cex = 1,col="white")
arrows(0.6,0.18,0.99,0.18,length=0.1,angle=20,col="white")
arrows(-0.6,0.18,-0.99,0.18,length=0.1,angle=20,col="white")

text(0, 0.04, "95% of values \n within 1.96 sd \n of the mean", cex = 1,col="white")
arrows(0.72,0.03,1.95,0.03,length=0.1,angle=20,col="white")
arrows(-0.72,0.03,-1.95,0.03,length=0.1,angle=20,col="white")

text(2.5,0.1, "99% of values \n within 2.58 sd \n of the mean", cex = 1)
arrows(2.58,0.06,2.58,0.015,length=0.1,angle=20)
```

### Interquartile range

The interquartile range, or IQR, is an alternative way of describing the amount of dispersion in a variable. Like the median, it relies on the use of *ranked* data. If we rank all of the data in our variable, we already know that the middle datapoint is the *median*, and that the median divides our data into two halves. If we then take only the bottom half of the data we can find the number which divides that in half, and the same with the upper half. This gives us three numbers (the *lower quartile*, the *median* and the *upper quartile*), which between them divide our data into four equal-sized units.

Before moving on to the rest of the tutorial, here's a short video explaining the IQR.

![](https://youtu.be/uGf-DcLvA5M)

Here's an example. Let's say we have counted the number of solitary bee burrows in 25 2m quadrats in some grassland. We can set up a vector with our data in, and then get R to rank it for us:

```{r}
counts <- c(4, 1, 2, 3, 7, 0, 7, 6, 5, 1, 3, 5, 3, 13, 8, 3, 6, 3, 3, 7, 4, 6, 5, 4, 4)

counts.ranked <- counts[order(counts)]

counts.ranked
```

There are 25 numbers so the 13th one, 4, is the median.
The 7th number (3) is the lower quartile and the 19th (6) is the upper quartile:

0 1 1 2 3 3 **3** 3 3 3 4 4 **4** 4 5 5 5 6 **6** 6 7 7 7 8 13

You can see how these divide the data into four equal subsets. The *interquartile range* is the range between the lower and upper quartiles --- in this case the IQR is 3. This is where the middle 50% of all the data are located, so the interquartile range tells us how wide the spread of this middle half of the data set is.

R has a couple of options for calculating the *IQR*. Firstly, there is the function `IQR()` which does what it says on the tin.

```{r}
IQR(counts)
```

Secondly, if you want to know what the values are for the quartiles rather than just the range, use the `summary()` function which gives a series of useful statistics when you feed it a numerical vector:

```{r}
summary(counts)
```

### Measures of dispersion exercises

Let's go back to our data on lysozyme activity. Use `var()` and `sd()` to calculate the variance and standard deviation of these data, and then use `IQR()` to calculate the interquartile range.

```{r spread1, exercise = TRUE, exercise.lines = 6}

```

```{r spread1-hint-1}
#Don't forget that lysozyme is a variable within
#the height_immunity data frame, so you need to give the
#name of the data frame, then a $ symbol, then the
#name of the variable.
#
#Remember that R is case sensitive.
#
```

```{r spread1-hint-2}
#For the variance you need this:
var(height_immunity$lysozyme)
#you should be able to get the sd and IQR in a similar way
```

```{r spread1-hint-3}
#This is the solution:
var(height_immunity$lysozyme)
sd(height_immunity$lysozyme)
IQR(height_immunity$lysozyme)
```

Now use `summary()` to tell you what the values are for the quartiles and the median for the lysozyme data.

```{r spread2, exercise = TRUE}

```

```{r spread2-hint-1}
#Don't forget that lysozyme is a variable within
#the height_immunity data frame, so you need to give the
#name of the data frame, then a $ symbol, then the
#name of the variable.
#
#Remember that R is case sensitive.
#
```

```{r spread2-hint-2}
#This is the solution:
summary(height_immunity$lysozyme)
```

Using the numbers you've generated, try to answer these questions.

```{r quiz, echo = FALSE}
quiz(
  question("Assuming our lysozyme data are normally distributed, roughly 2/3 of the data should lie between which pair of values?",
    answer("0.07 and 0.64"),
    answer("0.37 and 0.39"),
    answer("0.22 and 0.54"),
    answer("0.34 and 0.43"),
    answer("0.29 and 0.46", correct = TRUE)
  ),
  question("If you were to rank the lysozyme data and divide it into four equal parts, which numbers would give you the range for the top quarter of the data (i.e. the part with the largest values)?",
    answer("0.38 and 0.43"),
    answer("0.43 and 0.64", correct = TRUE),
    answer("0.34 and 0.38"),
    answer("0.07 and 0.64"),
    answer("0.38 and 0.64")
  )
)
```

Now use `summary()` to find out the quartiles for the phagocytic activity variable.

```{r spread3, exercise = TRUE}

```

```{r spread3-hint-1}
#Don't forget that phagocytic is a variable within
#the height_immunity data frame, so you need to give the
#name of the data frame, then a $ symbol, then the
#name of the variable.
#
#Remember that R is case sensitive.
#
```

```{r spread3-hint-2}
#This is the solution:
summary(height_immunity$phagocytic)
```

Look at the values for the first quartile, the median and the third quartile, and think about the difference between the first quartile and the median, and between the median and the third quartile.
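If you'd rather have R work out those gaps for you, one option is to index the output of `summary()` by name. Here's a quick sketch:

```{r}
# Differences between the quartiles and the median
# for phagocytic activity
quarts <- summary(height_immunity$phagocytic)

quarts["Median"] - quarts["1st Qu."]  # lower gap
quarts["3rd Qu."] - quarts["Median"]  # upper gap
```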
Now compare that to the same values for the lysozyme data. Do you notice a pattern? I've summarised them in this table just to make things easier.

|Variable | 1st quartile | Median | 3rd quartile |
|:-------------------|:-------------|:-------|:-------------|
|Lysozyme activity |0.34 |0.38 |0.43 |
|Phagocytic activity |145 |170 |231 |

What you should see is that the quartiles for the lysozyme data, which are approximately normally distributed, are roughly symmetrical around the median: the difference between the first quartile and the median is 0.041 whereas the difference between the median and the third quartile is 0.048. For the phagocytic activity data, however, the quartiles are not symmetrical around the median. The difference between the first quartile and the median is 25, but the difference between the median and the third quartile is 61. This is because the phagocytic activity data are strongly positively skewed, so the second quarter of the data, below the median, occupies a narrow range whereas the third quarter, above the median, has a wider range. You can see this better on a histogram.

```{r fig.width = 4, fig.height = 6, echo = FALSE, fig.cap = "**Figure 3** Frequency histograms for lysozyme activity (A) and phagocytic activity (B). The vertical lines show the quartiles and medians and the shaded boxes show the interquartile ranges."}
par(mfrow = c(2,1))
par(mar = c(3,3,2,2))

hist(height_immunity$lysozyme,
     main = "",
     xlab = "lysozyme activity",
     col = "sienna3")

lys <- summary(height_immunity$lysozyme)

polygon(x = c(lys[5], lys[5], lys[2], lys[2]),
        y = c(0,62,62,0),
        col = "#99ccff64",
        border = NA)

abline(v = lys[2])
abline(v = lys[3])
abline(v = lys[5])

text(0.05, 55, "A", font = 2)

hist(height_immunity$phagocytic,
     main = "",
     xlab = "phagocytic activity",
     col = "sienna3")

lys <- summary(height_immunity$phagocytic)

polygon(x = c(lys[5], lys[5], lys[2], lys[2]),
        y = c(0,72,72,0),
        col = "#99ccff64",
        border = NA)

abline(v = lys[2])
abline(v = lys[3])
abline(v = lys[5])

text(50, 60, "B", font = 2)

par(mfrow = c(1,1))
```

You can see how the IQR for lysozyme activity is symmetrical, with the median in the centre, but that for phagocytic activity is not, and the median is towards the bottom end of the IQR. This is an example of why we sometimes prefer to use the median and IQR when we're describing skewed data, because these measures can capture the shape of the data better than, for example, the mean and standard deviation. There's lots more on this when we come to look at boxplots.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Descriptive and Exploratory Statistics 3: Boxplots" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Boxplots are an important tool for visualising data, especially when there are multiple variables or groups wthin a variable. Learn how to draw them in R and how to interpret them. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE) # worldbank <- read.csv("http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/worldbank.csv") load("worldbank.Rda") worldbank$Region <- as.factor(worldbank$Region) ``` ## What is a boxplot? A boxplot is a way of visualising data which displays a lot of information while still being simple and easy to interpret. They are particularly useful when there you have multiple variables or different factor levels associated with a numeric variable and you want to compare them. You can watch a video explaining the basics here: ![](https://youtu.be/lL4XhpzbWA0) Just to recap the video content, let's look at the data on population growth from the `worldbank` dataset, a set of data on 186 countries from 2014 which I compiled from data published by the World Bank. The dataset is already loaded and We'll start by looking at a frequency histogram. ```{r fig.cap = "Frequency histogram of annual percentage population growth for 186 countries", fig.height = 6} hist(worldbank$Population_growth, #Specify the variable main = "", #No main title col = "aquamarine4", #Set the fill colour xlab = "Annual population growth per country (%)") #X axis label ``` You can see that this variable is roughly normally distributed, with a central tendency somewhere between 1 & 2 and a range from just below -4 to somewhere between 6 & 7. We can get more exact numbers using `summary()` ```{r} summary(worldbank$Population_growth) ``` Rather than looking at a frequency histogram, we can use a boxplot to give us much the same information. The function to do this in R is conveniently called `boxplot()`. ```{r fig.cap = "Boxplot of annual population growth for 186 countries", fig.height = 6} boxplot(worldbank$Population_growth, #Specify the variable col = "aquamarine4", #Set the colour ylab = "Annual population growth per country (%)") #X axis label ``` If you've not seen one of these before it might be a bit confusing so let's go through all the different bits one at a time. 1. There's a thick line in the middle, sometimes called the *hinge*. This indicates the *median*: the middle value when the data are ranked. 2. There's a *box* around the thick line. This extends from the *First quartile* to the *Third quartile*: so this is showing you the range of values within which the middle 50% of all the datapoints lie. NB the width of the box has no meaning, at least for a normal boxplot. 3. There are dotted lines extending some distance above and below the box. These are called the *whiskers* and they extend wait for it from the quartile (so the end of the box) to the data point which is nearest to the last datapoint which is less than 1.5 times the *interquartile range* from the box. This might sound a little random but there is a good reason behind it: if the data are drawn from a normal distribution, roughly 99% (actually 99.3%) of all the data should fall within this range. NB you will sometimes see boxplots drawn with the whiskers extending up to the maximum and down to the minimum values. 
There's nothing especially wrong with doing this but it does reduce the amount of information in the graph.

4. Finally, any datapoints which lie outside the whiskers are plotted individually. These are usually called *outliers* but be careful with this word. Recall that we'd expect about 99% of the data to be within the whiskers if the data are from a normal distribution, so with 100 datapoints we should expect to see one or more of these "outliers" most of the time. Here we have 186 datapoints and three "outliers" and that is entirely unsurprising. There's no reason to think that there is anything strange about these datapoints, and there is no reason to think that they are perhaps from a differently distributed set of data to the rest. It's better to think of them as *extreme values* rather than *outliers* because the latter is often used for datapoints that are somehow in the wrong dataset.

Overall then a boxplot shows you the median, the interquartile range, the region within which roughly 99% of datapoints would be expected if the underlying distribution is normal, and any datapoints outside this region. To make this clearer, here is our frequency histogram again, this time with the boxplot plotted above it. The code is a little complex but I've put it here in case you're interested in how this figure was generated.

```{r fig.height = 7, fig.width = 7, echo = TRUE}
# Set plot area for histogram to the lower 70% of the total area
par(fig = c(0, 1, 0, 0.7))

# Plot the frequency histogram
hist(
  worldbank$Population_growth,
  main = "",
  xlab = "Population growth (%)",
  breaks = 15,
  col = "aquamarine4"
)

# set the plot area for the boxplot to the upper 60% of the total area
par(fig = c(0, 1, 0.4, 1), new = TRUE)

# Plot the boxplot
boxplot(worldbank$Population_growth,
        col = "aquamarine4",
        bg = FALSE,
        horizontal = TRUE,
        axes = FALSE)
```

You can see how the median and the interquartile range indicated on the boxplot correspond to the centre of the distribution and the region with the bulk of the data present, and you can also see how the whiskers cover almost all of the range of the data, with the few extreme values showing up individually.

### Exercise: draw a boxplot of CO~2~ production by country

There is a variable in the worldbank dataset called CO2 which is the annual CO~2~ production of each country in tonnes per capita. See if you can draw a boxplot of this variable and label the y-axis "Annual Carbon Dioxide Production Per Capita (t)".

```{r boxplot1, exercise = TRUE, fig.height = 6}

```

```{r boxplot1-hint-1}
#Remember that you need to specify the worldbank
#dataframe and give the name of the CO2 variable
#with the two separated by a dollar symbol
```

```{r boxplot1-hint-2}
#Use the ylab = argument to specify the y-axis label.
#The text for the label needs to be in quote marks.
```

```{r boxplot1-hint-3}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r boxplot1-hint-4}
#This is the solution:
boxplot(worldbank$CO2,
        ylab = "Annual Carbon Dioxide Production Per Capita (t)")
```

This boxplot looks very different to the one we drew in the last section. It looks as though it's been squashed towards the bottom and stretched towards the top, and all of the extreme values that are shown are relatively large values. To understand what we're seeing here, it will help to plot a frequency histogram for CO~2~ production. Don't forget to label the axes.
```{r boxplot2, exercise = TRUE, fig.height = 6}

```

```{r boxplot2-hint-1}
# You need to use the hist() function
# The y-axis label should just be "Frequency"
# The x-axis needs a label which says what it is
# Remember that the text for axis labels goes in quote marks
```

```{r boxplot2-hint-2}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r boxplot2-hint-3}
#This is the solution:
hist(worldbank$CO2,
     xlab = "Annual Carbon Dioxide Production Per Capita (t)",
     ylab = "Frequency",
     main = "")
```

Look at the boxplot and the frequency histogram and try to work out what's going on.

<details><summary><b>Click here for explanation</b></summary>
<br>
You can see from looking at the frequency histogram that, unlike the population growth data, per capita CO~2~ production is strongly positively skewed. This accounts for the different shapes of the two boxplots. The approximately normal distribution of the population growth data gives us a boxplot which is roughly symmetrical above and below the median, but the strong positive skew in the CO~2~ production data gives a very asymmetrical boxplot, with the lower whisker, the lower quartile and the median being close together and the rest of the plot looking as though it's been stretched upwards. This shows us one of the great strengths of using boxplots as part of your initial exploration of your data: they don't just give you information on where the data are located, they tell you about the shape of the data as well.
</details>

### Basic boxplot quiz

Here's a boxplot showing another variable from this dataset, this time `Forest_area` which gives the percentage of a nation's land area which is covered by forest.

```{r fig.cap = "Boxplot of percentage forest area for 186 countries", fig.height = 6}
boxplot(worldbank$Forest_area,
        ylab = "Percentage forest cover")
```

```{r quiz, echo = FALSE}
quiz(
  question("Which of the following are true? More than one answer can be correct.",
    answer("The median value for forest area is somewhere between 30 and 40", correct = TRUE),
    answer("The interquartile range for forest area is between about 15 and about 75"),
    answer("The frequency distribution for forest area probably has some positive skew", correct = TRUE),
    answer("Forest area is bimodal"),
    answer("The frequency distribution for forest area is normal")
  ),
  question("The boxplot shows no 'outliers'. Why might this be?",
    answer("There are no anomalous data points"),
    answer("Because of positive skew in the data the whiskers conceal them"),
    answer("The data are percentages so there cannot be values greater than 100 or less than zero, and the whiskers extend almost to these limits", correct = TRUE),
    answer("All the data were recorded correctly"),
    answer("None of the data are more than three standard deviations from the mean")
  )
)
```

## Comparing groups with boxplots

Boxplots can show you the shape and the location of your data, but so can frequency histograms. Why don't we just plot histograms? The answer to this is that boxplots become really useful when we have multiple groups of data that we want to compare. If you have a variable and also a factor which divides the data in your variable into groups, then `boxplot()` will allow you to visualise this. Instead of entering a single variable name you need to enter a *formula*, with the variable on the left and the factor name on the right, with a tilde between: `boxplot(variable ~ factor)`.
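Before we look at some real data, here's a minimal sketch of that syntax with a few made-up numbers (`toy_values` and `toy_group` are invented just for this illustration):

```{r fig.height = 4}
# Ten made-up values split into two groups, plotted
# with the formula interface
toy_values <- c(3, 5, 4, 6, 5, 9, 11, 10, 12, 9)
toy_group <- factor(rep(c("A", "B"), each = 5))

boxplot(toy_values ~ toy_group)
```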
As an example, here are some data on particulate pollution expressed as the average exposure to fine particles ([so called "PM~2.5~" particles](https://en.wikipedia.org/wiki/Particulates)) plotted with the different regions in our worldbank dataset shown separately. There are a few things that you might not be familiar with: `par(mar = c(12,5,2,2))` sets the margins for the plot, in the order bottom, left, top, right. I've made the bottom margin much bigger than usual because the region names are long and need plenty of room. I've also added the `las = 2` argument in the `boxplot()` function call. This makes the text for the axis labels perpendicular to the axis, so the x-axis labels are vertical. Finally, the `\n` in the `ylab` argument is an "escape sequence" that introduces a new line in the text.

```{r fig.cap = "Fine particulate exposure plotted for each region from the worldbank dataset", fig.height = 6}
par(mar = c(12,5,2,2))

boxplot(PM25 ~ Region,
        data = worldbank,
        ylab = "Fine particulates\n (micrograms per cubic metre)",
        xlab = "",
        las = 2)
```

Now you can see how a plot like this can convey a huge amount of information. You can:

* Find out about the central tendencies of each group, so North America has the lowest median exposure and South Asia the highest.
* Make comparisons between groups regarding how much variability there is: so North America has the lowest variability (but also only three countries), Latin America & the Caribbean has fairly low variability but the South Asia region has the most variability: some South Asian countries have exposure which is comparable to the lowest exposure in other parts of the world, but others have very high exposure.
* Use it to check the likely error distribution: here you can see that most of the boxplots are at least roughly symmetrical, so there's probably going to be no problem with using "standard" analysis techniques that assume normal errors.
* Use it to check for potentially problematic datapoints --- there aren't any here that stand out as being obviously problematic (i.e. impossible values of <0, values many times greater than all the others).

### Exercise: draw a boxplot of CO~2~ production by Income Group

The worldbank dataset also includes a variable called `Income_group` which splits the countries into four classes: Low income, Lower middle income, Upper middle income and High income. Let's generate a boxplot which compares the CO~2~ production data for these four groups. Before we draw the boxplot we'll have to declare `Income_group` as a factor and just to make the graph make more sense we'll change the order of the factor levels: R orders factor levels alphabetically by default, but that would generate a plot that's not so easy to interpret. The following piece of code does this for us:

```{r eval = FALSE, echo = TRUE}
worldbank$Income_group <- factor(
  worldbank$Income_group,
  levels = c(
    "Low income",
    "Lower middle income",
    "Upper middle income",
    "High income"
  )
)
```

```{r prepare-boxplot-3, echo = TRUE}
worldbank$Income_group <- factor(
  worldbank$Income_group,
  levels = c(
    "Low income",
    "Lower middle income",
    "Upper middle income",
    "High income"
  )
)
```

Now that we've sorted out our factor levels, we just need the code for the boxplot. Remember what we did for the last plot with changing the margins with `par(mar = ...` and using the `las =` argument in the `boxplot()` call to change the angle that the axis labels were written at.
```{r boxplot3, exercise = TRUE, exercise.lines = 9, exercise.setup = "prepare-boxplot-3", fig.height = 6, fig.cap = "Annual CO~2~ Production in tonnes per capita plotted by income group"}

```

```{r boxplot3-hint-1}
# You can use the same code we used for the previous example.
# You just need to change the two variable names and the axis labels.
```

```{r boxplot3-hint-2}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r boxplot3-hint-3}
#This is the solution
par(mar = c(12,5,2,2))

boxplot(CO2 ~ Income_group,
        data = worldbank,
        xlab = "",
        ylab = "Per capita CO2 production (tonnes)",
        las = 2
        )
```

Have a look at the boxplot and think about the patterns you see. What do you conclude?

<details><summary><b>Click here for explanation</b></summary>
<br>
Once again this boxplot gives us a great deal of information. You can clearly see the strong relationship between income group and per capita CO~2~ production, and you can also see from the asymmetric shape of the boxplots that the frequency distribution is somewhat positively skewed within each group, although not as strongly as when we plotted the whole variable by itself in the last section. This is something that we would need to bear in mind if we wished to analyse these data using, for example, ANOVA, which assumes that the error distribution within each group is normal. A further concern would arise from the increase in variance as the median gets larger --- you can see that the overall variation within the low income group is very small by comparison with the high income group, and again this *heteroscedasticity* would be a concern if we wished to analyse these data using ANOVA.
</details>

### Exercise: Log-transforming the y-axis

One possible solution to the skewed errors and heteroscedasticity in this dataset would be to log transform the data prior to analysis. Before doing this it would be a good idea to plot the data on a log scale to make sure that the transformation is making the data behave as we'd like. There are several ways to do this in R: we could just log transform the variable, either before plotting it:

`logCO2 <- log10(worldbank$CO2)`

`boxplot(logCO2 ~ worldbank$Income_group...`

or within the `boxplot()` function call: `boxplot(log(CO2) ~ Income_group, data = worldbank, ...`.

An alternative is to transform the *scale* rather than the data. This converts the y-axis (in this case) to a log scale and then the untransformed data are plotted. For purposes of visualisation this often works better because the scale retains the real values of the data. We can ask R to plot the data in this way by adding another argument to the `boxplot()` function call, `log = "y"`. Note that if we were plotting a scatterplot then we could plot a log-scale x-axis instead with `log = "x"` or we could have both axes on a log scale with `log = "xy"`. Because the only continuous axis in our boxplot is the y-axis it only makes sense to change the scale of that one.

Try to plot your boxplot with a log-scaled y-axis.

```{r boxplot4, exercise = TRUE, exercise.lines = 9, exercise.setup = "prepare-boxplot-3", fig.height = 6, fig.cap = "Log annual CO~2~ production in tonnes per capita plotted by income group"}

```

```{r boxplot4-hint-1}
# You can use the same code as before
# You just need to add the new argument
```

```{r boxplot4-hint-2}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r boxplot4-hint-3}
#This is the solution
par(mar = c(12,5,2,2))

boxplot(CO2 ~ Income_group,
        data = worldbank,
        xlab = "",
        ylab = "Log scaled per capita\n CO2 production (tonnes)",
        las = 2,
        log = "y"
        )
```

Plotting the data on a log scale gives us a very different graph indeed. The variances are roughly equivalent between the groups and there is only a hint of asymmetry, suggesting that the skew that was present before has been largely dealt with. If you wished to compare the mean CO~2~ production figures between these groups using ANOVA, there's nothing to indicate potential problems from heteroscedasticity or skewed error distributions here.

### Multiple boxplot quiz

Here's a somewhat more sophisticated boxplot than the ones we've seen before. This is showing the percent forested area data again, but this time we've divided it up by two factors: one called `Income_binary`, which has income coded simply as "high" or "low", and a second one called `Climate_region`, which divides nations depending on whether they are in the tropics or not. There's a fair bit of code to draw this, mainly because we want the x-axis labels to be nice. If you're interested I've copied it in after the quiz.

```{r fig.cap = "Percentage forest cover by income and climate region", fig.height = 6, echo = FALSE}
par(mar = c(5,4,2,2) + 0.1) #Reset the margins to the default values

#Draw the boxplot
boxplot(
  Forest_area ~ Income_binary * Climate_region, #Variables to be plotted: note the use of * to specify the interaction between the two factors
  data = worldbank, #Use the worldbank data frame
  ylab = "Percentage of area forested", #set the y-axis label
  xlab = "", #no x-axis label
  xaxt = "n" #don't draw the x-axis
)

#Draw in the x-axis
axis(
  side = 1, #Draw the axis at the bottom
  at = 1:4, #Where to put the tick marks
  padj = 0.6, #Adjust the text down a bit
  labels = c( #Vector of text for the labels
    "High income\n Temperate\n or Polar", #The \n inserts a new line
    "Low income\nTemperate\n or Polar",
    "High Income\nTropical\n",
    "Low income\nTropical\n"
  )
)
```

Have a look at the graph and try to answer these questions.

```{r quiz2, echo = FALSE}
quiz(
  question("Which of the following are true? More than one answer can be correct.",
    answer("The interquartile range for low income tropical countries is the largest of all the groups"),
    answer("The upper quartile for low income temperate or polar countries is less than the lower quartile for high income temperate or polar countries", correct = TRUE),
    answer("No high income tropical country has a percentage forest cover lower than all the low income temperate or polar countries"),
    answer("The median percentage forest cover values are roughly the same for all groups aside from the low income temperate or polar countries", correct = TRUE),
    answer("The variance of each of the four groups is likely to be roughly the same")
  ),
  question("Regarding the two temperate or polar groups only, which of the following are correct?
More than one answer can be correct.",
    answer("There is an outlier in the low income group that should be removed"),
    answer("The frequency distributions of both groups are likely to be roughly normal, albeit with some extreme values", correct = TRUE),
    answer("If you plotted both income groups as a single variable you might see a bimodal distribution", correct = TRUE),
    answer("There is an effect of income whereby the median percentage forest cover for high income countries is roughly three times that of low income countries", correct = TRUE),
    answer("There is strong positive skew in the data for low income countries only")
  )
)
```

### Script for plot with multiple factors

```{r eval = FALSE}
par(mar = c(5,4,2,2) + 0.1) #Reset the margins to the default values

#Draw the boxplot
boxplot(
  Forest_area ~ Income_binary * Climate_region, #Variables to be plotted: note the use of * to specify the interaction between the two factors
  data = worldbank, #Use the worldbank data frame
  ylab = "Percentage of area forested", #set the y-axis label
  xlab = "", #no x-axis label
  xaxt = "n" #don't draw the x-axis
)

#Draw in the x-axis
axis(
  side = 1, #Draw the axis at the bottom
  at = 1:4, #Where to put the tick marks
  padj = 0.6, #Adjust the text down a bit
  labels = c( #Vector of text for the labels
    "High income\n Temperate\n or Polar", #The \n inserts a new line
    "Low income\nTemperate\n or Polar",
    "High Income\nTropical\n",
    "Low income\nTropical\n"
  )
)
```

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Descriptive and Exploratory Statistics 4: Scatterplots" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Scatterplots are the most common way to visualise bivariate data where there are two variables measured per individual. This tutorial explains how to plot them in R and gives examples of the most common patterns to watch for. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, fig.width = 5, fig.height = 5) load("malawi_carbon.rda") load("quolls.rda") load("worldbank.Rda") load("carnivores.rda") load("mammal_longevity.rda") ``` ## Introduction Very often we find ourselves dealing with data where there is more than one measurement made on each individual that we are studying, and a lot of the time we are interested in the relationships between these variables --- as an example, if we're studying plant diseases we might want to know whether there is a relationship between the number of leaves showing infection on a plant and the growth rate of the plant, or we might be interested in whether people with an autoimmune disease also have high rates of reactivity to IgA antibodies. When we have *bivariate* data like this (bivariate meaning that for each individual we have two measurements) we can of course go through our normal exploratory look at each individual variable, using histograms or boxplots to visualise the shape of the distribution and to give us an idea of where the variables are centred and how much spread there is. Once we've done that, however, we can go further and look at the two variables together. For numeric data this would usually be done by plotting a *scatterplot* with one variable on the x-axis and one on the y-axis. Scatterplots are particularly useful tools for exploratory data analysis because you can easily see whether there appears to be any relationship between your variables, you can check for anomalous data, you can see if there is any curvature in the relationship and you can check for any signs of *structure* or *grouping* in your data. Here's a video which runs through the basics of what scatterplots are and what the common patterns that you might see are. ![](https://youtu.be/sqVqNygszL0) ## Scatterplot basics ```{r echo = FALSE} # quolls <- read.csv("http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/ChartersetalQuolls.csv") # quolls$sex <- as.factor(quolls$sex) # colours1 <- c("steelblue", "sienna3") # worldbank <- read.csv("http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/worldbank.csv", stringsAsFactors=TRUE) # carnivores <- read.csv("http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/carnivores.csv") # mammal_longevity <- read.csv("http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/mammal_longevity.csv", stringsAsFactors = TRUE) # malawi_carbon <- read.csv("http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/malawi_carbon.csv") ``` Here's an example of a scatterplot, and the R code used to draw it. The `plot()` function in R will generate a scatterplot if you give it two numerical variables. You might notice the way that the two variables are entered as a *formula* so the y-axis variable comes first, followed by the x-axis one and they are separated by a tilde `~`. You can also specify `x=` and `y=` as separate arguments for a plot and you'll get the same result. The best thing to do is to pick one method and stick with it so there is no confusion. 
The data are from a study published by Charters *et al.* in 2018^1^ looking at physical performance in [Northern Quolls](https://en.wikipedia.org/wiki/Northern_quoll), a small marsupial predator found in Australia. Note that both grasping force and maximum oxygen uptake have been 'standardised' --- converted to variables with a mean of zero and a standard deviation of 1 --- this is for a multidimensional analysis that the authors did, but it doesn't really affect us here except that units don't really make much sense for these variables. The data are loaded as the `quolls` dataframe.

```{r}
plot(quolls$grasp ~ quolls$Max_O2_consump, #Variables to be plotted
     pch = 16, #Plot with solid circles for plot characters
     col = "steelblue", #Nice colour for plotting points
     xlab = "Maximum oxygen consumption", #x-axis label
     ylab = "Grasping force" #y-axis label
     )
```

You can look at this scatterplot and you can see a pattern: the animals with high levels of oxygen consumption have stronger grasping forces than do the animals with low levels of oxygen consumption --- there is a *positive correlation*. You can also see that there is a fair amount of noise in the data, so there are some animals with high oxygen consumption and low grasping force, and some with low oxygen consumption and fairly high grasping forces. Overall though the pattern is reasonably clear (if you're an ecologist you'd be pleased with this degree of correlation, most physicists on the other hand would give up and do something else if they got data like these). There are no obvious problem data points, and there is no indication of a curved relationship, or any obvious clustering that might indicate groups in the data. I'd happily go on to do further analysis on these data without any concerns.

<br><br><hr>

1. Charters, J.E., Heiniger, J., Clemente, C.J., Cameron, S.F., Amir Abdul Nasir, A.F., Niehaus, A.C. & Wilson, R.S. (2018) Multidimensional analyses of physical performance reveal a size‐dependent trade‐off between suites of traits. Functional ecology, 32, 1541–1553.

## Two simple scatterplots

The data set `malawi_carbon` contains data on two lung immunity measures (oxidative burst activity and phagocytic activity) from people exposed to chronic carbon particulates in their homes in Malawi. These data were originally published by Rylance *et al.* (2015)^1^ as part of a study of how exposure to smoke from cooking fires affects lung function. Let's have a look at how these two variables relate to particulate exposure.

Start with the phagocytic activity data. Use the R code we used in the previous example as a guide. You'll need to use the `plot()` function and to specify the x- and y- variables by either putting in a formula of the form `y~x` or by specifying `x=` and `y=` arguments. The variables in question are called `log_carbon`, which should go on the x-axis, (the carbon data has been log transformed: more on this later) and the variable for the phagocytic activity is called `phagocytosis` and this should go on the y-axis. Both of them are in the `malawi_carbon` data frame so you need either to refer to them using the dataframe$variable syntax or just give the names of the variables but add a `data = malawi_carbon` argument to your function call. Finally, you'll need to put in some sensible x-axis and y-axis labels using `xlab = ""` and `ylab = ""`.
I suggest "Log carbon score" for the x-axis and "Phagocytosis activity (%)" for the y-axis --- the phagocytosis score is actually a the percentage of macrophages which took up flourescent silica beads. ```{r scatterplot01, exercise = TRUE, exercise.lines = 8} ``` ```{r scatterplot01-hint-1} #You can specify the x- and y- variables like this: plot(phagocytosis ~ log_carbon,... ) #in which case you need to specify data = malawi_carbon,... ) #as one of your arguments. #Alternatively, you can do it like this plot(malawi_carbon$phagocytosis ~ malawi_carbon$log_carbon, #If you do this you don't need to #say where the data are. #Don't forget to add your other arguments. ``` ```{r scatterplot01-hint-2} #Check that there's a comma between all arguments #and that all your brackets and quote marks are #matched. ``` ```{r scatterplot01-hint-3} #This is the solution plot(phagocytosis ~ log_carbon, data = malawi_carbon, xlab = "Log carbon score", ylab = "Phagocytic activity (%)") ``` Have a look at the plot. Do you see any relationship between carbon exposure and phagocytic activity? Are there any clusters of data or obviously problematic data points with unlikely values? Don't overthink this: your first impressions will probably be correct. Noew let's plot our other measure of lung immune function, oxidative burst activity, against carbon score. You can use the same code as before but you need to replace `phagocytosis` with `oxidative_burst` and you'll need a new y-axis label. I suggest "Oxidative burst index": it doesn't really have units. ```{r scatterplot02, exercise = TRUE, exercise.lines = 8} ``` ```{r scatterplot02-hint-1} #You can specify the x- and y- variables like this: plot(oxidative_burst ~ log_carbon,... ) #in which case you need to specify data = malawi_carbon,... ) #as one of your arguments. #Alternatively, you can do it like this plot(malawi_carbon$oxidative_burst ~ malawi_carbon$log_carbon, #If you do this you don't need to #say where the data are. #Don't forget to add your other arguments. ``` ```{r scatterplot02-hint-2} #Check that there's a comma between all arguments #and that all your brackets and quote marks are #matched. ``` ```{r scatterplot02-hint-3} #This is the solution plot(oxidative_burst ~ log_carbon, data = malawi_carbon, xlab = "Log carbon score", ylab = "Oxidative burst index") ``` Compare this plot to the previous one. Do they look similar? Is there a pattern in one of them that you don't see in the other? <br> <details><summary><b>Click here for explanation</b></summary> Neither plot shows a strong pattern, there are no obvious problem data points. Looking at the first there seems to be no relationship between carbon exposure and phagocytic activity, either positive or negative. Looking at the second plot, however, we can see what apppears to be a negative correlation between oxidative burst index and carbon score, with the higher values of oxidative burst activity all being associated with low carbon scores. It's not clear, and the plot doesn't give us much confidence that this isn't simply a consequence of sampling error --- it's possible that the pattern we can see could have arisen simply by chance. For those who are familiar with statistical testing we can look at the significance of the correlation coefficient to give us an indication of how likely this is. 
```{r}
cor.test(malawi_carbon$oxidative_burst, malawi_carbon$log_carbon)
```

The p-value for the correlation is 0.034, which is indeed less than the cut-off for statistical significance at 0.05, so we do have a statistically significant negative correlation. The data are noisy though and the sample size is fairly small so we should be very cautious interpreting this pattern.
</details>

<br><br><hr>

1. Rylance, J., Chimpini, C., Semple, S., Russell, D.G., Jackson, M.J., Heyderman, R.S. & Gordon, S.B. (2015) Chronic Household Air Pollution Exposure Is Associated with Impaired Alveolar Macrophage Function in Malawian Non-Smokers. PloS one, 10, e0138762.

## Increasing variance

Now we'll use a dataframe called `worldbank`, which is already loaded and has data for 186 countries from 2014 as published by the World Bank. One of the variables is called `Forest_area` and represents the percentage of the land surface for each nation which is forested. A second variable is called `Precipitation` and is the annual precipitation in mm. Try drawing a scatterplot of forest area, on the y-axis, versus precipitation on the x-axis. We'll make the plot look a bit nicer by using filled circles for our plot symbols, which we can do by using the argument `pch = 16`. We'll give them some colour as well: I suggest `aquamarine4` or `steelblue` which we can specify by using the `col = "colour name"` argument. Don't forget those quote marks.

```{r scatterplot1, exercise = TRUE, exercise.lines = 8}

```

```{r scatterplot1-hint-1}
# You can use the same code we used for the first example.
# You just need to change the two variable names and the axis labels.
```

```{r scatterplot1-hint-2}
#Remember that the two variables are in the worldbank
#data frame so you need to use the syntax
#worldbank$Forest_area (data frame name, $, variable name),
#or you need to add a data = worldbank argument to the
#function call.

#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r scatterplot1-hint-3}
#This is the solution
plot(worldbank$Forest_area ~ worldbank$Precipitation,
     pch = 16,
     col = "steelblue",
     xlab = "Precipitation (mm)",
     ylab = "Forest area (%)")
```

Have a look at this and see what patterns you can see. Can you see a correlation? Are there any obviously problematic data? Are there any other patterns in the data?

<br>
<details><summary><b>Click here for explanation</b></summary>

The spread of data indicates a positive correlation: in general nations with low percentages of forest cover have low precipitation, and *vice-versa*. There are no obviously anomalous datapoints (we might look for percentages <0 or >100, or unfeasibly high precipitation). This plot is rather different from the previous one, however, because the amount of dispersion in the data increases as the amount of precipitation increases --- when precipitation is low there is very little variance in the data and all the nations with low precipitation have little or no forest cover. This is hardly surprising since these countries are essentially deserts. When precipitation is high, however, there is a complete range of percent forest area, from almost none to close to 100%. This increasing variance (which statisticians refer to by a very polysyllabic word, *heteroskedasticity*, meaning that the variance is heterogeneous) is a common pattern in all sorts of data and is something that can cause problems with certain analyses, so you need to be aware of it.
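If you'd like to see what this pattern looks like in isolation, here's a quick simulation with made-up data in which the scatter around the trend grows as x increases:

```{r}
# Simulated heteroskedasticity: the standard deviation of the
# noise is proportional to x, so the spread of y grows with x
set.seed(42)
x <- runif(100, min = 1, max = 10)
y <- 2 * x + rnorm(100, mean = 0, sd = x)

plot(y ~ x, pch = 16, col = "steelblue")
```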
</details>

## Skew data and log transformation

The `carnivores` data frame contains brain mass and body mass data for 199 species of carnivorous mammals, originally published as part of a larger dataset by Burger *et al.* in 2019^1^. The variable for brain mass is called `Mean_brain_mass_g` and that for body mass is `Mean_body_mass_Kg`. Plot a scatterplot of brain mass against body mass and have a look at it. This plot is better with open symbols so use the default option: you can remove the `pch = 16` line.

```{r scatterplot2, exercise.lines = 8, exercise = TRUE}

```

```{r scatterplot2-hint-1}
# For the plot, you can use the same code we used for the previous
#example again.
#
# You just need to change the two variable names and the axis labels.
```

```{r scatterplot2-hint-2}
#Remember that the two variables are in the carnivores
#data frame and you need to use the syntax
#data frame name, $, variable name

#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r scatterplot2-hint-3}
#This is the solution
plot(
  carnivores$Mean_brain_mass_g ~ carnivores$Mean_body_mass_Kg,
  col = "steelblue",
  xlab = "Mean body mass (Kg)",
  ylab = "Mean brain mass (g)"
)
```

What do you see?

<br>
<details><summary><b>Click here for explanation and more</b></summary>

It looks as though there is an increasing, but curved, relationship between brain mass and body mass but the plot is very hard to interpret because almost all of the data are very close to the origin. This is because both brain mass and body mass are strongly positively skewed: here's a histogram of body mass to make the point.

```{r fig.width = 4, fig.height = 5, fig.cap = "**Figure 3** Frequency histogram of body mass for 199 species of carnivore"}
hist(carnivores$Mean_body_mass_Kg,
     breaks = 30,
     col = "sienna3",
     main = "",
     xlab = "Mean body mass (Kg)"
     )
```

When we have data like this then one way to try to make the plot more readable is to *log-transform* the data, or alternatively to plot it on a log scale. To log transform it you would use the `log()` function, so your function call might look like this:

```{r eval = FALSE}
plot(log(carnivores$Mean_brain_mass_g) ~ log(carnivores$Mean_body_mass_Kg),...
```

but we're going to plot the data on a log scale instead. This involves transforming the *axes* rather than transforming the individual data points, and you can do this in R by adding the `log = "xy"` argument to your `plot()` function call. Note that if you just wanted the y-axis on a log scale you could use `log = "y"` and for just the x-axis `log = "x"`. Have a go at generating a new plot, but with the axes on a log scale.

*TIP* if you just add the `log = "xy"` argument R will plot the graph with scientific notation on the x-axis, so 1e+03 instead of 1000. To stop this, insert this before the `plot()` command:

`options(scipen = 999)`

This sets a *global option* in R and stops it using scientific notation.

```{r scatterplot3, exercise = TRUE, exercise.lines = 10}

```

```{r scatterplot3-hint-1}
# You can use the same code we used for the previous example again.
# You just need to add the log = "xy" argument, and add the options... line before the plot function call.
```

```{r scatterplot3-hint-2}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r scatterplot3-hint-3}
#This is the solution
options(scipen = 999)

plot(
  carnivores$Mean_brain_mass_g ~ carnivores$Mean_body_mass_Kg,
  col = "steelblue",
  xlab = "Mean body mass (Kg)",
  ylab = "Mean brain mass (g)",
  log = "xy"
)
```

You can see that plotting these data on a log-log scale (notice how the x-axis ticks go from an increase of 900g between the first and second to 900Kg between the last two) has completely changed the way the data are visualised, and now we have an impressively tight straight line relationship.
</details>

<br><br><hr>

1. Burger, J.R., George, M.A., Leadbetter, C. & Shaikh, F. (2019) The allometry of brain size in mammals. Journal of mammalogy, 100, 276–283.

## Data with structure

The longevity dataset has data on maximum recorded lifespan, body mass and various ecological factors for 909 species of birds and mammals, from a publication by Healy *et al.* (2014)^1^. We're going to look at the relationship between longevity and body mass for a subset of these data consisting of the data from the mammal orders with more than 30 species represented. This subset of data is in the `mammal_longevity` data frame.

The variables you need to plot are `maximum_lifespan_yr` and `mass_g`. This time, since the name of the data frame is long and unwieldy, we'll just use the variable names in the `plot()` call and then add an argument `data = mammal_longevity` which will tell R where to look. These data need to be plotted in log space to be visualised properly, as with the previous example, so you'll need to have the `log = "xy"` argument in there. Don't forget to put some sensible axis names in using `xlab = ` and `ylab = `. I'd recommend plotting this with the default plot symbols, so you don't need to specify anything with `pch = `.

```{r scatterplot4, exercise.lines = 15, exercise = TRUE}

```

```{r scatterplot4-hint-1}
# You can use the same code we used for the previous
# example again, just modified for the present plot.
```

```{r scatterplot4-hint-2}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
```

```{r scatterplot4-hint-3}
#This is the solution
options(scipen = 999)

plot(
  maximum_lifespan_yr ~ mass_g,
  col = "steelblue",
  xlab = "Mean body mass (g)",
  ylab = "Maximum lifespan (years)",
  log = "xy",
  data = mammal_longevity
)
```

Have a good look at the plot. Is there a relationship? Are there any problem data points? Is there any clustering or other evidence of structure?

<br>
<details><summary><b>Click here for explanation and more</b></summary>

This is kind of a weird plot... there seems to be a positive relationship, but there is something strange going on with some of the data for very small mammals giving a "hump" sticking upwards, and there is a bit of clustering in the rest of the data that might represent some underlying structure there. It's likely that at least some of this structure reflects the phylogenetic relationships in the mammalia, so let's see what happens when we colour-code our data by order. This can be done very easily by just adding `col = order` as an argument to your `plot()` function call, but you have to remember to declare order as a factor first before you start the `plot()` function.
Use this line of code to do this:

```{r eval = FALSE}
mammal_longevity$order <- as.factor(mammal_longevity$order)
```

We will also need a legend so that we know which colour corresponds to which order, and I'll give you the code for this, which needs to be added after you've finished the `plot()` instruction.

```{r eval = FALSE}
legend(
  "bottomright", #Put the legend in the bottom
  #right corner of the plot
  fill = 1:5, #Filled boxes with colours 1 - 5 from
  #the default R colour palette
  legend = levels(mammal_longevity$order), #Extract the
  #names of the levels of the order factor
  cex = 0.8 #Make the text a bit smaller
)
```

```{r scatterplot5, exercise.lines = 25, exercise = TRUE}

```

```{r scatterplot5-hint-1}
# You can use the same code we used for the previous
# example again, just modified for the present plot.
```

```{r scatterplot5-hint-2}
#Check that there's a comma between all arguments
#and that all your brackets and quote marks are
#matched.
#
#Make sure that the code for the legend is after
#the plot function is complete, so after the
#closing bracket
```

```{r scatterplot5-hint-3}
#This is the solution
mammal_longevity$order <- as.factor(mammal_longevity$order)

options(scipen = 999)

plot(
  maximum_lifespan_yr ~ mass_g,
  col = order,
  xlab = "Mean body mass (g)",
  ylab = "Maximum lifespan (years)",
  log = "xy",
  data = mammal_longevity
)

legend(
  "bottomright", #Put the legend in the bottom
  #right corner of the plot
  fill = 1:5, #Filled boxes with colours 1 - 5 from
  #the default R colour palette
  legend = levels(mammal_longevity$order), #Extract the
  #names of the levels of the order factor
  cex = 0.8 #Make the text a bit smaller
)
```

This new plot reveals some detailed structure within this dataset related to the phylogenetic relationships between the animal species plotted. Have a think about what it shows and then have a go at answering these questions. NB Chiroptera are bats and Artiodactyla are even-toed ungulates.

```{r quiz1, echo = FALSE}
quiz(
  question("Which of the following are true? More than one answer can be correct.",
    answer("The artiodactyls and carnivora have relatively long lives for their body weights"),
    answer("Rodents live surprisingly long given that most of them are small"),
    answer("On average, a primate of a particular body mass will live longer than an artiodactyl of the same mass", correct = TRUE),
    answer("From looking at the graph, it doesn't appear that longevity increases with mass in the Chiroptera, but it does in all the other orders shown", correct = TRUE),
    answer("Some bats live for as long as other mammals that are 10000 times their mass", correct = TRUE),
    answer("The longest lived bat lives for longer than the longest lived rodent", correct = TRUE),
    answer("The heaviest bat weighs about 300g"),
    answer("The Carnivora and the Artiodactyla tend to cluster together in terms of both mass and longevity, although the smaller carnivores are lighter than the smaller artiodactyls", correct = TRUE),
    answer("The longest lived species in this dataset is a primate")
  )
)
```

</details>

<br><br><br>

^1^ Healy, K., Guillerme, T., Finlay, S., Kane, A., Kelly, S.B.A., McClean, D., Kelly, D.J., Donohue, I., Jackson, A.L. & Cooper, N. (2014) Ecology and mode-of-life explain lifespan variation in birds and mammals. Proceedings, Biological sciences, 281, 20140298.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/04_Scatterplots/Scatterplots.Rmd
--- title: "Sampling and uncertainty 1: Sampling distributions" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > What are sampling distributions and why are they important?? --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE) ``` ## Welcome Welcome to the sampling and uncertainty tutorials. These aim to teach how we quantify uncertainty in statistics via understanding sampling distributions, standard errors and confidence intervals. There are four tutorials in total and each builds on the previous one. There is some knowledge of R assumed: if you are unhapy with concepts such as arguments in functions, saving data into vectors etc. then you should probably review these before attempting these. ## Introduction There's a fundamental problem in much of science which revolves around the simple fact that we are usually unable to measure all of the things we would like to measure. Let's say you're a climate change biologist and you're concerned that chinstrap penguin populations in Antarctica might be negatively impacted by rising temperatures because the availability of krill, their main food, will decline as the temperature increases. To monitor this you want to know how much the penguins in your population weigh at the start of the reproductive season, so that you can see if their mean weight is declining over time. There are estimated to be around 8 million chinstrap penguins on the planet, and in your study population there are around 55,000 breeding pairs, so a total population of somewhere above 110,000. It's next to impossible to catch and weigh 110,000 penguins, of course, so what you would do would be to weigh a smaller number, maybe 30, maybe 100, maybe 500 depending on the availability of people, time, and equipment for penguin capture and and weighing. This group of penguins represents a *sample* of the total *population*. What you are trying to find out is the mean weight for the population, the *population mean* ($\mu$) and the hope is that the mean weight of the penguins in your sample (the *sample mean*, $\bar{x}$) is sufficiently close to the mean weight of all the penguins in your population to be a useful number. ![Chinstrap penguin](images/chinstrap.jpg){width="400"} Chinstrap penguin on Deception island. Photo Christopher Michel, released on a creative commons CC BY 2.0 licence. Assuming you've managed to avoid any bias in your sampling, you still have to think about how representative your sample mean is likely to be of the true population mean. Because of chance events in the sampling process, a completely random sample will usually be different from the population it's sampled from in some way. In our penguin example you might have a bigger proportion of heavy or light penguins in your sample than in the population, giving you a sample mean that is too high or too low. There is, then, *uncertainty* regarding how representative a sample mean is of the population mean, and it's important to be able to measure this uncertainty in some way: you need to know whether the mean that you've produced is likely to be a good estimate of the population mean or not. This series of tutorials teaches a series of important concepts related to this: sampling distributions, standard errors and confidence intervals. 
These are the ideas and tools that statisticians use to describe and quantify the amount of uncertainty associated with a particular estimate.

## The sampling distribution of the mean

To try to understand how your sample mean might deviate from the population mean we need to think about the possible outcomes when you sample from a given population. As another example, let's say you want to know what the average systolic blood pressure is for women between 20 and 30 years old in the UK. Measuring the blood pressure of every woman aged between 20 and 30 in a population of 66 million people would take a long time, however, so as with our penguins what you would do instead is sample from the population: select a smaller number of individuals at random, measure them and hope that the value you calculate from your sample (the sample mean, denoted by $\bar{x}$) is sufficiently similar to the population mean (denoted by $\mu$) to give you a good idea of what the population level value is.

We can simulate this using R's excellent random number generation tools. The `rnorm()` function generates values drawn from a normal distribution with a specified standard deviation and mean, with the arguments for the function being `n = ` for the sample size, `mean = ` for the mean and `sd = ` for the standard deviation. Here I've used `rnorm()` to generate 30 numbers from a normal distribution with `mean = 120` and `sd = 17`, which is close to what we might see in women of the age class we're interested in.

```{r}
# Set the random number seed so we get the same
# result each time
set.seed(10)

# Generate sample and save as a vector
# called sample1
sample1 <- rnorm(n = 30, mean = 120, sd = 17)

# Round the values off to 1 decimal place
sample1 <- round(sample1, 1)

# Show us the numbers!
print(sample1)
```

What's the mean of our sample? See if you can calculate it using the `mean()` function.

```{r mean1-setup, echo = FALSE}
set.seed(10)

# Generate sample and save as a vector
# called sample1
sample1 <- rnorm(n = 30, mean = 120, sd = 17)

# Round the values off to 1 decimal place
sample1 <- round(sample1, 1)
```

```{r mean1, exercise = TRUE}

```

```{r mean1-hint-1}
# You just need to put the name of the vector
# (sample1) as an argument for the mean() function
```

```{r mean1-hint-2}
# This is the solution

mean(sample1)
```

So we simulated sampling 30 individuals from a population with a population mean of 120, and our sample mean is 114.7. That's somewhat close to the actual population mean but purely by random chance we got more low values than high values in our sample, and so our sample mean underestimates the population mean by 5.3 mmHg. If you were to do this exercise in the real world, however, you would have no idea at all how representative your sample mean was. It could be exactly right, it could be slightly low, it could be much, much too high.

What about taking a second sample? This code is very similar to that in the previous exercise, but there are three mistakes that mean it won't run as it is. See if you can fix them.
```{r mean2, exercise = TRUE, exercise.lines = 15}
set.seed(2)

# Generate sample and save as a vector
# called sample1
sample2 <- rnorm(n = 30, mean = 120 sd = 17)

# Round the values off to 1 decimal place
sample2 <- round(sample1, 1

# Calculate the mean
mean(sample2)
```

```{r mean2-hint-1}
# Check that all the arguments are separated by commas
```

```{r mean2-hint-2}
# Check that all the arguments are separated by commas
#
# Check that all the brackets pair up
#
# Check that all the object names are correct
```

```{r mean2-hint-3}
# Check that all the arguments are separated by commas
#
# There's a missing comma in the rnorm(... function call
#
# Check that all the brackets pair up
#
# There's a missing bracket in the round(... function call
#
# Check that all the object names are correct
#
# There's a mis-named object in the round(... function call
```

```{r mean2-hint-4}
# This is the solution:

set.seed(2)

# Generate sample and save as a vector
# called sample2
sample2 <- rnorm(n = 30, mean = 120, sd = 17)

# Round the values off to 1 decimal place
sample2 <- round(sample2, 1)

# Calculate the mean
mean(sample2)
```

This time our sample mean overestimates the population mean somewhat, so the random number generator, purely by chance, gave us more high values. The value we obtained from the second sample is different from the first one simply because we have sampled (or simulated sampling) a separate set of individuals and they will not all have the same blood pressures as the women in the first sample. Now we have a somewhat better idea of what the real population mean might be: we took two samples of thirty, one of which gave a mean of 114.7 and one a mean of 123.9. You might now be a bit more confident that the true value of the mean lies somewhere in the region of these two values.

Now think about what would happen if you repeated this many times, maybe a hundred. You select 30 women from the population, measure their blood pressure and calculate a mean, then you do it again 99 more times. Obviously no one would actually do this, but if you did you would generate a dataset of 100 sample means. Some of these would be very close to the actual population mean, but some would not: simply by chance in one sample you might get a lot of people with rather high blood pressure, meaning that your sample mean was rather higher than the population mean, or you might get several people with very low blood pressure leading to a low estimate. If you were to plot a frequency histogram of your 100 means it might look like this:

```{r}
#Set random number seed
set.seed(20)

#Generate the means of 100 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 100,
                     mean(rnorm(
                       n = 30, mean = 120, sd = 17
                     )))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Mean systolic blood pressure (mm Hg)",
     main = "Frequency histogram for 100 blood\npressure samples")
```

We can see that the majority of our sample means are relatively close to the true population mean of 120 mm Hg, but some are not. The distribution overall is roughly symmetrical as you might expect, with about the same number of high values as low values.

What might this look like if we increased the number of samples from 100 to 10000? Here's the code we used before: try to change it to generate 10000 means instead of 100.
```{r sampling1, exercise = TRUE, exercise.lines = 20}
#Set random number seed
set.seed(20)

#Generate the means of 100 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 100,
                     mean(rnorm(
                       n = 30, mean = 120, sd = 17
                     )))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Mean systolic blood pressure (mm Hg)",
     main = "Frequency histogram for 100 blood\npressure samples")
```

```{r sampling1-hint-1}
#This is very simple: just change the
#"n = 100" argument in the replicate() function call
#You probably want to change the title as well
```

```{r sampling1-hint-2}
#This is the solution

#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 10000,
                     mean(rnorm(
                       n = 30, mean = 120, sd = 17
                     )))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Mean systolic blood pressure",
     main = "Frequency histogram for 10000 blood pressure samples")
```

Now you can really see the pattern in our distribution of means. The shape of this histogram should be familiar to you because it follows a *normal distribution* and you can clearly see that famous bell curve. If we repeatedly sample from the same population, and calculate a mean for each sample, therefore, it seems that the distribution of means follows a normal distribution, centred around the population mean.

Recall that our aim is to quantify how accurate an estimate of the population mean our sample mean is likely to be, and have another look at this histogram. Knowing the shape of this *sampling distribution* of means tells us that any individual sample mean is most likely to be where the bulk of our means are located, so between about 117 and 123 mm Hg, or in other words within about 3mm Hg of the true population mean.

## Sampling distributions of means from non-normal populations

What if we don't sample from a normal distribution though? What might this *sampling distribution* of means look like if we drew them from a population with a distribution that didn't itself follow that bell curve? The `runif()` function in R draws numbers from a *uniform* distribution --- an even distribution lying between a maximum and a minimum. The arguments it takes are `n = ` for the sample size, `min = ` to define the minimum value and `max = ` to define the maximum. Here's some example data generated by `runif()`:

```{r}
# Generate random numbers drawn from a uniform distribution
samples <- runif(n = 120, min = 0, max = 100)

# Draw histogram
# NB the \n in the main = argument is an 'escape character'
# which tells R to put a new line in the text at that point
hist(samples,
     breaks = 20,
     xlab = "Value",
     main = "Frequency histogram for 120 numbers drawn\nat random from a uniform distribution")
```

As you can see, the data in this single sample are distributed fairly evenly across the whole range. Some of our bins have a few more datapoints than others, but there's no overall pattern. What happens if we use `runif()` to sample repeatedly from a uniform distribution and plot a frequency histogram of how our means are distributed? See if you can modify the code we used earlier to replace the `rnorm()` function generating the data we saw before with `runif()`, with a sample size of 30, a minimum of 105 and a maximum of 135. It's probably a good idea to change the title of the histogram to reflect what you're doing.
```{r sampling2, exercise = TRUE, exercise.lines = 20}
#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Values",
     main = "Frequency histogram for 10000 blood pressure samples")
```

```{r sampling2-hint-1}
# What you need to change is the
# rnorm(n = 30, mean = 120, sd = 17)
# part in the lines of code which begin
# "samples <-"
```

```{r sampling2-hint-2}
# What you need to change is the
# rnorm(n = 30, mean = 120, sd = 17)
# part in the samples <- lines of code

#You need to replace it with

runif(n = 30, min = 105, max = 135)
```

```{r sampling2-hint-3}
#What you need to change is the
# rnorm(n = 30, mean = 120, sd = 17)
# part in the samples <- lines of code

#You need to replace it with
# runif(n = 30, min = 105, max = 135)

# Make sure all the arguments are
# separated by commas and that all
# the brackets match
```

```{r sampling2-hint-4}
#This is the solution

#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a uniform
#distribution between 105 and 135
samples <- replicate(n = 10000,
                     mean(runif(n = 30, min = 105, max = 135)
                     ))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Values",
     main = "Frequency histogram for the means of 10000 samples\nfrom a uniform distribution")
```

This histogram looks almost identical to the previous one where the means were generated from data drawn from a normal distribution, even though the population the data were drawn from this time had a distribution that in no way resembles a normal distribution. This illustrates a hugely important point:

> If you sample repeatedly from the same population, as the number of samples increases **the sampling distribution of the means will always tend towards a normal distribution**, with the mean of the sampling distribution tending to get closer to the population mean as the number of samples increases. For very large numbers of samples the sampling distribution of the means will be normal, and the mean for the sampling distribution will be the same as the population mean.

## The effect of the size of each sample

OK, so if we sample repeatedly and calculate lots of means the frequency distribution for those means will always tend towards a normal distribution, no matter what the underlying distribution. What if we increase the sample size each of those means is calculated from? Instead of taking 10000 samples of 30 values each and calculating a mean for each, what happens if we take 10000 samples of 100 values each and calculate a mean for each?

Here's some code that will draw two sets of samples from the same normal distribution and draw two histograms. The part of the code beginning with `samples2 <-` needs to be filled in. Use the code from the `samples <-` lines but change it so that each mean is calculated from 100 values instead of 30. For the two histograms, we want them to be drawn with the same x-axis scaling so you'll need to add the argument `xlim = c(105, 135)` to the code for each one, and you'll want to change the titles as well to reflect what you're plotting.
```{r sampling3, exercise = TRUE, exercise.lines = 35, fig.height = 8}
#Draw two plots one above the other
par(mfrow = c(2,1))

#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#This is the bit you need to complete
samples2 <-

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Mean systolic blood pressure",
     main = "Frequency histogram for 10000 blood pressure samples")

#Plot frequency histogram of means
hist(samples2,
     breaks = 20,
     xlab = "Mean systolic blood pressure",
     main = "Frequency histogram for 10000 blood pressure samples")
```

```{r sampling3-hint-1}
# Use the code from

samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#starting with replicate. Change the sample size
#in the rnorm() function call to 100
```

```{r sampling3-hint-2}
# You only need to change 1 number:
# the n = 30 in the rnorm() function call
# needs to be changed to n = 100

# For the xlim = c(105, 135) arguments just add
# this code in for each one
```

```{r sampling3-hint-3}
# Make sure all the arguments are
# separated by commas and that all
# the brackets match
```

```{r sampling3-hint-4}
# This is the solution

#Draw two plots one above the other
par(mfrow = c(2,1))

#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#Generate the means of 10000 samples of 100
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples2 <- replicate(n = 10000,
                      mean(rnorm(n = 100, mean = 120, sd = 17)
                      ))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlim = c(105, 135),
     xlab = "Mean systolic blood pressure",
     main = "Sample size = 30")

#Plot frequency histogram of means
hist(samples2,
     breaks = 20,
     xlim = c(105, 135),
     xlab = "Mean systolic blood pressure",
     main = "Sample size = 100")
```

What you can see is that when the mean for each sample is calculated from more data, the amount of spread in the sampling distribution of means is lower: there are fewer sample means with very high or very low values. This makes sense of course: the larger the sample used to calculate the sample mean, the more accurate an estimate of the population mean it is likely to be.

## How does the variance of the population affect the sampling distribution?

Something else that might influence our sampling distribution is the amount of variability in the population that is being sampled from. Let's repeat the previous exercise, but instead of changing the sample size for the second histogram try to change the standard deviation of the population being sampled from 17 to 10. We need to plot our histograms on the same x-axis scale so you need to add the argument `xlim = c(105, 135)` to each `hist()` function call.
```{r sampling4, exercise = TRUE, fig.height = 6, exercise.lines = 35}
#Draw two plots one above the other
par(mfrow = c(2,1))

#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#This is the bit you need to complete
samples2 <-

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlab = "Mean systolic blood pressure",
     main = "Frequency histogram for 10000 blood pressure samples")

#Plot frequency histogram of means
hist(samples2,
     breaks = 20,
     xlab = "Mean systolic blood pressure",
     main = "Frequency histogram for 10000 blood pressure samples")
```

```{r sampling4-hint-1}
# Use the code from

samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#starting with replicate. Change the
#standard deviation (sd)
#in the rnorm() function call to 10
```

```{r sampling4-hint-2}
# You only need to change 1 number:
# the sd = 17 in the rnorm() function call
# needs to be changed to sd = 10

# For the xlim = c(105, 135) arguments just add
# this code in for each one
```

```{r sampling4-hint-3}
# Make sure all the arguments are
# separated by commas and that all
# the brackets match
```

```{r sampling4-hint-4}
# This is the solution

#Draw two plots one above the other
par(mfrow = c(2,1))

#Set random number seed
set.seed(20)

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 17
samples <- replicate(n = 10000,
                     mean(rnorm(n = 30, mean = 120, sd = 17)
                     ))

#Generate the means of 10000 samples of 30
#datapoints each, drawn from a normal
#distribution with mean 120 and sd 10
samples2 <- replicate(n = 10000,
                      mean(rnorm(n = 30, mean = 120, sd = 10)
                      ))

#Plot frequency histogram of means
hist(samples,
     breaks = 20,
     xlim = c(105, 135),
     xlab = "Mean systolic blood pressure",
     main = "Standard deviation = 17")

#Plot frequency histogram of means
hist(samples2,
     breaks = 20,
     xlim = c(105, 135),
     xlab = "Mean systolic blood pressure",
     main = "Standard deviation = 10")
```

The second histogram shows that when samples are taken from a population with a lower standard deviation, the spread of the sampling distribution of the means is similarly reduced. This makes sense: if the individuals in the population are not especially variable then those in the sample are unlikely to show much variability, and *vice-versa*.

## Summary and quiz

What are the take-home messages from this? We know how sample means are likely to be distributed around population means, and we've seen that larger sample sizes and lower dispersion in the population being sampled will both lead to sample means that, on average, are closer to population means. If we want to quantify how certain we are, one option would be to repeatedly sample and to use the sampling distribution of means to tell us what the real population mean is likely to be. That's not usually going to be an option, however, but what we can do is use what we've found out in this tutorial to help us to quantify uncertainty for a single sample --- this takes us to the next tutorial, on standard errors.

We'll finish up with a short quiz to see if you've managed to grasp the main points so far.

```{r quiz, echo = FALSE}
quiz(
  question("Consider a situation where we have sampled repeatedly from a population, calculated a mean for each sample and drawn a frequency histogram of the means. Which of the following is correct?",
    answer("The distribution of means will always be normal"),
    answer("The distribution of means will always be centred on the population mean"),
    answer("The distribution of means will show positive skew if the underlying population is itself positively skewed"),
    answer("As the number of samples increases, the distribution of means will become closer to a normal distribution", correct = TRUE),
    answer("If the number of samples is greater than 30, the mean of the sampling distribution will be equal to the population mean")
  ),
  question("Which of these cases would have the narrowest sampling distribution of means?",
    answer("500 samples each of 100 from a population with standard deviation = 2", correct = TRUE),
    answer("500 samples each of 50 from a population with standard deviation = 2"),
    answer("1000 samples each of 50 from a population with standard deviation = 3"),
    answer("500 samples each of 100 from a population with standard deviation = 3"),
    answer("10000 samples each of 50 from a population with standard deviation = 4")
  )
)
```

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/05_Sampling_distributions/Sampling_distributions.Rmd
--- title: "Sampling and Uncertainty 2: Standard errors" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > How the standard error related to the sampling distribution and why it is important? --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE) ``` ## Standard errors introduced In the tutorial on sampling distributions we saw that if we were to sample repeatedly from any population and calculate a mean for each sample, then as our number of samples got larger the sampling distribution of means would follow a normal distribution more and more closely. The mean of the sampling distribution will tend towards the population mean and the amount of spread in the sampling distribution will be determined by the standard deviation of the population and by size of the samples that we're calculating our means from. Knowing this can give you an indication of how representative of the population mean a single sample mean is: large sample sizes and small population standard deviations will, in general, give sample means that better reflect the population mean. That's all a bit vague, and it is often important to be more precise about how much uncertainty there is around an estimate. In this tutorial we'll look at one way of doing this via a measure caled the *standard error*. Since the sampling distribution is going to approximate to a normal distribution, we can ask what the standard deviation of the sampling distribution itself should be. Surprisingly, it is very simple to calculate an estimate for this value if we only have a single sample: the standard deviation of the sampling distribution of means can be estimated as: $$ Standard \: error =\frac{s}{\sqrt{n}}$$ where $n$ is the sample size and $s$ is the sample standard deviation. This is called the *standard error of the mean*, or more usually just the *standard error*, often abbreviated to SE. Just to clear up any confusion, this means that you can easily calculate an estimate of how much spread there would be in the sampling distribution of means from a single sample: take your sample, calculate the standard deviation and divide by the square root of the sample size. ## Standard errors versus standard deviations People often get confused between these two statistics and it's important to be clear about what they are telling you: * The sample standard deviation is an estimate of the population standard deviation, and it tells you how much dispersion there is in the data around the mean. * The standard error is, fundamentally, a measure of the uncertainty associated with a sample mean: it gives you a measure of how accurate an estimate of the population mean it is likely to be. When you calculate a standard deviation, it tells you how much spread there is in your data. When you take a sample from a population, the standard deviation you calculate is an estimate of the true value of the population standard deviation --- the population you're sampling from has a certain degree of dispersion in the data, and you are estimating that by calculating a sample standard deviation. As with your estimate of the mean, as you increase the sample size your estimate of the standard deviation should get more accurate. We can illustrate this by using R to sample repeatedly from a population with known standard deviation as follows. 
We're using something called a for loop to do this repetitive task: if you haven't met these before there's a video explaining them here ![](https://www.youtube.com/watch?v=0_Bc0a6op-I).

```{r}
set.seed(3)

# Set up a vector of sample sizes from 2 to 500
sample.sizes <- 2:500

sd.estimates <- numeric(length = 499)

# Using a for... loop, use rnorm to sample from
# a normal distribution with mean 5 and sd 3, one with each
# sample size, and calculate the standard deviation of each one
for (i in 1:499) {
  sd.estimates[i] <- sd(rnorm(n = sample.sizes[i], mean = 5, sd = 3))
}

# Plot the standard deviations against sample size
plot(sd.estimates ~ sample.sizes,
     pch = 16,
     cex = 0.4,
     xlab = "Sample size",
     ylab = "Sample standard deviation"
)
```

Here, we've sampled repeatedly from a population with a standard deviation of 3. You can see that the amount of spread in our estimated standard deviation decreases as the sample size gets bigger, or in other words our estimates of the population standard deviation become more accurate. There are lots of estimates which are quite a long way from the true value of 3 when the sample size is less than about 100, but when it's greater than about 250 you can see that all of the estimates are nicely close to the population value of 3.

What happens to the standard error as the sample size increases? See if you can edit this code to plot the standard error against sample size instead of the standard deviation.

```{r sampling5, exercise = TRUE}
set.seed(3)

# Set up a vector of sample sizes from 2 to 500
sample.sizes <- 2:500

sd.estimates <- numeric(length = 499)

# Using a for... loop, use rnorm to sample from
# a normal distribution with mean 5 and sd 3, one with each
# sample size, and calculate the standard deviation of each one
for (i in 1:499) {
  sd.estimates[i] <- sd(rnorm(n = sample.sizes[i], mean = 5, sd = 3))
}

# Plot the standard deviations against sample size
plot(sd.estimates ~ sample.sizes,
     pch = 16,
     cex = 0.4,
     xlab = "Sample size",
     ylab = "Sample standard deviation"
)
```

```{r sampling5-hint-1}
# You don't need to change the for loop code at all.
# You have a vector of standard deviations and a
# vector of sample sizes, so you need to create a
# new vector after the for loop which is simply the
# vector of standard deviations divided by the square
# root of the vector of sample sizes. Then swap out
# the name of your new vector for the sd.estimates
# vector name in the plot command, and change the
# y-axis title.
#
# The sqrt() function will calculate the square roots
# for you
```

```{r sampling5-hint-2}
# This is the code you need to add to calculate the standard errors

se.vector <- sd.estimates / sqrt(sample.sizes)
```

```{r sampling5-hint-3}
# Make sure all the arguments are
# separated by commas and that all
# the brackets match
```

```{r sampling5-hint-4}
# Don't forget that you have to change the plot code so
# that your new vector of standard errors is the one
# plotted against sample size
```

```{r sampling5-hint-5}
# Don't forget that you have to change the plot code so
# that your new vector of standard errors is the one
# plotted against sample size

# This is the code for the plot

plot(se.vector ~ sample.sizes,
     pch = 16,
     cex = 0.4,
     xlab = "Sample size",
     ylab = "Standard error"
)
```

```{r sampling5-hint-6}
# This is the solution

set.seed(3)

# Set up a vector of sample sizes from 2 to 500
sample.sizes <- 2:500

sd.estimates <- numeric(length = 499)

# Using a for... loop, use rnorm to sample from
# a normal distribution with mean 5 and sd 3, one with each
# sample size, and calculate the standard deviation of each one
for (i in 1:499) {
  sd.estimates[i] <- sd(rnorm(n = sample.sizes[i], mean = 5, sd = 3))
}

# Calculate the standard errors
se.vector <- sd.estimates / sqrt(sample.sizes)

# Plot the standard errors against sample size
plot(se.vector ~ sample.sizes,
     pch = 16,
     cex = 0.4,
     xlab = "Sample size",
     ylab = "Standard error"
)
```

The pattern here is completely different from the one for the standard deviation. As the sample size increases so the standard error gets smaller, albeit with a declining slope. If you had a very large sample size it would be close to zero. This demonstrates what is special about the standard error: it is an estimate of how accurate your estimate of the mean is likely to be. With a small sample size and a large standard deviation, you will have a large SE and this will indicate to you that you should have little confidence that your estimate of the population mean is close to the real value. Conversely, with a large sample size and a small standard deviation you will have a very small standard error and this will indicate that your sample mean is likely to be close to the true value of the population mean.

You will often see mean values quoted as plus or minus one standard error, or graphs with error bars showing the standard error. It's important to remember that these are not indicating the degree of dispersion in the data, rather they are showing the accuracy of the estimate of the mean. It's arguable that the standard error is an inferior measure of the accuracy of an estimate, however, and that what we should really use is the *95% confidence interval* which is calculated from the standard error. This is what we'll look at next.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/06_Standard_errors/Standard_errors.Rmd
--- title: "Sampling and uncertainty 3: Confidence intervals" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > How to calculate the confidence interval for a mean and what it means --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE) ``` ## Confidence intervals Remember that the sampling distribution of means will approximate to a normal distribution, and that we can generate an estimate of what the standard deviation of this distribution of means is by calculating the standard error or SE. One thing you might recall is that if we know the standard deviation of a normal distribution that tells us things about the distribution of data wthin a normal distribution: 68% of all the data will lie within one standard deviation of the mean, 95% of all the data will lie within 1.96 standard deviations of the mean and 99% of all the data will lie within 2.58 standard deviations of the mean, as illustrated here. ```{r echo=FALSE, fig.height = 5, fig.width = 6, fig.cap="**Figure 1** Probability density of a standard normal distribution with mean=0 and standard deviation=1 showing the areas defied by the mean plus or minus 1, 1.96 and 2.58 standard deviations."} X1 <- seq(-3, 3, length = 300) Y1 <- dnorm(X1) plot(X1, Y1, type = "n", xlab = "x", ylab = "P(x)") abline(v=0, lwd=0.5,lty=2) x0 <- min(which(X1 >= -2.58)) x1 <- min(which(X1 >= -1.96)) x2 <- min(which(X1 >= -1)) x3 <- max(which(X1 <= 1)) x4 <- max(which(X1 <= 1.96)) x5 <- max(which(X1 <= 2.58)) polygon(x = c(X1[c(1, 1:x0, x0)]), y = c(0, Y1[1:x0], 0), col = "white", border = NA) polygon(x = c(X1[c(x0, x0:x1, x1)]), y = c(0, Y1[x0:x1], 0), col = "#deebf7", border = NA) polygon(x = c(X1[c(x1, x1:x2, x2)]), y = c(0, Y1[x1:x2], 0), col = "#9ecae1", border = NA) polygon(x = c(X1[c(x2, x2:x3, x3)]), y = c(0, Y1[x2:x3], 0), col = "#3182bd", border = NA) polygon(x = c(X1[c(x3, x3:x4, x4)]), y = c(0, Y1[x3:x4], 0), col = "#9ecae1", border = NA) polygon(x = c(X1[c(x4, x4:x5, x5)]), y = c(0, Y1[x4:x5], 0), col = "#deebf7", border = NA) polygon(x = c(X1[c(x5, x5:300, 300)]), y = c(0, Y1[x5:300], 0), col = "white", border = NA) points(X1, Y1, type = "l") abline(v=0, lwd=0.5,lty=2) text(0, 0.18, "68% of values \n within 1 sd \n of the mean", cex = 1,col="white") arrows(0.6,0.18,0.99,0.18,length=0.1,angle=20,col="white") arrows(-0.6,0.18,-0.99,0.18,length=0.1,angle=20,col="white") text(0, 0.04, "95% of values \n within 1.96 sd \n of the mean", cex = 1,col="white") arrows(0.72,0.03,1.95,0.03,length=0.1,angle=20,col="white") arrows(-0.72,0.03,-1.95,0.03,length=0.1,angle=20,col="white") text(2.5,0.1, "99% of values \n within 2.58 sd \n of the mean", cex = 1) arrows(2.58,0.06,2.58,0.015,length=0.1,angle=20) ``` Let's flip that around: if we have an estimate of the mean and we know the standard error (which is the standard deviation of the sampling distribution of means), how often will the population mean lie within one standard error of our sample mean? Well, if 68% of the data lie within one standard deviation of the mean, then 68% of the time will we get a sample mean that's one standard error or less from the population mean. Let's get R's random number generators to test this out for us. 
```{r}
set.seed(15)

# Setup vectors for results
means <- numeric(1000)
se <- numeric(1000)

# Generate 1000 means and SEs sampling from
# the same population
for (i in 1:1000) {
  sample <- rnorm(n = 50, mean = 10, sd = 2)
  means[i] <- mean(sample)
  se[i] <- sd(sample)/sqrt(50)
}

# Is the population mean within
# one SE of the sample mean? If yes then "Hit"
# If no then "Miss"
miss <- ifelse(10 < means - se | 10 > means + se, "Miss", "Hit")

# Count the number of hits and misses
table(miss)
```

677 times out of 1000, or 67.7% of the time, our random number generator produces a sample mean which is within one standard error of the population mean. This is very close to 68%, which is the percentage of data that should lie within one standard deviation of the mean in a normal distribution, and if we did this for, say, a million samples instead of a thousand it would be very close to 68% indeed. If we know the mean and the SE, therefore, we can quote a range of values within which we should find the population mean, the value we're really interested in, 68% of the time. We could call this the *68% confidence interval*.

$$ 68\% \: CI = \textrm{from} \:\bar{x} - SE \: \textrm{to} \: \bar{x} + SE $$

## 95% Confidence intervals

Now we are able to really quantify how uncertain we are about our estimate of the population mean, and we can put a figure on it. 68% confidence is a bit of a small value though, and it would be better if we could express a range which was more likely to include the true mean. Recall that 95% of the values in a normally distributed dataset should lie within 1.96 standard deviations of the mean. Is it the case that 95% of the time the population mean should lie within 1.96 standard errors of the sample mean? Sort of... if we know what the *population* standard deviation is then we know the standard error without bias and that will be true. If we don't know the population standard deviation, however, our sample standard deviation, from which we calculate our standard error, will actually be slightly biased towards lower values, especially for small samples. What we do because of this is to multiply the standard error by a value derived not from a normal distribution but from something called the *t-distribution*, which corrects for this bias.

```{r echo = FALSE, fig.cap="**Figure 2** Normal distribution compared to two t-distributions. Black line: probability density of a standard normal distribution with mean=0 and standard deviation=1, Blue line: probability density of a *t* distribution on 2 degrees of freedom, Red line: probability density of a *t* distribution on 5 degrees of freedom. Note that the t-distribution is lower and wider than the normal distribution. This is because it is corrected for the probability of missing extreme large or small values at small sample sizes."}
X1 <- seq(-3, 3, length.out = 200)
Y1 <- dnorm(X1)
Y2 <- dt(X1, df = 2)
Y3 <- dt(X1, df = 5)

plot(Y1 ~ X1,
     type = "l",
     col = "black",
     xlab = "",
     ylab = "Probability density"
)

points(Y2 ~ X1,
       type = "l",
       col = "steelblue"
)

points(Y3 ~ X1,
       type = "l",
       col = "firebrick4"
)

legend("topleft",
       legend = c("Normal", "t 5df", "t 2df"),
       lty = 1,
       col = c("black", "firebrick4", "steelblue")
)
```

We can find the value of *t* corresponding to that 1.96 value for a normal distribution by using R's `qt()` function, which gives us the *quantiles* of the t-distribution, or in other words the value of *t* below which a given proportion of the data in a t-distribution should lie.
As an example:

```{r}
qt(0.5, df = 100)
```

The t-distribution is symmetrical around zero, like a standard normal distribution, so 50% of the distribution lies below zero and 50% above. The `df = 100` argument is telling `qt()` how many *degrees of freedom* the t-distribution should have --- the t-distribution changes in shape with different sample sizes, so we need to tell `qt()` which distribution to use, and we actually use the degrees of freedom here, which in this case would be n-1.

We don't want the 50% quantile for *t* though, we want the quantile which corresponds to that 1.96 value which we would use in a simpler world. To get this we actually ask for the 97.5% quantile. This might sound weird but it makes sense because our sample mean can be both larger and smaller than the population mean, so if we want the value that excludes 5% of the distribution we actually want to exclude the 2.5% at the upper extreme of the distribution and the 2.5% at the lower extreme of the distribution. For a sample size of thirty, therefore, we can find the value of *t* to multiply the SE by using:

```{r}
qt(0.975, df = 29)
```

Note that our df is 29 (n-1) for a sample size of thirty. To generate the *95% confidence intervals* for the mean of a sample of size thirty, therefore, we would use this formula:

$$ 95\% \: CIs = \textrm{from} \: \bar{x} - t \times SE\: \: \textrm{to} \: \bar{x} + t \times SE.$$

Let's go back to our blood pressure example. If you sampled 30 women between 20 and 30 at random and measured their systolic blood pressure you might get these values:

102, 129, 95, 129, 136, 101, 77, 99, 126, 100, 143, 124, 119, 97, 106, 109, 119, 109, 119, 87, 115, 147, 131, 111, 154, 130, 142, 123, 118, 109

See if you can calculate what the 95% confidence intervals for this sample are. To help I've started off with a vector called "bp" with the data in it. Remember, the mean can be calculated with the `mean()` function, the SE is the standard deviation (`sd()`) divided by the square root of the sample size, and you know the appropriate value of *t* from the calculation above.
```{r CI1, exercise = TRUE, exercise.lines = 35}
bp <- c(102, 129, 95, 129, 136, 101, 77, 99, 126, 100,
        143, 124, 119, 97, 106, 109, 119, 109, 119, 87,
        115, 147, 131, 111, 154, 130, 142, 123, 118, 109)

```

```{r CI1-hint-1}
# You can calculate the mean simply as

mean.bp <- mean(bp)
```

```{r CI1-hint-2}
# You can calculate the mean simply as

mean.bp <- mean(bp)

# For the SE you need to divide the
# standard deviation by the square
# root of the sample size
```

```{r CI1-hint-3}
# You can calculate the mean simply as

mean.bp <- mean(bp)

# For the SE you need to divide the
# standard deviation by the square
# root of the sample size, like this:

SE.bp <- sd(bp)/sqrt(30)
```

```{r CI1-hint-4}
# You can calculate the mean simply as

mean.bp <- mean(bp)

# For the SE you need to divide the
# standard deviation by the square
# root of the sample size, like this:

SE.bp <- sd(bp)/sqrt(30)

# For the upper 95% CI you need to
# multiply the SE by the value of t (2.045)
# and add it to the mean: for the lower
# 95% CI subtract it from the mean
```

```{r CI1-hint-5}
# You can calculate the mean simply as

mean.bp <- mean(bp)

# For the SE you need to divide the
# standard deviation by the square
# root of the sample size, like this:

SE.bp <- sd(bp)/sqrt(30)

# For the upper 95% CI you need to
# multiply the SE by the value of t (2.045)
# and add it to the mean: for the lower
# 95% CI subtract it from the mean, like
# this:

upper.CI <- mean.bp + 2.045*SE.bp
lower.CI <- mean.bp - 2.045*SE.bp
```

```{r CI1-hint-6}
# You can calculate the mean simply as

mean.bp <- mean(bp)

# For the SE you need to divide the
# standard deviation by the square
# root of the sample size, like this:

SE.bp <- sd(bp)/sqrt(30)

# For the upper 95% CI you need to
# multiply the SE by the value of t (2.045)
# and add it to the mean: for the lower
# 95% CI subtract it from the mean, like
# this:

upper.CI <- mean.bp + 2.045*SE.bp
lower.CI <- mean.bp - 2.045*SE.bp

# Lastly you need to ask R to print out your
# values. You could just put in the object names
# like this

upper.CI
lower.CI

# You can use cat() to
# make it more intelligible:

cat("Upper 95% CI = ", upper.CI)
cat("Lower 95% CI = ", lower.CI)
```

So the 95% CIs for the mean systolic blood pressure for women between 20 and 30 years of age are from 110.1 mmHg to 123.7 mmHg. What does this mean?

```{r quiz1, echo = FALSE}
quiz(
  question("Which of these statements is true?",
    answer("95% of women between 20 and 30 have blood pressures between 110.1 and 123.7 mmHg"),
    answer("The true population mean cannot be greater than 123.7 mmHg"),
    answer("If you sampled 30 times from our population of women repeatedly, the true population mean would be between 110.1 and 123.7 mmHg 95% of the time"),
    answer("The 95% confidence intervals are only reliable because the data they are drawn from are normally distributed"),
    answer("The 95% confidence intervals tell us about the likely location of the population mean", correct = TRUE)
  )
)
```

## Comparing two penguin populations

Back to the penguins which we looked at in the first tutorial! Let's say that you've managed to measure the weights of 15 chinstrap penguins in one of your study populations. The weights that you've measured are (in Kg)

6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0, 6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7

Here's some code that will calculate the 95% confidence intervals for our penguin sample. There are two mistakes: see if you can spot them. Once you've fixed them, run the code.
```{r CI2, exercise = TRUE, exercise.lines = 35}
penguins <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0,
              6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

# Calculate the mean
mean1 <- mean(penguins)

# Calculate the SE
SE1 <- sd(penguins)/sqrt(14)

# Calculate the value of t
t1 <- qt(0.95, df = 14)

# Calculate the CIs
lowerCI <- mean1 - SE1 * t1
upperCI <- mean1 + SE1 * t1

cat("Lower CI =", lowerCI)
cat("Upper CI =", upperCI)
```

```{r CI2-hint-1}
# The first error is in the line

SE1 <- sd(penguins)/sqrt(14)
```

```{r CI2-hint-2}
# The first error is in the line

SE1 <- sd(penguins)/sqrt(14)

# The second error is in the line

t1 <- qt(0.95, df = 14)
```

```{r CI2-hint-3}
# The first error is in the line

SE1 <- sd(penguins)/sqrt(14)

# The standard deviation is being divided
# by the square root of 14 (the df): it should
# be the square root of 15 (the sample size)
```

```{r CI2-hint-4}
# The first error is in the line

SE1 <- sd(penguins)/sqrt(14)

# The standard deviation is being divided
# by the square root of 14 (the df): it should
# be the square root of 15 (the sample size)

# The second error is in the line

t1 <- qt(0.95, df = 14)

# the value 0.95 in the qt() call should
# read 0.975
```

```{r CI2-hint-5}
# The two lines should read

# Calculate the SE
SE1 <- sd(penguins)/sqrt(15)

# Calculate the value of t
t1 <- qt(0.975, df = 14)
```

Now you compare your penguin weights with a second set of weights from a population which you know has been feeding in an area where the krill population has been severely reduced by a combination of human fishing and climate effects. This time you managed to get the weights of 17 penguins, and the values you get are (in Kg):

3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1, 3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1

Just looking at these values you can see that these penguins have some individuals that are pretty light by comparison with your previous sample. Some are not, however, and one is heavier than the heaviest penguin in your previous sample. Maybe we could get some guidance by comparing the mean and 95% confidence interval values for our two samples. See if you can adapt the code from the previous example to calculate the new values.

```{r CI3, exercise = TRUE, exercise.lines = 35}
penguins2 <- c(3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1,
               3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1)

```

```{r CI3-hint-1}
# You need to change the variable name
# to "penguins2" where necessary, and
# you need to change the sample size and
# df values because your sample size
# has changed
```

```{r CI3-hint-2}
# You need to change the variable name
# to "penguins2" where necessary, and
# you need to change the sample size and
# df values because your sample size
# has changed

# The sample size is 17 so the
# sd needs to be divided by the square
# root of 17 to calculate the SE.
```

```{r CI3-hint-3}
# You need to change the variable name
# to "penguins2" where necessary, and
# you need to change the sample size and
# df values because your sample size
# has changed

# The sample size is 17 so the
# sd needs to be divided by the square
# root of 17 to calculate the SE.
#
# The df for the calculation of t
# should be 16 (17-1)
```

```{r CI3-hint-4}
# The solution is:

penguins2 <- c(3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1,
               3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1)

# Calculate the mean
mean2 <- mean(penguins2)

# Calculate the SE
SE2 <- sd(penguins2)/sqrt(17)

# Calculate the value of t
t2 <- qt(0.975, df = 16)

# Calculate the CIs
lowerCI2 <- mean2 - SE2 * t2
upperCI2 <- mean2 + SE2 * t2

cat("Lower CI =", lowerCI2)
cat("Upper CI =", upperCI2)
```

## Plotting confidence intervals

The confidence intervals for our second penguin sample are distinctly lower than those for the first. Let's plot a graph with *error bars* showing the two regions. You can see that I'm using the `arrows()` function to draw in the error bars, which is the easiest way to do this in base R graphics.

```{r error.bars-setup, echo = FALSE}
penguins <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0,
              6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

# Calculate the mean
mean1 <- mean(penguins)

# Calculate the SE
SE1 <- sd(penguins)/sqrt(15)

# Calculate the value of t
t1 <- qt(0.975, df = 14)

# Calculate the CIs
lowerCI <- mean1 - SE1 * t1
upperCI <- mean1 + SE1 * t1

penguins2 <- c(3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1,
               3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1)

# Calculate the mean
mean2 <- mean(penguins2)

# Calculate the SE
SE2 <- sd(penguins2)/sqrt(17)

# Calculate the value of t
t2 <- qt(0.975, df = 16)

# Calculate the CIs
lowerCI2 <- mean2 - SE2 * t2
upperCI2 <- mean2 + SE2 * t2
```

```{r error.bars}
# Dummy variable for x-axis
X1 <- c(1,2)

# Make vector of the means
Y1 <- c(mean1, mean2)

# Plot graph with means. No x-axis
plot(Y1 ~ X1,
     xlim = c(0.5, 2.5),
     ylim = c(3.5, 6),
     xaxt = "n",
     pch = 16,
     cex = 1.5,
     col = "aquamarine4",
     ylab = "Weight (Kg)",
     xlab = "")

# Add error bars using arrows
arrows(x0 = X1,
       y0 = c(lowerCI, lowerCI2),
       x1 = X1,
       y1 = c(upperCI, upperCI2),
       code = 3,
       angle = 90,
       length = 0.05,
       lwd = 2,
       col = "aquamarine4")

# Draw in x-axis
axis(side = 1,
     at = c(1,2),
     labels = c("Sample 1", "Sample 2"))
```

Looking at the graph, you can see that the 95% confidence intervals for sample 1 and sample 2 don't overlap at all. This means that it's unlikely that the population mean for sample 1 lies anywhere close to the population mean for sample 2, so we can be confident that the differences between our two samples are unlikely to have arisen just from sampling error --- in other words, they are *significantly different*.

As a rule of thumb, if the 95% confidence intervals for two measures don't overlap then they will be significantly different if you were to do a formal hypothesis test, so comparing confidence intervals is a quick way of seeing some of the broad patterns in your data. If they overlap a lot (so that the sample mean of one is within the 95% CIs of the other, for example) then you can be reasonably confident that there is probably not a significant difference. If there is a small amount of overlap, however, you can't be sure and you'd need to go on to do a formal significance test to get an idea of how confident you can be in your observed difference.

Be careful! The idea that if the 95% CIs overlap there is no significant difference is widespread and you will come across it frequently. Unfortunately the real situation is more nuanced: *no overlap = significantly different*, *lots of overlap = not significantly different* but *some overlap = not sure, need to do more analysis*.
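If you'd rather check for overlap in code than by eye, here's a minimal sketch. It assumes the `lowerCI`, `upperCI`, `lowerCI2` and `upperCI2` objects calculated in the chunks above are still available:

```{r eval = FALSE}
# Two intervals overlap if each one starts below
# the point where the other one ends
overlap <- lowerCI <= upperCI2 & lowerCI2 <= upperCI

if (overlap) {
  cat("The 95% CIs overlap: a formal test is needed before drawing conclusions")
} else {
  cat("The 95% CIs don't overlap: the means are likely to be significantly different")
}
```

Remember that, as discussed above, overlapping intervals don't tell you that there is no difference --- they just tell you that you need a formal hypothesis test to decide.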
<br><br> <hr> ## License This content is licensed under a [https://www.gnu.org/licenses/gpl-3.0.en.html](GPL-3) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/07_Confidence_intervals/Confidence_intervals.Rmd
--- title: "Sampling and uncertainty 4: Confidence intervals for the difference between two means" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > How to calculate the confidence interval for the difference between two means and how to interpret its --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE) ``` ## Calculating the confidence interval of the difference between means So far we've only looked at the confidence interval of the mean. You can calculate confidence intervals for lots of other measures as well: proportions, the slopes and intercepts of lines and so on. Here we'll look at one useful example, the confidence interval of the difference between two means. Continuing with our penguin example, we've seen that we can get a rough indication of whether the mean weights of the penguins at our two colonies are different by looking at the confidence intervals of the two means. These show us the likely locations of the population means, and we've concluded that since the CIs don't overlap we can be reasonably sure that the population means are different. Another approach to this is to think about the difference between the two means, rather than the two means themselves. Just as we can think about the sampling distribution of the mean and calculate a standard error for the mean, so too can we think about the sampling distribution of the difference between two means and calculate a standard error for that. The confidence interval of a mean is the region where the population mean $\mu$ would lie 95% of the time if you repeatedly sampled from the population: similarly, the confidence interval for the difference between two sample means, $\bar{x}_1 -\bar{x}_2$ is the region where the difference between the two population means, $\mu_1 - \mu_2$ would lie 95% of the time if you were to repeatedly sample both means and calculate their difference. The formula for the 95% confidence interval of the difference between two means is a bit more complicated than that for a single mean. I'm going to give you the maths but it's not necessary to understand it in detail. Fortunately it follows the same principle: you add or subtract the *standard error of the difference* ($SE_{diff}$), multiplied by a *t* value, from the difference between the sample means. In order to calculate the $SE_{diff}$, however, you have to calculate something called the *pooled standard deviation* or $s_p$ which is calculated as: $$s_p = \sqrt{ \frac{\left( n_1 -1 \right) s^2_1 + \left( n_2 -1 \right) s^2_2}{n_1 + n_2 -2}}$$ Once we know this then (assuming the two population variances are roughly equal) $$SE_{diff} = s_p \sqrt{\frac{1}{n_1} + \frac{1}{n_2}}$$ Once you know this the calculation is the same. The *df* for the calculation of *t* are now $n_1 + n_2 -2$. $$ \textrm{Upper} \: 95\% \: \textrm{CI} = \bar{x}_1 -\bar{x}_2 + t \times SE_{diff}$$ $$ \textrm{Lower} \: 95\% \: \textrm{CI} = \bar{x}_1 -\bar{x}_2 - t \times SE_{diff}$$ Here, $\bar{x}_1$ is the mean of sample 1 and $\bar{x}_2$ is the mean for sample 2. Similarly, $s^2 _1$ is the variance of sample 1 and $s^2 _2$ is that of sample 2, and $n_1$ and $n_2$ are the sample sizes for samples 1 and 2 respectively. The value of t is the appropriate value on $n_1 + n_2 -2$ degrees of freedom. 
See if you can complete the code here to calculate the upper and lower confidence intervals for the difference between the mean weights for our two penguin samples.

```{r CIdiff1, exercise = TRUE, exercise.lines = 45}
penguins1 <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0,
               6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

penguins2 <- c(3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1,
               3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1)

# Calculate means
mean1 <- mean(penguins1)
mean2 <- mean(penguins2)

# Calculate variances
var1 <- var(penguins1)
var2 <- var(penguins2)

# Calculate sample sizes
n1 <- length(penguins1)
n2 <- length(penguins2)

# Value of t
t1 <- qt(0.975, df = n1 + n2 -2)

# Pooled standard deviation
sp <- sqrt(((n1 - 1)*var1 + (n2 - 1)*var2)/
             (n1 + n2 -2))

# Standard error of the difference
SEdiff <-

# Calculate the confidence intervals
upperCI <-

lowerCI <-
```

```{r CIdiff1-hint-1}
# To calculate SEdiff you need to use the formula given above
```

```{r CIdiff1-hint-2}
# To calculate SEdiff you need to use the formula given above

SEdiff <- sp * sqrt((1/n1) + (1/n2))
```

```{r CIdiff1-hint-3}
# For the lower and the upper confidence intervals
# you need to add in the calculation as given in the formula above

# Make sure all your arguments are separated by commas and
# that your brackets all match.
```

```{r CIdiff1-hint-4}
# For the lower and the upper confidence intervals
# you need to add in the calculation as given in
# the formula above

# You also need to ask R to print out the values
# calculated, maybe use cat() for a nice output
```

```{r CIdiff1-hint-5}
# For the lower and the upper confidence intervals
# you need to add in the calculation as given in
# the formula above

# For the upper CI

upperCI <- mean1 - mean2 + t1 * SEdiff

# You also need to ask R to print out the values
# calculated, maybe use cat() for a nice output

cat("Upper 95% CI =", upperCI)
```

```{r CIdiff1-hint-6}
# For the lower and the upper confidence intervals
# you need to add in the calculation as given in
# the formula above

# For the upper CI

upperCI <- mean1 - mean2 + t1 * SEdiff

# For the lower CI

lowerCI <- mean1 - mean2 - t1 * SEdiff

# You also need to ask R to print out the values
# calculated, maybe use cat() for a nice output

cat("Upper 95% CI =", upperCI)
cat("Lower 95% CI =", lowerCI)
```

```{r CIdiff1-hint-7}
# This is the complete solution

penguins1 <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0,
               6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

penguins2 <- c(3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1,
               3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1)

# Calculate means
mean1 <- mean(penguins1)
mean2 <- mean(penguins2)

# Calculate variances
var1 <- var(penguins1)
var2 <- var(penguins2)

# Calculate sample sizes
n1 <- length(penguins1)
n2 <- length(penguins2)

# Value of t
t1 <- qt(0.975, df = n1 + n2 -2)

# Pooled standard deviation
sp <- sqrt(((n1 - 1)*var1 + (n2 - 1)*var2)/
             (n1 + n2 -2))

# Standard error of the difference
SEdiff <- sp * sqrt((1/n1) + (1/n2))

# Calculate the confidence intervals
upperCI <- mean1 - mean2 + t1 * SEdiff
lowerCI <- mean1 - mean2 - t1 * SEdiff

# Print out the values calculated,
# using cat() for a nice output

cat("Upper 95% CI =", upperCI)
cat("Lower 95% CI =", lowerCI)
```

You should get the answer that the confidence intervals for the difference between means are from 0.59 to 1.83. This means that if you were to repeat this exercise many times, 95% of the time the difference in mean weight between the two penguin colonies would be between 0.59 and 1.83 Kg.
There are two important things to notice here: firstly the 95% confidence intervals for this difference do not overlap zero. This tells us that there is a less than 5% chance that the true value is zero or close to it, and this means that we have a *statistically significant* difference between means. The second, arguably even more important, thing to think about is that we have quantified the *effect size* --- the difference in mean weights between the two colonies --- and we have quantified how certain we are of the accuracy of that number. That's really important in all sorts of areas of science.

In the example above we calculated the confidence intervals of the difference between the means from first principles, but fortunately we don't have to do this every time we wish to know this. The `t.test()` function in R carries out a t-test for the statistical significance of the difference between a pair of means, among other things. If you've not met the t-test yet don't worry: what you need to know for our present purposes is that by default it will give the 95% confidence intervals for the difference between two means. Here are our chinstrap penguin means compared using `t.test()`. I'm using the argument `var.equal = TRUE` because otherwise R will assume that we are dealing with means from populations with differing variances and will adjust the output accordingly.

```{r echo = FALSE}
penguins1 <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0, 6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

penguins2 <- c(3.5, 4.7, 3.7, 4.4, 3.3, 4.8, 3.7, 3.7, 3.1, 3.2, 3.4, 3.7, 3.9, 5.2, 6.3, 3.6, 4.1)
```

```{r}
t.test(penguins1, penguins2, var.equal = TRUE)
```

The "95 percent confidence interval:" in this output gives the 95% confidence intervals for the difference between the means, and you can see that the numbers given here are the same as the numbers we calculated above.

As a final exercise, here are some more penguin weights from a third colony.

4.0, 2.9, 4.4, 4.8, 6.5, 3.0, 2.2, 7.4

What is the mean weight of these penguins?

```{r}
penguins3 <- c(4.0, 2.9, 4.4, 4.8, 6.5, 3.0, 2.2, 7.4)

mean(penguins3)
```

The mean of these weights is 4.4 Kg, which is rather low compared to the mean weight of the penguins from the first colony, which is 5.2 Kg. How confident can we be that the difference we are seeing reflects a true difference between the population means, rather than just arising from sampling error? Use the `t.test()` function to compare the mean weight of these animals with the mean weight of the animals from the first colony. Don't worry about the rest of the output, just look at the 95% CIs for the difference between means. Don't forget to set `var.equal = TRUE`.

```{r CIdiff2-setup, echo = FALSE}
penguins1 <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0, 6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

penguins3 <- c(4.0, 2.9, 4.4, 4.8, 6.5, 3.0, 2.2, 7.4)
```

```{r CIdiff2, exercise = TRUE}

```

```{r CIdiff2-hint-1}
# Use t.test() with the names of both
# vectors as arguments and the var.equal
# argument set to TRUE
```

```{r CIdiff2-hint-2}
# Use t.test() with the names of both
# vectors as arguments and the var.equal
# argument set to TRUE

t.test(penguins1, penguins3, var.equal = TRUE)
```

What you can see is that in this case the confidence intervals for the difference overlap zero: the lower value is negative and the upper value is positive.
It's quite plausible that the actual value of the difference between the population means is zero or close to zero, so we have little confidence that the apparent difference between these two colonies isn't simply a consequence of sampling error. That's also reflected in the p-value which we obtain for the t-test, which is greater than 0.05, but that is something for another day.

## Final quiz

Here's a quiz which should test how well you've understood all of the subjects we've covered in this group of tutorials. All questions have a single correct answer.

```{r echo = FALSE, fig.height = 4, fig.width = 5, fig.cap = "Means and 95% confidence intervals for the sample of 15 penguins from colony 1 and 8 penguins from colony 3"}
penguins <- c(6.0, 5.8, 4.5, 3.7, 6.0, 4.3, 5.0, 6.0, 5.8, 3.5, 5.5, 5.7, 6.2, 4.7, 5.7)

# Calculate the mean
mean1 <- mean(penguins)
# Calculate the SE
SE1 <- sd(penguins)/sqrt(15)
# Calculate the value of t
t1 <- qt(0.975, df = 14)
# Calculate the CIs
lowerCI <- mean1 - SE1 * t1
upperCI <- mean1 + SE1 * t1

penguins2 <- c(4.0, 2.9, 4.4, 4.8, 6.5, 3.0, 2.2, 7.4)
# Calculate the mean
mean2 <- mean(penguins2)
# Calculate the SE
SE2 <- sd(penguins2)/sqrt(8)
# Calculate the value of t
t2 <- qt(0.975, df = 7)
# Calculate the CIs
lowerCI2 <- mean2 - SE2 * t2
upperCI2 <- mean2 + SE2 * t2

# Dummy variable for x-axis
X1 <- c(1,2)
# Make vector of the means
Y1 <- c(mean1, mean2)

# Plot graph with means. No x-axis
plot(Y1 ~ X1, xlim = c(0.5, 2.5), ylim = c(3.5, 6), xaxt = "n", pch = 16, cex = 1.5, col = "aquamarine4", ylab = "Weight (Kg)", xlab = "")

# Add error bars using arrows
arrows(x0 = X1, y0 = c(lowerCI, lowerCI2), x1 = X1, y1 = c(upperCI, upperCI2), code = 3, angle = 90, length = 0.05, lwd = 2, col = "aquamarine4")

# Draw in x-axis
axis(side = 1, at = c(1,2), labels = c("Colony 1", "Colony 3"))
```

```{r quiz2, echo = FALSE}
quiz(
  question("The figure above shows the means and 95% confidence intervals for the weights of penguins from the first and third penguin colonies. Why might the confidence interval for colony 3 be wider than that for colony 1?",
    answer("The sample size for colony 3 is much smaller", correct = TRUE),
    answer("The mean is closer to zero"),
    answer("The frequency distribution of weights at colony 3 has strong negative skew"),
    answer("The standard error for colony 3 is negative"),
    answer("The difference between the means is not statistically significant")
  ),
  question("Which of these is the correct calculation for the standard error of a mean?",
    answer("Variance divided by the degrees of freedom"),
    answer("Standard deviation divided by the square root of the sample size", correct = TRUE),
    answer("Standard deviation divided by the degrees of freedom"),
    answer("Sum of squared deviations from the mean divided by the degrees of freedom"),
    answer("qt(0.975, df = 15) * SE")
  ),
  question("As the sample size increases, what happens to the standard error?",
    answer("It becomes a more accurate estimate of the population standard error"),
    answer("It tends towards the population mean"),
    answer("It becomes more accurate"),
    answer("It will tend to decrease", correct = TRUE),
    answer("It approaches a normal distribution")
  ),
  question("If the 95% confidence intervals for the difference between two means are -0.18 and 5.4, and the difference between the means is 2.8, what can you conclude?",
    answer("Repeating the study with a larger sample size would probably find a significant difference"),
    answer("The true value of the difference between the population means is zero"),
    answer("If we were to calculate the confidence intervals for each mean separately they would not overlap"),
    answer("There is a reasonable probability that the true difference between the population means is close to zero", correct = TRUE),
    answer("There is a statistically significant effect")
  ),
  question("Under what circumstances will the sampling distribution of the mean not tend towards a normal distribution as the number of samples increases?",
    answer("When sampling from a bimodal distribution"),
    answer("When the standard deviation of the population is high"),
    answer("When the population being sampled has strong positive skew"),
    answer("When the number of individuals sampled for each mean is small"),
    answer("None of the above: it will always tend towards a normal distribution", correct = TRUE)
  )
)
```

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/08_CIs_comparing_two_means/CIs_for_the_difference_between_two_means.Rmd
--- title: "Basic Analysis 1: Paired t-tests" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > The paired t-test is a simple statistical hypothesis test carried out on paired data, where there are two measurements made per individual. In this tutorial we use the paired t-test as an example to illustrate the basic principles of statistical hypothesis tests, and you will learn how to do a paired t-test in R and how to interpret the results --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) library(ggplot2) library(dplyr) library(patchwork) library(cowplot) graveyard <- c(1:6, 1:6) treatment <- c("control", "shaded") %>% rep(., each = 6) %>% as.factor() ants <- c(57, 9, 3, 27, 2, 11, 34, 7, 3, 9, 0, 0) fruiting <- c(19, 3, 1, 12,1,5,8,2,0,6,0,0) ant_data <- data.frame(graveyard, treatment, ants, fruiting) ``` ## Paired t-tests explained The paired t-test is a statistical *hypothesis test* which allows us to test for effects when data are *paired*: when measurements are taken before and then after some intervention or treatment, for example, or when pairs of individuals are used and one is given one treatment and one used as a control. The following video explains the basic concept behind a statistical test and how the paired t-test works. ![](<https://youtu.be/62ygJ3OUle4>) ## Exercise: zombie ant graveyards and the problem of sampling error Fungal parasites of insects are fascinating to biologists for many reasons, one of which is their tendency to control the behaviour of infected hosts. These fungi tend to kill those insects unfortunate enough to become infected, and they then produce infectious spores from the infected cadaver which are released into the environment. In many cases the dying insect's behaviour is completely changed in ways that increase the probability that these infectious spores will contact a new host and transmit the infection. One of the best known examples of such behavioural manipulation comes from a genus of fungus called *Ophiocordyceps*, which infects ants in tropical forests. The dying, infected ants (so-called "zombie ants") climb up vegetation and then bite onto a leaf or a stalk. This means that the sporulating cadaver is firmly attached to the vegetation, and the height gained means that the infectious spores will rain down onto the ants foraging on the forest floor. ![](images/Ophiocordyceps.png){width=500} **Figure 1: *Ophiocordyceps* infected ants**. a) Recently dead infected ant (*Camponotus rufipes*) holding onto vegetation with its mandibles. b) Ant approximately a week after death with mandibles clasping a leaf. The structure emerging from behind its head (labelled *st* for *stroma*) is the developing fungal fruiting body. c) Ant between one and two weeks after death with a fully developed fruiting body labeled *as* for *ascoma*. Image from Andriolli, F.S., Ishikawa, N.K., Vargas-Isla, R., Cabral, T.S., de Bekker, C. & Baccaro, F.B. (2019) Do zombie ant fungi turn their hosts into light seekers? Behavioral ecology 30, 609--616. How this behavioural manipulation works is still the subject of active research. One question that a group of scientists led by Fernando Andriolli^1^ investigated was the role of light: are the infected ants in fact moving towards the light when they climb upwards in the vegatation? 
To test this idea they identified a number of areas where infected ants are commonly found (known as ant "graveyards") and they shaded half of each graveyard with screens which reduced the amount of light. Each graveyard was monitored weekly for the appearance of new infected ants in both the shaded and unshaded areas for a period of six months. Whether the ants had fungal fruiting bodies present was also recorded. Here are the counts of infected ants and ants with fruiting bodies from each graveyard.

```{r}
print(ant_data)
```

Let's visualise these data to make it easier to spot any patterns. NB these figures are generated using a package called ggplot2, and another called cowplot to do the layout. The code is reproduced at the end of the tutorial if you are interested but it's not important right now.

```{r echo = FALSE, fig.width = 5, fig.height = 4}
p1 <- ggplot(data = ant_data, aes(x = treatment, y = ants)) +
  geom_line(aes(x = treatment, y = ants, group = as.factor(graveyard)), colour = "grey70") +
  geom_point(aes(colour = treatment), size = 3) +
  labs(x = "Treatment", y = "Number of infected ants") +
  theme_bw() +
  theme(legend.position="none")

p2 <- ggplot(data = ant_data, aes(x = treatment, y = fruiting)) +
  geom_line(aes(x = treatment, y = fruiting, group = as.factor(graveyard)), colour = "grey70") +
  geom_point(aes(colour = treatment), size = 3) +
  geom_point(data = ant_data[c(5,11),], aes(x = treatment, y = fruiting, colour = treatment), size = 5) +
  labs(x = "Treatment", y = "Number of ants with fruiting bodies") +
  theme_bw() +
  theme(legend.position="none")

plot_grid(p1, p2, labels = c("A", "B"))
```

**Figure 1.** Numbers of infected ants (A) and numbers of cadavers with fruiting bodies (B) found in the shaded and control areas of the six ant "graveyards". Lines connect values from the same graveyard. NB Larger data points indicate multiple data points plotted in the same place.

As you can see there is a lot of variability between the different graveyards, and also between the control and shaded locations. Just looking at the data, we can't be sure whether there is a difference between the control and shaded parts of each graveyard: there's certainly a tendency for the shaded parts to have fewer ants and also fewer ants with fruiting bodies than the control parts but it's difficult to be confident that this pattern has not arisen because of chance events during sampling.

### Paired t-test for the ant data

To get some more insight into how confident we can be, we need to carry out a paired t-test. To emphasise where these numbers come from, we'll start by calculating our test statistics from first principles, for the ant data only.

As a reminder, the way that a statistical hypothesis test works is that we first calculate the *effect*, which in this case is the mean of the differences between the control and shaded plots ($\bar{x}$). We then transform the effect so that it can be compared with the distribution that we would expect to see if the null hypothesis (in this case, H~0~ is that there is no difference between the control and shaded areas) were true. To do this for a paired sample t-test we divide by the *standard error*, which is the standard deviation ($s$) divided by the square root of the sample size ($n$). This gives us the formula for our test statistic, *t*:

$$ t = \frac{\bar{x}}{s / \sqrt{n}}. $$

Let's calculate *t*. Remember the function to calculate a mean is `mean()` and for standard deviation it's `sd()`.
Here's a code framework: see if you can fill in the parts marked with X to produce our test statistic. ```{r t_calc, exercise = TRUE, exercise.lines = 15} # Calculate differences diffs <- ant_data$ants[ant_data$treatment == "control"] - ant_data$ants[ant_data$treatment == "shaded"] # The mean of the differences mean_diffs <- mean(X) # The variance of the differences sd_diffs <- X # Now calculate t X/(X/sqrt(X)) ``` ```{r t_calc-solution} # Calculate differences diffs <- ant_data$ants[ant_data$treatment == "control"] - ant_data$ants[ant_data$treatment == "shaded"] # The mean of the differences mean_diffs <- mean(diffs) # The variance of the differences sd_diffs <- sd(diffs) # Now calculate t mean_diffs/(sd_diffs/sqrt(6)) ``` Now that we know that our value of *t* is 2.385, we can ask how likely we would be to observe this value, or a greater value, if the null hypothesis were true. This is because we know the *probability distribution* of *t* given the null hypothesis. We can calculate this using the `dt()` function, plot it and then add in our value of *t*. Again, the code for this figure is reproduced at the end of the tutorial if you want to know how this figure was generated. ```{r echo = FALSE} # Set up dummy x-variable X1 <- seq(-4, 4, length.out = 200) # Calculate probability density of t for each value Y1 <- dt(X1, df = 5) # Draw curve plot(Y1 ~ X1, type = "l", xlab = "", ylab = "Probability density of t") # Add arrow and text indicating the location of our test statistic arrows(x0 = 2.385, y0 = 0.15, x1 = 2.385, y1 = 0.05, length = 0.1) text(x = 2.385, y = 0.175, labels = "Calculated \nvalue of t", adj = 0.5) # Draw in areas showing the upper and lower 2.5% quartiles x0 <- min(which(X1 >= qt(0.001, 5))) x1 <- min(which(X1 >= qt(0.025, 5))) x4 <- max(which(X1 <= qt(0.975, 5))) x5 <- max(which(X1 <= qt(0.999, 5))) polygon( x = c(X1[c(x0, x0:x1, x1)]), y = c(0, Y1[x0:x1], 0), col = "#9ecae1", border = NA ) polygon( x = c(X1[c(x4, x4:x5, x5)]), y = c(0, Y1[x4:x5], 0), col = "#9ecae1", border = NA ) ``` **Figure 2.** *t*-distribution on 5 degrees of freedom. The blue shaded areas indicate the area of the graph within which we would expect 5% of the values to be found: in other words, if we selected a value at random from the t-distribution it would be found in one of the shaded areas 5% of the time. As you can see, our value of *t* is within the area of the curve where we would expect to find a value of t drawn from a random distribution 95% of the time. This suggests that if the null hypothesis were true, the probability of finding an effect as large as, or larger than, our observed one, is somewhat higher than 0.05. We can check this by calculating the exact probability of observing this using the `pt()` function. The R code to do this is a little obscure, so here it is. You just need to add the t-value you've calculated where it says `X1` and the degrees of freedom (n-1) where it says `X2`. ```{r t_pvalue, exercise = TRUE} 2 * pt(X1, df = X2, lower.tail = FALSE) ``` ```{r t_pvalue-solution} 2 * pt(2.385, df = 5, lower.tail = FALSE) ``` So the p-value for our paired-sample t-test is equal to 0.0627. This is slightly higher than the conventional cut-off for statistical significance of 0.05, telling us that we can have little confidence that the pattern we've observed is not simply a consequence of sampling error. In other words, the difference between the control and shaded plots is not statistically significant. 
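If you'd like another way of seeing where this p-value comes from, here is a small simulation sketch (not part of the original analysis). We repeatedly generate six differences from a population where the null hypothesis is true, calculate *t* for each sample, and ask how often we see a value at least as extreme as our calculated one:

```{r}
# Simulate the null hypothesis: 10000 sets of six differences drawn
# from a population with a true mean of zero
set.seed(42)
t_null <- replicate(10000, {
  d <- rnorm(6, mean = 0, sd = 1)
  mean(d) / (sd(d) / sqrt(6))
})

# Proportion of simulated t-values at least as extreme as ours,
# counting both tails: this should be close to the p-value from pt()
mean(abs(t_null) >= 2.385)
```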
### Paired t-test in R

In R we don't have to go through this process every time we do a t-test. Instead we can use the `t.test()` function. You need to tell it the name of the data (`ant_data$ants`), a tilde `~` and then the name of the factor separating the data into groups (`ant_data$treatment`). You then need another argument which is `paired = TRUE` so that R knows to do a paired t-test.

```{r t_test1, exercise = TRUE}

```

```{r t_test1-hint-1}
# Make sure you have commas between all your arguments

# Make sure all your cases match: if you add or remove a
# capital letter R gets confused
```

```{r t_test1-hint-2}
# Here's a code framework which you can edit

t.test(ant_data$VARIABLE1 ~ ant_data$VARIABLE2, paired = TRUE)
```

```{r t_test1-hint-3}
# Here's the solution

t.test(ant_data$ants ~ ant_data$treatment, paired = TRUE)
```

As an aside, for a paired t-test, R assumes that the values to be treated as paired are in numerical order, so the first value for the control treatment is paired with the first value for the shaded treatment and so on. This is the case for our data but if you are doing this on your own data it's a good idea to check that this is indeed correct and if not to re-order your data.

This gives us quite a lot of information but if you look at the second row of the main block of output you can see the t-statistic, the df and the p-value, which should be the same as the values you calculated earlier. You also get a confidence interval, which is the confidence interval for the difference between means, and it also gives you the value of the mean difference between the values. You can see that the confidence interval for the difference between the means has a negative lower value and a positive higher value: in other words, our 95% confidence interval includes zero, which is another indicator that the difference between means is not statistically significant.

<br><br>
<hr>

1. Andriolli, F.S., Ishikawa, N.K., Vargas-Isla, R., Cabral, T.S., de Bekker, C. & Baccaro, F.B. (2019) Do zombie ant fungi turn their hosts into light seekers? Behavioral ecology 30, 609--616.

## Paired t-test for the fruiting bodies

Now that we've worked through one example from first principles and then seen how to do it much quicker in R using `t.test()`, let's look at the other response variable in the dataset, namely the numbers of ant cadavers in each area that developed fruiting bodies of the parasitic fungus. These data can be seen in the right-hand panel of figure 1 on the previous page. Have a look and compare them to the data on the number of infected ants. Do you think there is more of a difference between the control and shaded treatments, or less of a difference, or is it difficult to tell?

Let's do the same type of statistical test that we used before, but this time testing whether the mean difference in the numbers of ants with fruiting bodies is statistically significant. See if you can do a paired sample t-test on these data using `t.test()`, and then try to answer the questions. Remember that the variable for the count of cadavers with fruiting bodies in the `ant_data` data frame is called `fruiting`.
```{r t_test2, exercise = TRUE}

```

```{r t_test2-hint-1}
# You can just use the code from the last t-test
# and replace the "ants" variable with the "fruiting" one
```

```{r t_test2-hint-2}
# Make sure you have commas between all your arguments

# Make sure all your cases match: if you add or remove a
# capital letter R gets confused
```

```{r t_test2-hint-3}
# Here's a code framework which you can edit

t.test(ant_data$VARIABLE1 ~ ant_data$VARIABLE2, paired = TRUE)
```

```{r t_test2-hint-4}
# Here's the solution

t.test(ant_data$fruiting ~ ant_data$treatment, paired = TRUE)
```

```{r quiz1, echo=FALSE}
quiz(caption = "Paired t-tests quiz",
  question("Which of the following statements are correct? More than one answer can be correct.",
    answer("The difference between the means is not statistically significant at the p<0.05 level", correct = TRUE),
    answer("The difference between the means is statistically significant at the p<0.05 level", message = "Answer 2. Because the calculated p-value is greater than 0.05 we cannot reject the null hypothesis. Conventionally this means that we do not have a statistically significant difference"),
    answer("Because p>0.05 we are unable to reject the null hypothesis", correct = TRUE),
    answer("Because p<0.05 we cautiously reject the null hypothesis and reject the alternative"),
    answer("A statistically significant result means that we can be sure that there is a real effect", message = "Answer 5. A statistically significant result tells you nothing about how 'real' an effect might be. It only tells you the probability of observing your data if the null hypothesis were correct"),
    answer("A statistically non-significant result would mean that the means were the same", message = "Answer 6. A statistically non-significant result only tells us that we have little confidence that any observed difference is not a consequence of sampling error. It definitely does not mean that we can conclude that two means are the same.")
  ),
  question("The p-values for both tests are close to 0.05 and if you look at the data there certainly seems to be a pattern. What should we do? More than one answer can be correct.",
    answer("We should do a few more replicates of the experiment and reanalyse with a larger sample size. If the result remains non-significant we carry on generating more data until the result is significant", message = "Answer 1. No. Adding extra data until you get a significant result is generally considered to be extremely poor practice."),
    answer("We can conclude that there is some evidence for an effect of shading but we are not confident in this result", correct = TRUE),
    answer("We should examine our data very carefully and look for any possible outliers or subgroups that we can remove to try to push the p-value into significance", message = "Answer 3. No. This is what we call *p-hacking* and is widely regarded as malpractice"),
    answer("If we wanted to get a clearer answer to our question we should repeat the experiment with a larger sample size and analyse the new results separately to the old results", correct = TRUE)
  ),
  question("Regarding the 95% confidence interval in the t-test results, which of the following are correct? More than one answer can be correct.",
    answer("The lower boundary is negative and the upper boundary is positive, meaning that the 95% confidence interval does include zero.
This is further confirmation that we can have little confidence that the mean difference is unlikely to have arisen by sampling error", correct = TRUE), answer("Because the upper and lower boundaries are both a long way from zero we can have a lot of confidence that this result is unlikely to have arisen by sampling error", message = "Answer 2. In fact the lower 95% boundary is below zero, which is confirmation that this apparent effectis not especially strongly supported."), answer("The size of the 95% confidence interval indicates that there is a lot of variance in our data", message = "Answer 3. Confidence intervals are determined by both the amount of variance in the data and also the sample size, so by themselves they do not tell you anything useful about the variance."), answer("The 95% confidence interval tells us the region within which the true value of the difference between the means would lie 95% of the time if we were to do the experiment many times", correct = TRUE), answer("If the confidence intervals were smaller we would have a better estimate of the true value of the difference between means", correct = TRUE) ) ) ``` ## One-tailed versus two tailed tests If you were to look in the paper by Andriolli *et al.* which originally reported these data, you would find that their p-values are rather different from the ones we've calculated above. Whereas our analysis found that neither the overall number of ants (paired sample t-test, t = 2.39, 5df, p = 0.062) nor the number of ant cadavers with fruiting bodies (t = 2.54, 5df, p=0.052) differed significantly between the control and shaded areas, the paper by Andriolli et al reported that both were statistically significant with p-values of 0.031 and 0.026 respectively. How come? Here's the plot of the probability density of *t* with the test statistic indicated and the areas within which we would expect to find the most extreme 5% of values. ```{r echo = FALSE} # Set up dummy x-variable X1 <- seq(-4, 4, length.out = 200) # Calculate probability density of t for each value Y1 <- dt(X1, df = 5) # Draw curve plot(Y1 ~ X1, type = "l", xlab = "", ylab = "Probability density of t") # Add arrow and text indicating the location of our test statistic arrows(x0 = 2.385, y0 = 0.15, x1 = 2.385, y1 = 0.05, length = 0.1) text(x = 2.385, y = 0.185, labels = "Calculated value \nof t for the first \npaired t-test", adj = 0.5, cex = 0.8) # Draw in areas showing the upper and lower 2.5% quartiles x0 <- min(which(X1 >= qt(0.001, 5))) x1 <- min(which(X1 >= qt(0.025, 5))) x4 <- max(which(X1 <= qt(0.975, 5))) x5 <- max(which(X1 <= qt(0.999, 5))) polygon( x = c(X1[c(x0, x0:x1, x1)]), y = c(0, Y1[x0:x1], 0), col = "#9ecae1", border = NA ) polygon( x = c(X1[c(x4, x4:x5, x5)]), y = c(0, Y1[x4:x5], 0), col = "#9ecae1", border = NA ) ``` **Figure 3.** Probability density for the *t* distribution on 5 degrees of freedom. Blue shaded areas indicate the region within which the most extreme 5% of the values lie. The arrow indicates the value of *t* calculated for the first t-test, carried out on the data for the total number of ant cadavers found in the shaded and control areas. This value (2.385) is outside the area containing the most extreme 5% of values, indicating a non-significant result. 
You can see that the areas highlighted correspond to both extreme positive and extreme negative values: in other words, this is a *two-tailed* test and we would record a significant result if our value of *t* had a sufficiently high positive value or a sufficiently low negative value. In the case of a t-test this means that we are not implying any directionality in our predictions, so we would record a significant result whether the effect was that group A had a higher mean than group B or a lower mean, so long as the difference was large enough.

What if we had a strong *a priori* hypothesis that an effect should be directional, so our prediction would be (for a t-test) that one group should have a *larger* mean than the other? In that case, for the conventional threshold of statistical significance of p=0.05 we could only consider the area under the curve containing the largest 5% of our values of *t*, as shown here.

```{r echo = FALSE}
# Set up dummy x-variable
X1 <- seq(-4, 4, length.out = 200)

# Calculate probability density of t for each value
Y1 <- dt(X1, df = 5)

# Draw curve
plot(Y1 ~ X1, type = "l", xlab = "", ylab = "Probability density of t")

# Add arrow and text indicating the location of our test statistic
arrows(x0 = 2.385, y0 = 0.15, x1 = 2.385, y1 = 0.05, length = 0.1)
text(x = 2.385, y = 0.185, labels = "Calculated value \nof t for the first \npaired t-test", adj = 0.5, cex = 0.8)

# Draw in the area showing the largest 5% of values only
x0 <- min(which(X1 >= qt(0.001, 5)))
x1 <- min(which(X1 >= qt(0.025, 5)))
x4 <- max(which(X1 <= qt(0.975, 5)))
x5 <- max(which(X1 <= qt(0.999, 5)))
x6 <- max(which(X1 <= qt(0.95, 5)))

polygon(
  x = c(X1[c(x6, x6:x5, x5)]),
  y = c(0, Y1[x6:x5], 0),
  col = "#9ecae1",
  border = NA
)

# polygon(
#   x = c(X1[c(x0, x0:x1, x1)]),
#   y = c(0, Y1[x0:x1], 0),
#   col = "#9ecae1",
#   border = NA
# )
# 
# polygon(
#   x = c(X1[c(x4, x4:x5, x5)]),
#   y = c(0, Y1[x4:x5], 0),
#   col = "#9ecae1",
#   border = NA
# )
```

**Figure 4.** Probability density for the *t* distribution on 5 degrees of freedom. The shaded area indicates the area containing the largest 5% of values. Note that the value of *t* calculated for the t-test on the total number of ant cadavers (2.385) is now within this region.

This is an example of what we would call a *one-tailed test* because we are *only* considering one tail of the t-distribution. Is this what accounts for the difference between our results and the published results? Here is the code for the first t-test, on the total number of ant cadavers. Try to modify it to do a one-tailed test. We can do a one-tailed test by adding the argument `alternative = "greater"` to our `t.test()` function call (for the hypothesis that one mean is *less* than another we would use `alternative = "less"`).

```{r one_tailed1, exercise = TRUE}
t.test(ant_data$ants ~ ant_data$treatment, paired = TRUE)
```

```{r one_tailed1-solution}
t.test(ant_data$ants ~ ant_data$treatment, paired = TRUE, alternative = "greater")
```

Now do the same for the second t-test, on the numbers of cadavers with fruiting bodies.

```{r one_tailed2, exercise = TRUE}
t.test(ant_data$fruiting ~ ant_data$treatment, paired = TRUE)
```

```{r one_tailed2-solution}
t.test(ant_data$fruiting ~ ant_data$treatment, paired = TRUE, alternative = "greater")
```

Now we have replicated the analysis from the paper and the p-values are the same.
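As a quick sanity check we can use the `pt()` approach from earlier: when the observed effect is in the predicted direction, the one-tailed p-value is simply half the two-tailed one.

```{r}
# One-tailed p-value for the first test (t = 2.385 on 5 df)
pt(2.385, df = 5, lower.tail = FALSE)

# The two-tailed p-value is exactly twice as large
2 * pt(2.385, df = 5, lower.tail = FALSE)
```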
Note that the values of *t* and the degrees of freedom are unchanged from our previous analyses: it is only the p-values and the confidence intervals for the difference that have changed. Were the authors justified in using a one-tailed test in this study? You can make your own mind up on this matter. Most researchers are very cautious about using one tailed tests because they are less conservative (i.e. more likely to give a significant result) than two-tailed tests, and because of a general feeling in the community that people only use one-tailed tests to get significant results when their two-tailed tests don't. As a rule of thumb, you should only use a one-tailed test when your hypothesis has a strong directional element and when your decision to use a one-tailed test was made *before analysing your data*. Ideally you should have some written record of this decision in case you are challenged on the matter later. <br><br> <hr> #### Code for figure 1. ```{r eval = FALSE} # Load the two packages library(ggplot2) library(cowplot) # Generate the first plot p1 <- ggplot(data = ant_data, aes(x = treatment, y = ants)) + geom_line(aes(x = treatment, y = ants, group = as.factor(graveyard)), colour = "grey70") + geom_point(aes(colour = treatment), size = 3) + labs(x = "Treatment", y = "Number of infected ants") + theme_bw() + theme(legend.position="none") # Generate the second plot p2 <- ggplot(data = ant_data, aes(x = treatment, y = fruiting)) + geom_line(aes(x = treatment, y = fruiting, group = as.factor(graveyard)), colour = "grey70") + geom_point(aes(colour = treatment), size = 3) + labs(x = "Treatment", y = "Number of ants with fruiting bodies") + theme_bw() + theme(legend.position="none") # Specify the layout and the labels plot_grid(p1, p2, labels = c("A", "B")) ``` #### Code for figure 2 ```{r eval = FALSE} # Set up dummy x-variable X1 <- seq(-4, 4, length.out = 200) # Calculate probability density of t for each value Y1 <- dt(X1, df = 5) # Draw curve plot(Y1 ~ X1, type = "l", xlab = "", ylab = "Probability density of t") # Add arrow and text indicating the location of our test statistic arrows(x0 = 2.385, y0 = 0.15, x1 = 2.385, y1 = 0.05, length = 0.1) text(x = 2.385, y = 0.175, labels = "Calculated \nvalue of t", adj = 0.5) # Draw in areas showing the upper and lower 2.5% quartiles x0 <- min(which(X1 >= qt(0.001, 5))) x1 <- min(which(X1 >= qt(0.025, 5))) x4 <- max(which(X1 <= qt(0.975, 5))) x5 <- max(which(X1 <= qt(0.999, 5))) polygon( x = c(X1[c(x0, x0:x1, x1)]), y = c(0, Y1[x0:x1], 0), col = "#9ecae1", border = NA ) polygon( x = c(X1[c(x4, x4:x5, x5)]), y = c(0, Y1[x4:x5], 0), col = "#9ecae1", border = NA ) ``` <br><br><hr> ## License This content is licensed under a [https://www.gnu.org/licenses/gpl-3.0.en.html](GPL-3) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/09_Paired_sample_t_tests/Basic_Analysis_1_Paired_sample_t_tests.Rmd
--- title: "Basic Analysis 2: Two-sample t-tests" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > The two-sample t-test is the "classic" t-test which allows us to compare the means between two groups of observations. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) load("weaver.rda") weaver$Treatment <- as.factor(weaver$Treatment) weaver_small <- subset(weaver, weaver$GrpSize <= 4) library(ggplot2) library(dplyr) ``` ## Two sample t-tests explained In the previous tutorial we looked at the paired-sample t-test. This is a test carried out when we are comparing *paired* observations and the null hypothesis is that the mean difference between pairs of observations is zero. In the more common case of unpaired data we use the two-sample t-test, with the null hypothesis being that the difference between the means for the two groups is equal to zero. This video explains how the two-sample t-test works and how it differs from the paired-sample t-test. ![](https://youtu.be/YUDo_8hNdCA) ## Costs of reproduction in a cooperative breeder Reproduction is costly to the individuals reproducing: this is widely accepted in biology and, of course, obvious to anyone who has ever had children. The nature of these costs is not always clear, however --- in addition to straightforward costs in terms of energy and resources used there might be other costs incurred because of the degree of stress that reproducing organisms often experience. One aspect of this is potential oxidative damage caused when reactive oxygen species damage proteins, lipids and DNA. Normally these reactive oxygen species are kept at low levels by the body's built in system of antioxidant defences, but during periods of stress this system might function less efficiently leading to increased oxidative damage during these periods. Data on the role of oxidative stress during breeding is rare, especially from field systems, but in 2015 Cram and co-authors^1^ published a study of the white-browed sparrow weaver, *Plocepasser mahali* which addressed this. White-browed sparrow weavers are cooperatively breeding Southern African birds which live in groups of 2-12 birds. One pair in each group is the dominant pair which monopolises breeding, and the other birds provide assistance by helping provision the young as well as assisting with other tasks. ![](images/white_browed_sparrow_weaver.jpg){width="400"} **Figure 1** White browed sparrow weaver. Photo copyright Peter Steward, released under an [Attribution Non-Commercial 2.0 Creative Commons license](https://creativecommons.org/licenses/by-nc/2.0/). Cram *et al.* carried out an experiment where some nests had eggs removed, meaning that the costs of reproduction were reduced for the birds in the group associated with that nest, and other control nests were left as they were. The birds were caught and a variety of measures taken during the breeding season, including weight, the concentration of malondialdehyde (MDA, a product of lipid peroxidation and an indicator of oxidative stress) and superoxide dismutase activity (SOD, an enzyme which is important in antioxidant defence). 
The idea that they wished to test was that birds rearing young in small groups (four birds or fewer) should pay more costs than birds rearing young in large groups (more than four birds) and that this would be reflected in the response to removing the clutch from the nest: in small groups the effect of egg removal, which removes the costs associated with rearing young, would be stronger than in large groups. The actual analysis carried out by Cram *et al.* involved a complex model fitting process which took account of some non-independence in the data, but here we will focus on using two-sample t-tests to compare responses from birds in small or large groups to clutch removal. Let's start with the weights of the birds. The dataframe is loaded as `weaver` and we need to declare the `Treatment` variable to be a factor: ```{r eval = FALSE} weaver$Treatment <- as.factor(weaver$Treatment) ``` As usual when we import a data set we use `str()` to have a look at its structure and make sure that everything's as it should be. ```{r} str(weaver) ``` We have our response variables: `mass_final`, `MDA_final` and `SOD_final`, plus the ID of the individual bird, the treatment and the group size. We'll analyse the mass data to start with. Before we go any further with our analysis we should plot a graph of mass versus treatment for our large and our small groups. Since our sample size is small we'll use a strip chart rather than a boxplot, which we can do with the `stripchart()` function. Here, the code is all there for the first stripchart which is for birds from groups of four of fewer animals. See if you can add in a similar block of code that will draw the equivalent plot but for birds from large groups with more than four animals. ```{r stripchart1, exercise = TRUE, exercise.lines = 28, fig.width = 6} # Plot two graphs side by side par(mfrow = c(1,2)) # stripchart for birds from small groups stripchart( weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize <= 4, vertical = TRUE, pch = 1, col = "darkgreen", ylab = "Final bird mass (g)", main = "Small groups" ) # stripchart for birds from large groups ``` ```{r stripchart1-hint-1} # Use the code for the first stripchart. # You only need to change the group size # and the title ``` ```{r stripchart1-hint-2} # Here is the solution # Plot two graphs side by side par(mfrow = c(1,2)) # stripchart for birds from small groups stripchart( weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize <= 4, vertical = TRUE, pch = 1, col = "darkgreen", ylab = "Final bird mass (g)", main = "Small groups" ) # stripchart for birds from large groups stripchart( weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize >4, vertical = TRUE, pch = 1, col = "darkgreen", ylab = "Final bird mass (g)", main = "Large groups" ) ``` What we're looking for here is firstly any sort of pattern in the data that might indicate some sort of effect, and secondly any indication of potential problems with the data such as data points with unlikely, anomalous or impossible values such as birds with negative weight or which weighed so much that they couldn't fly; obvious and severe skew or other weird frequency distributions and so on. There aren't any immediately obvious problems like this with these data: the values are all believeable and there's no skew or similar that can be seen. 
In terms of patterns in the data, you can see that there seems to be an effect of clutch removal for the small group birds but not the large group birds, such that birds from groups with clutches removed are all heavier in small groups but not in large groups. We can't have much confidence in this pattern at the moment, however, because we cannot dismiss the idea that this pattern could have arisen by sampling error. The small sample size makes this uncertainty greater.

<br><br><hr>

1. Cram, D.L., Blount, J.D. & Young, A.J. (2015) The oxidative costs of reproduction are group-size dependent in a wild cooperative breeder. Proceedings of the Royal Society B: Biological sciences, 282: 20152031.

## t-test for the weight data in small groups

What we can do is run a statistical test asking whether the difference between the mean bird masses for the two treatments is statistically significant, both for birds from small groups and for birds from large groups. We know that this *two-sample t-test* works in a similar way to the *paired sample t-test* which we looked at in the last tutorial, but this time we are asking whether the *difference between the means* is sufficiently far from zero as to be considered unlikely, rather than the *mean of the differences* which is what we look at in a paired sample test.

The formula to calculate our test statistic, *t* for a two sample t-test with equal or unequal sample sizes is:

$$ t = \frac{\bar{x}_1 - \bar{x}_2}{s_p \sqrt{\frac{1}{n_1} + \frac{1}{n_2}}},$$

where $\bar{x}_1$ and $\bar{x}_2$ are the means of the two groups. $s_p$ is something called the "pooled standard deviation". Its calculation is a bit intimidating but conceptually it's reasonably straightforward --- it is the average standard deviation for all groups in the dataset, with the individual groups *weighted* by their sizes so that larger groups have more influence. To put it another way, you might recall that you can think of the standard deviation as the average distance of all the data in a sample from the mean. The pooled standard deviation is the average distance of all the data in a grouped sample from the group mean.

This is the formula for $s_p$ when there are two groups. Don't worry if you don't like the look of this equation, you don't need to know it to understand the t-test.

$$s_p = \sqrt{\frac{\left(n_1 - 1 \right)s^{2}_1 + \left(n_2 - 1 \right)s^{2}_2}{n_1 + n_2 -2}},$$

where $s^{2}_1$ and $s^{2}_2$ are the variances of groups 1 and 2, and $n_1$ and $n_2$ are their respective sample sizes.

Here is a code framework that should calculate *t* for you if you fill in the missing parts denoted by XXs. Notice that we're using *subscripts* to specify only the data that correspond to certain factor levels in *Treatment* --- if this is something you've forgotten there is a quick reminder below, and you might also want to revise it from your introductory R work. Some things you might want to know:

* The two levels of the `Treatment` factor are `Eggs_removed` and `Eggs_left`
* `mean()` calculates the mean of a vector
* `var()` calculates the variance
* `sqrt()` calculates the square root, and
* `length()` returns the number of items in the vector: this is a way of getting the sample size for a group.
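Here's the quick reminder of how logical subscripting works, using a couple of made-up vectors rather than the real data:

```{r}
# Toy vectors, not the weaver data
mass <- c(4.1, 3.8, 5.0, 4.6)
group <- c("a", "b", "a", "b")

# A logical test inside the square brackets keeps only the values
# of mass for which the test is TRUE
mass[group == "a"]
```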
```{r t_test1, exercise = TRUE, exercise.lines = 20}
# Use subset to create a new data frame with only the birds from small groups
weaver_small <- subset(weaver, weaver$GrpSize <= 4)

# Calculate the means
mean1 <- mean(weaver_small$mass_final[weaver_small$Treatment == "Eggs_removed"])
mean2 <- mean(weaver_small$mass_final[weaver_small$Treatment == "XXXX"])

# Calculate the two variances
var1 <- var(weaver_small$mass_final[weaver_small$Treatment == "XXXXX"])
var2 <- XXX(weaver_small$mass_final[weaver_small$Treatment == "Eggs_left"])

# Calculate the sample sizes
n1 <- length(XXXX)
n2 <- XXXXX(weaver_small$mass_final[weaver_small$Treatment == "Eggs_left"])

# Calculate the pooled standard deviation
sp <- sqrt(((n1-1)*var1 + (n2-1)*var2)/(n1 + n2 -2))

# Calculate t
t_calc <- (XXXXX - XXXXX)/(sp * sqrt(1/n1 + XXXXX/XXXXX))

# Output
cat("The value of t is ", t_calc)
```

```{r t_test1-solution}
# Use subset to create a new data frame with only the birds from small groups
weaver_small <- subset(weaver, weaver$GrpSize <= 4)

# Calculate the means
mean1 <- mean(weaver_small$mass_final[weaver_small$Treatment == "Eggs_removed"])
mean2 <- mean(weaver_small$mass_final[weaver_small$Treatment == "Eggs_left"])

# Calculate the two variances
var1 <- var(weaver_small$mass_final[weaver_small$Treatment == "Eggs_removed"])
var2 <- var(weaver_small$mass_final[weaver_small$Treatment == "Eggs_left"])

# Calculate the sample sizes
n1 <- length(weaver_small$mass_final[weaver_small$Treatment == "Eggs_removed"])
n2 <- length(weaver_small$mass_final[weaver_small$Treatment == "Eggs_left"])

# Calculate the pooled standard deviation
sp <- sqrt(((n1-1)*var1 + (n2-1)*var2)/(n1 + n2 -2))

# Calculate t
t_calc <- (mean1 - mean2)/(sp * sqrt(1/n1 + 1/n2))

# Output
cat("The value of t is ", t_calc)
```

If the null hypothesis, which in this case is that both groups are drawn from populations with equal means, were true, we would expect this value to follow a t-distribution on n~1~ -1 + n~2~ -1 degrees of freedom. n~1~ is 4 and n~2~ is 5, so the number of degrees of freedom is seven. As we did with the paired-sample t-test, we can plot out the probability distribution of *t* on 7 df and see how likely we might be to get a value as big as (or bigger than) our calculated value.
Here, you can see that our calculated value is well within the region of the curve where we would only expect a value of *t* to fall 5% of the time if the null hypothesis were true. This means that if the null hypothesis were true we would only see a difference between the two means as large as, or larger than, the one we got somewhat less than 5% of the time. We can get an exact p-value by asking R: ```{r t_pvalue-solution} 2 * pt(3.42, df = 7, lower.tail = FALSE) ``` What does this p-value mean? ```{r quiz1, echo = FALSE} question("Which of the following statements are correct? More than one answer can be correct.", answer("The difference between the means is not statistically significant", message = "Answer 1. Because the calculated p-value is less than 0.05 we reject the null hypothesis and accept the alternative. Conventionally this means that we have a statistically significant difference"), answer("The difference between the means is statistically significant",correct = TRUE), answer("Because p>0.05 we are unable to reject the null hypothesis"), answer("Because p<0.05 we cautiously reject the null hypothesis and accept the alternative", correct = TRUE), answer("A statistically significant result means that we can be sure that the birds that were weighed from the egg removal treatments weighed more than the others", message = "Answer 5. The birds from the egg removal treatment that were weighed all weighed more than the ones form the control group. This would be true no matter what the p-value was. The statistical test is telling us how confident we can be that the pattern we observe has not arisen by simple random chance. "), answer("A statistically non-significant result would mean that there is no effect of removing eggs", message = "Answer 6. A statistically non-significant result only tells us that we have little confidence that any observed difference is not a consequence of sampling error. Rather than telling us that there is no effect it tells us that if there is an effect, we have failed to detect it.") ) ``` ## t-test for the weight data using the `t.test()` function We've seen that we can calculate our test statistic from first principles and calculate a p-value from that, but of course R has a built in function to do t-tests, called, as you might recall from the paired sample tutorial, `t.test()`. Whereas before we used `subset()` to generate a new data frame with only those birds from small groups, when we're using `t.test()` we can include a `subset = ` argument in the function call itself to analyse only a part of a dataset. For a two-sample t-test we can give the *response variable* (in this case `mass_final`) and the *explanatory variable* (`Treatment`) as the first and last components of a formula, separated by a tilde, much as you would for a plot. So our t-test function call looks like this. ```{r} t.test(weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize <= 4) ``` So that all agrees with our calculated values then and everything's fine? No.... this doesn't give us the same output. The t-value is different (-3.66 versus 3.42 --- don't worry about the signs which are arbitrary for a two-tailed t-test, it's the diffence in absolute values that's important), the degrees of freedom are different and, in this case, fractional (6.37 versus 7) and the p-value is different (0.0095 versus 0.011). Why the difference? The answer can be found in the first line of the output where it reads "Welch Two Sample t-test". 
This is the default option for a two sample t-test in R, and it uses an adjustment of the standard t-test which makes allowance for differences between the variances of the two groups. The 'ordinary' Student's t-test only really works well when the two groups being compared have roughly equal variances. The Welch t-test works when the variances differ between groups. The value of *t* is calculated in a slightly different way, and the degrees of freedom are adjusted according to how different the variances are, which accounts for the differences between the values we've seen here. It's been argued that the Welch t-test should be used a lot more ^2^, and some people have argued that it should be used as standard unless there is good reason not to ^3^. If we want to run a basic Student's t-test without adjusting for unequal variances, we can add an argument to our t-test function call to do this. Have a look at the help file for t.test and see if you can work out how to do this. <details><summary>**Click here for more on t-tests without the Welch method**</summary> Hopefully you found out that the argument to add is `var.equal = TRUE`. See if you can adjust our t-test function call to give a standard t-test. This should give the same values for t, df and p as we calculated in the last section. ```{r welch, exercise = TRUE} t.test(weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize <= 4) ``` ```{r welch-solution} t.test(weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize <= 4, var.equal = TRUE) ``` </details> <br><br> Finally, let's do another t-test to compare body mass between egg-removal and control treatments for birds from groups larger than 4. Remember that with `t.test()` you can use the `subset = ` argument to choose a particular set of observations. We'll use the default option of the Welch t-test which allows us to use unequal variances. ```{r t_test2, exercise = TRUE} ``` ```{r t_test2-solution} # Here is the solution t.test(weaver$mass_final ~ weaver$Treatment, subset = weaver$GrpSize > 4) ``` Take a look at this output, and try to answer the following questions ```{r quiz2, echo = FALSE} question("Which of the following statements are correct? More than one answer can be correct.", answer("The difference between the means is not statistically significant", correct = TRUE), answer("The difference between the means is statistically significant",message = "Answer 2. Because the calculated p-value is greater than 0.05 we have no reason to reject the null hypothesis and accept the alternative. Conventionally this means that we do not have a statistically significant difference"), answer("Because p>0.05 we are unable to reject the null hypothesis, that both means are drawn from populations with the same mean mass", correct = TRUE), answer("Because p<0.05 we cautiously reject the null hypothesis and accept the alternative, that these means are drawn from populations with different mean masses"), answer("We are reasonably confident that the value for the population difference lies somewhere between -1.45 and 2.99", correct = TRUE) ) ``` <br><br><hr> 2. Ruxton, G.D. (2006) The unequal variance t-test is an underused alternative to Student’s t-test and the Mann–Whitney U test. Behavioral ecology: official journal of the International Society for Behavioral Ecology, 17, 688–690. 3. Delacre, M., Lakens, D. & Leys, C. (2017) Why Psychologists Should by Default Use Welch’s t-test Instead of Student’s t-test. International Review of Social Psychology, 30, 92–101. 
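One last detail before we move on: if you are curious about where those fractional degrees of freedom come from, here is a sketch of the Welch-Satterthwaite calculation. It assumes the `mean1`, `mean2`, `var1`, `var2`, `n1` and `n2` objects from the first-principles exercise are still available, so it is shown without being evaluated:

```{r eval = FALSE}
# Squared standard errors for each group
se2_1 <- var1 / n1
se2_2 <- var2 / n2

# Welch's t uses the separate group variances rather than a
# pooled standard deviation
t_welch <- (mean1 - mean2) / sqrt(se2_1 + se2_2)

# The Welch-Satterthwaite approximation: the more unequal the
# variances, the further the df are reduced below n1 + n2 - 2
df_welch <- (se2_1 + se2_2)^2 /
  (se2_1^2 / (n1 - 1) + se2_2^2 / (n2 - 1))

t_welch   # matches the t from t.test() apart from its sign
df_welch  # roughly 6.37 for these data
```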
## Testing for oxidative stress in the small groups

Now we will look at the data on oxidative stress, specifically the malondialdehyde (MDA) levels and superoxide dismutase (SOD) activity. To keep things simple we'll just look at birds from the small groups, so we'll generate a new data frame that just has the data for groups of four birds or fewer (you might recall we did something similar to plot the mass data):

```{r eval = FALSE}
weaver_small <- subset(weaver, weaver$GrpSize <= 4)
```

As always, we'll start by visualising our data. We'll use a strip chart to visualise the differences between the egg removal and control treatment birds, starting with the MDA data. This code was previously used to plot the mass data: see if you can modify it to draw the MDA data from our `weaver_small` data frame.

```{r stripchart_2, exercise = TRUE, exercise.lines = 12}
# stripchart for birds from small groups
stripchart(
  weaver$mass_final ~ weaver$Treatment,
  subset = weaver$GrpSize <= 4,
  vertical = TRUE,
  pch = 1,
  col = "darkgreen",
  ylab = "Final bird mass (g)",
  main = "Small groups"
)
```

```{r stripchart_2-hint-1}
# You need to:
# 1. Change the name of the data frame from
# "weaver" to "weaver_small" throughout
#
# 2. Get rid of the subset = arguments in the
# stripchart function calls
#
# 3. Change the name of the first variable
# in the response ~ explanatory formula that
# is at the start of the stripchart function
# call
#
# 4. Change titles and axis labels as appropriate
```

```{r stripchart_2-hint-2}
# Make sure there is a comma between each argument
# and that all your brackets match
```

```{r stripchart_2-hint-3}
# Here's the solution
# stripchart for MDA
stripchart(
  weaver_small$MDA_final ~ weaver_small$Treatment,
  vertical = TRUE,
  pch = 1,
  col = "darkgreen",
  ylab = "MDA level",
  main = "MDA"
)
```

Looking at this plot, we can see that there appears to be something of a difference between the two treatments, with the birds from the egg removal groups having, in general, lower MDA levels than those from the control groups. We can also see that there are no obvious red flags in terms of data points with extreme values or obviously problematic data distributions. Let's do our t-test. We'll use the default Welch t-test.

```{r MDA_t_test, exercise = TRUE}

```

```{r MDA_t_test-hint-1}
# Remember to use data from the weaver_small data frame
# The response variable is MDA_final
# The explanatory variable is Treatment
# You don't need to subset the data or
# specify anything else
```

```{r MDA_t_test-hint-2}
# Here is the solution:
t.test(weaver_small$MDA_final ~ weaver_small$Treatment)
```

Take a look at this output, and try to answer the following questions.

```{r quiz3, echo = FALSE}
question("Which of the following statements are correct? More than one answer can be correct.",
  answer("The difference between the means is not statistically significant", message = "Answer 1. Because the calculated p-value is less than 0.05 we cautiously reject the null hypothesis and accept the alternative. Conventionally this means that we do have a statistically significant difference"),
  answer("The difference between the means is statistically significant", correct = TRUE),
  answer("Both upper and lower confidence intervals have the same sign, indicating that we do not have confidence that the population value for the difference between means is not zero", message = "Answer 3. Both upper and lower CIs are positive, so the 95% confidence interval does not include zero, meaning that we can have some confidence that the population value is not zero"),
  answer("Because the lower confidence interval is very close to zero we have to be very cautious about this result even though the p-value is just below the threshold for significance", correct = TRUE),
  answer("The effect of removing eggs is that MDA levels are lower by a value that is most probably between 1.71 and 0.007", correct = TRUE)
)
```

The last dataset to look at is the SOD data. Hopefully by now you've got the hang of this. First of all you need to visualise your data. Here's the code for the MDA plot; see if you can modify it for the SOD data.

```{r stripchart_3, exercise = TRUE, exercise.lines = 10}
# stripchart for MDA
stripchart(
  weaver_small$MDA_final ~ weaver_small$Treatment,
  vertical = TRUE,
  pch = 1,
  col = "darkgreen",
  ylab = "MDA level",
  main = "MDA"
)
```

```{r stripchart_3-hint-1}
# Change the name of the first variable in the formula
# Change axis labels and title as appropriate
```

```{r stripchart_3-hint-2}
# This is the solution
# stripchart for SOD
stripchart(
  weaver_small$SOD_final ~ weaver_small$Treatment,
  vertical = TRUE,
  pch = 1,
  col = "darkgreen",
  ylab = "Superoxide Dismutase activity",
  main = "SOD"
)
```

Once again, there don't seem to be any problematic data points or obviously difficult distributions. It's hard to see if there's any difference between the two groups, but we can run a t-test which should help us out.

```{r SOD_t_test, exercise = TRUE}

```

```{r SOD_t_test-hint-1}
# You just need to use the same code
# as for MDA and replace the first
# variable in the formula
```

```{r SOD_t_test-hint-2}
# This is the solution
t.test(weaver_small$SOD_final ~ weaver_small$Treatment)
```

If you've got this far you won't be surprised by this final quiz about the SOD t-test output.

```{r quiz4, echo = FALSE}
question("Which of the following statements are correct? More than one answer can be correct.",
  answer("The difference between the means is not statistically significant", correct = TRUE),
  answer("The difference between the means is statistically significant", message = "Answer 2. Because the calculated p-value is greater than 0.05 we do not have any reason to reject the null hypothesis and accept the alternative. Conventionally this means that we do not have a statistically significant difference"),
  answer("Both upper and lower confidence interval boundaries have the same sign, indicating that we do not have confidence that the population value for the difference between means is not zero", message = "Answer 3. The lower CI is negative and the upper is positive, so the 95% confidence interval does include zero, meaning that we can have little confidence that the population value is not zero"),
  answer("Because the upper and lower confidence intervals are roughly symmetrical around zero we can infer that on the basis of these data there is little reason to think there might be an effect", correct = TRUE)
)
```

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Basic Analysis 3: Chi-square tests" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Chi-square tests are widely used to test assumptions about the distribution of counts (frequencies) of observations in different categories. Here we learn ho to do the two common types: Chi-square tests with "contingency tables" and Chi-square tests comparing expected and observed frequencies --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) plague <- matrix(data = c(158, 81, 331, 549), nrow = 2) colnames(plague) <- c("Plague present", "Plague absent") rownames(plague) <- c("High diversity", "Low diversity") plague_expected <- matrix(data = c(104.4424, 134.5576, 384.5576, 495.4424), nrow = 2) colnames(plague_expected) <- c("Plague present", "Plague absent") rownames(plague_expected) <- c("High diversity", "Low diversity") ``` ## Chi-square tests explained This video has an explanation of the basic principles behind the chi-square test, illustrated with a reanalysis of a very important dataset. ![](https://youtu.be/-EyswWU8GPs) ## Bubonic plague and rodent diversity in China Many people think that bubonic plague is a disease of the past and not something to worry about today. This is not, in fact, the case: plague still infects people in many parts of the world, including Southern and Central Africa, South America, the USA and South-East Asia. As is commonly known, the causative agent of plague is the bacterium *Yersinia pestis*, which is transmitted to humans when they are bitten by rat fleas which are themselves infected. Rat fleas don't just bite rats, and a number of rodent species are implicated as plague hosts which can lead to transmission to humans, so it's important to undrstand how rodent communities relate to plague incidence in people. As part of a study of plague incidence in China Zhe Sun and a large group of coworkers^1^ assembled a dataset of recorded plague cases from the beginning of the "third pandemic" of bubonic plague in the late 18th Century until the 1960s. They divided China into 1119 1º grid cells and classified each one as either having plague present (239) or absent (880). Each cell was then further classified as having a high-diversity (\> average number of species) or low diversity (\< average number of species) of rodents which were indicated as being likely plague hosts by a separate analysis. This gives us a table of data which looks like this: ```{r} plague <- matrix(data = c(158, 81, 331, 549), nrow = 2) colnames(plague) <- c("Plague present", "Plague absent") rownames(plague) <- c("High diversity", "Low diversity") print(plague) ``` Just looking at the table we can see that the proportion of grid squares with plague recorded as present is higher in the high diversity than the low diversity squares. This suggests that there is some association between rodent species diversity and the presence of plague, but we have little idea of how likely it is that this apparent association could have arisen by random chance. To test the idea that this apparent association is simply a product of sampling error, we can do a chi-square test. To do this, we firstly calculate the expected values for each cell. These are the values that we would predict if there were no association between rodent diversity and plague presence. 
The expected value for each cell is:

$$Expected\: value = \frac{Column\: total \times Row\: total}{Grand\: total}$$

We can calculate these easily and then assemble our full *contingency table*.

```{r}
Col_totals <- colSums(plague)
Row_totals <- rowSums(plague)
Grand_total <- sum(plague)

plague2 <- cbind(plague, Row_totals)
plague2 <- rbind(plague2, c(Col_totals, Grand_total))

colnames(plague2)[3] <- "Totals"
rownames(plague2)[3] <- "Totals"

print(plague2)
```

Now we can generate a new matrix with the calculated expected values for each cell. There are some blanks in the code indicated by XXXXX - try to fill them in.

```{r exp_matrix, exercise = TRUE, exercise.lines = 12}
# Generate vector of data
plague_expect <- c(239 * 489/1119,   # top left hand
                   XXXXX * 630/1119, # bottom left hand
                   880 * 489/XXXXX,  # top right hand
                   XXXXX * XXXXX/1119) # bottom right hand

# Make it into a 2x2 matrix
plague_expected <- matrix(plague_expect, nrow = 2)
colnames(plague_expected) <- c("Plague present", "Plague absent")
rownames(plague_expected) <- c("High diversity", "Low diversity")

print(plague_expected)
```

```{r exp_matrix-hint-1}
# You can find all the relevant numbers in the row and column totals of the plague2 matrix
```

```{r exp_matrix-hint-2}
# This is the solution

# Generate vector of data
plague_expect <- c(239 * 489/1119, # top left hand
                   239 * 630/1119, # bottom left hand
                   880 * 489/1119, # top right hand
                   880 * 630/1119) # bottom right hand

# Make it into a 2x2 matrix
plague_expected <- matrix(plague_expect, nrow = 2)
colnames(plague_expected) <- c("Plague present", "Plague absent")
rownames(plague_expected) <- c("High diversity", "Low diversity")

print(plague_expected)
```

Now we have the expected values as well as the observed values. If there were no association between plague presence and rodent diversity then our expected values should be roughly equal to our observed values: in other words, our observed values minus our expected values should be roughly equal to zero for each cell in the table. Are they?

```{r}
plague - plague_expected
```

Those numbers look rather large, which might indicate that the observed counts of grid cells are not distributed as we would expect with no association. What we need to do is to convert these differences between observed and expected frequencies into a value which summarises how different our observed frequencies are from the expected values, and which would be distributed on a probability distribution that we can use to assess how likely or unlikely we would be to see this sort of difference simply from sampling error. We do this by calculating a chi-square value (AKA a $\chi^2$ value) as follows:

$$\chi^2 = \sum{\frac{\left(observed - expected\right)^2}{expected}}$$

In other words, we take those differences between the observed and expected values that we calculated above, we square them, divide each one by the appropriate expected value and then add all of the values we've calculated together. Here's some code to do this. See if you can fill in the parts marked XXXXX.
```{r chisq1, exercise = TRUE, exercise.lines = 10}
# Calculate differences between observed and expected
differences <- plague - plague_expected

# Square them
differences_squared <- XXXXX

# Divide by the expected values
chisq1 <- differences_squared/XXXXX

# Calculate Chi-square test statistic
chisq_test <- XXXXX(chisq1)

# Print result
cat("The calculated value of chi-square is", chisq_test)
```

```{r chisq1-hint-1}
# Use the sum() function to add everything together
#
# To square each value in a vector or a matrix, use
# name^2
```

```{r chisq1-hint-2}
# This is the solution:

# Calculate differences between observed and expected
differences <- plague - plague_expected

# Square them
differences_squared <- differences^2

# Divide by the expected values
chisq1 <- differences_squared/plague_expected

# Calculate Chi-square test statistic
chisq_test <- sum(chisq1)

# Print result
cat("The calculated value of chi-square is", chisq_test)
```

OK. We've calculated our test statistic. If the null hypothesis of no association were true, and we were to sample thousands of times in the same way and calculate this value each time, its distribution would follow the theoretical chi-square distribution on a certain number of degrees of freedom. For a chi-square test calculated on a contingency table, as we have done, the degrees of freedom are equal to (the number of rows - 1) multiplied by (the number of columns - 1). Since we have two rows and two columns that gives us (2-1) \* (2-1) df, which is equal to 1.

Now that we know our test statistic, and we know how we would expect it to be distributed were the null hypothesis true, we can ask how likely we would be to see a value as big, or bigger, than the number we've calculated. We can do this with the `pchisq()` function as follows:

```{r}
pchisq(q=62, df=1, lower.tail=FALSE)
```

This is the p-value for our chi-square test. As you can see it's a very small number indeed, indicating that the probability of finding the pattern in the data that we have observed by sampling error alone is very, very small.

Unsurprisingly, R has a built-in function that will do a chi-square test for you so you don't have to go through the calculations we did above. The function is `chisq.test()` and if you give it a matrix as an argument it will carry out a chi-square test for a contingency table. The only difference is that if the matrix is a 2x2 matrix, as ours is, R will automatically apply something called *Yates' Correction for Continuity*. This is an adjustment to the calculation to account for the way that some of the assumptions behind the test don't really work perfectly with 2x2 tables, especially when one or more cells in the table has an expected value of 5 or less. This analysis doesn't really need Yates' correction because the expected values are all high, but it doesn't make a lot of difference to the final result.

See if you can get R to do a chi-square test. You just need to use the `chisq.test()` function with the `plague` matrix as the only argument.

```{r chisquare_test1, exercise = TRUE}

```

```{r chisquare_test1-solution}
# This is the solution
chisq.test(plague)
```

You can see that Yates' correction makes the chi-square value slightly different, and also the p-value, but the differences are small and the overall conclusion is the same: the apparent association between rodent diversity and plague, with grid squares more likely to have plague when rodent diversity is high, is very unlikely to have arisen by chance.
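If you are curious about what Yates' correction actually does to the calculation, here is a minimal sketch using the `plague` and `plague_expected` matrices from earlier. The correction simply shrinks the size of each observed minus expected difference by 0.5 before squaring:

```{r}
# Chi-square with Yates' continuity correction, calculated by hand:
# each absolute difference is reduced by 0.5 before squaring
chisq_yates <- sum((abs(plague - plague_expected) - 0.5)^2 / plague_expected)
chisq_yates

# The p-value, on 1 degree of freedom as before
pchisq(chisq_yates, df = 1, lower.tail = FALSE)
```

These values should match the output of `chisq.test(plague)`, give or take a little rounding.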
To help us further interpret this analysis, we can ask R to tell us the *residuals* from the chi-square test. In the case of this particular analysis, the residuals are the differences between the observed and expected values, adjusted by dividing by the square root of the expected value. This adjusts the observed minus expected values by the sample size, and these residuals can tell us which cells in the table contribute the most to the overall result.

```{r}
chisq.test(plague)$residuals
```

Here, positive values indicate that there is a positive association: so cells with plague present are strongly associated with high rodent diversity, and cells without plague are associated more weakly with low rodent diversity. Negative values indicate the opposite, so low rodent diversity is strongly negatively associated with the presence of plague and high rodent diversity is more weakly negatively associated with the absence of plague.

<br><br>

<hr>

1. Sun, Z., Xu, L., Schmid, B.V., Dean, K.R., Zhang, Z., Xie, Y., Fang, X., Wang, S., Liu, Q., Lyu, B., Wan, X., Xu, J., Stenseth, N.C. & Xu, B. (2019) Human plague system associated with rodent diversity and other environmental factors. Royal Society open science, 6, 190216.

## Habitat preferences in a reef fish

Many reef fish have larval stages which develop in open water and only settle onto a reef as they develop towards adulthood. As part of this process they make choices about where to settle based on sensory cues that might include both visual and olfactory components. To try to understand this process, John Majoris and co-workers published a study in 2018^2^ in which they tested the habitat preferences of young neon gobies *Elacatinus lori* on the barrier reef in Belize. Adult *E. lori* live inside tube sponges, and Majoris *et al.* collected freshly settled *E. lori* individuals and carried out a series of choice tests to examine how they choose sponges to settle in.

![](images/E_lori.png){width="400"}

**Figure 1** Top panel: Neon goby, *Elacatinus lori*, wikimedia commons. Bottom panels: two yellow tube sponges, typical of the habitat where the fish lives. Photos from Majoris *et al*, 2018.

These fish are found more often inhabiting a yellow sponge of a species called *Aplysina fistularis*, and more rarely in a similar species, *Agelas conifera*, which is brown in colour. As one part of the experiment, fish were presented with a choice between brown and yellow tube sponges. To investigate what sensory cues were important in the choice, some sponges were covered with a clear plastic cylinder, allowing visual information but not olfactory, and some were covered with a mesh, allowing olfactory cues but not visual. When both visual and olfactory cues were allowed, 25 fish chose one of the yellow sponges and 5 one of the brown ones. When visual cues only were allowed 21 chose a yellow sponge and 10 a brown one, but when olfactory cues only were allowed, 11 fish chose one of the yellow sponges and 9 one of the brown ones.

If the fish were choosing sponges at random we would expect that half of them would choose the yellow and half the brown sponges. Looking at these data there is certainly a bias towards yellow sponges when all cues or just visual cues are allowed, but how sure are we that this pattern hasn't arisen just by chance? We can use a slightly different version of the chi-square test here, which is used when we know what the expected frequency of our response variable is.
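Before we hand the calculation over to R's built-in function, it might help to see what this goodness-of-fit version of the chi-square calculation looks like by hand. Here is a quick sketch for the first experiment, where 25 fish chose yellow sponges and 5 chose brown: with no preference operating we would expect 15 of each.

```{r}
observed <- c(25, 5)                    # yellow, brown
expected <- sum(observed) * c(0.5, 0.5) # 15 and 15 under no preference

# Same chi-square formula as before
chisq_fish <- sum((observed - expected)^2 / expected)
chisq_fish

# Degrees of freedom = number of categories - 1 = 1
pchisq(chisq_fish, df = 1, lower.tail = FALSE)
```

Note that R does not apply Yates' correction here because this is not a 2x2 contingency table, so these values should match the `chisq.test()` output below exactly.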
In this case, if there were no choice operating we would expect the gobies to choose one of the brown sponges 50% of the time. To test whether the fish choice differs significantly from this in the first case, with all sensory cues allowed, we add an argument `p = c(0.5, 0.5)` to our `chisq.test()` function call. This tells `chisq.test()` to compare the observed frequencies with what would be expected were the probability of choosing a yellow or brown sponge equal.

```{r}
# Test to see if preferences deviate from a 50/50
# ratio when all sensory cues are allowed
chisq.test(c(25,5), p = c(0.5, 0.5))
```

Note that since we are only dealing with two data points here I've just used the `c()` function to make a vector within the `chisq.test()` function call to input the data, rather than setting up an object separately.

For the choice experiments where all cues were allowed, then, we have a highly significant pattern which is most unlikely to have arisen by chance. It seems that neon gobies have a strong preference for yellow sponges to live in. How about the other two cases? Try to repeat this analysis for the visual only and then olfactory only cases.

```{r visual, exercise = TRUE}
# Visual only

```

```{r visual-solution}
# Visual only
chisq.test(c(21,10), p = c(0.5, 0.5))
```

```{r olfactory, exercise = TRUE}
# Olfactory only

```

```{r olfactory-solution}
# Olfactory only
chisq.test(c(11,9), p = c(0.5, 0.5))
```

Have a look at the results of all three chi-square tests and see if you can answer these questions.

```{r quiz1, echo = FALSE}
question("Which of the following statements are correct? More than one answer can be correct.",
  answer("When olfactory cues only are allowed the difference between the means is not statistically significant", message = "Answer 1. This is not a test comparing means (that's a t-test). We are comparing frequencies here."),
  answer("When visual cues only are allowed there is a significant preference for yellow sponges", correct = TRUE),
  answer("When olfactory cues only are allowed there is no significant preference for yellow or brown sponges", correct = TRUE),
  answer("These results tell us that these fish are using vision alone to make choices about their habitat", message = "Answer 4. That's not something we can infer from these results. There is certainly evidence that visual cues are important on the scale at which this experiment was conducted, but olfactory cues could be important at other times or scales, and auditory cues could potentially also be important"),
  answer("The p-value of 0.65 for the olfactory only test tells us that there is no effect of olfaction in habitat choice", message = "Answer 5. We always have to be careful when interpreting non-significant results. This result tells us that we have not found any evidence that olfaction is important, but our inability to find evidence does not necessarily mean that there really is no effect"),
  answer("Because the p-value for visual cues only is very close to 0.05 we have to be cautious about interpreting this result", correct = TRUE)
)
```

<br><br><hr>

2\. Majoris, J.E., D'Aloia, C.C., Francis, R.K. & Buston, P.M. (2018) Differential persistence favors habitat preferences that determine the distribution of a reef fish. *Behavioral ecology: official journal of the International Society for Behavioral Ecology*, **29**, 429--439.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Basic Analysis 4: Correlation" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > In this tutorial we move to looking at how to analyse relationships between pairs of variables. Correlation analysis allows us to quantify the strength and direction of these relationships, and test the statistical significance of any relationship. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) load("mhc.rda") ``` ## Correlation analysis explained The video embedded here explains the way that we can calculate a correlation coefficient, what it means and how to test for statistical significance. ![](https://youtu.be/IDDbtfHByKw) ## MHC promiscuity and pathogen diversity The major Histocompatibility complex (MHC) is a region of the vertebrate genome encoding cell-surface proteins that are important in the recognition of foreign organisms such as bacteria and viruses by the immune system. Within humans, there is considerable genetic diversity in the alleles that encode for the MHC. One possible explanation for this is that t is driven by host-pathogen interactions, and one aspect of this that could be important is the variety of antigens which a particular MHC allele can bind to, with more "promiscuous" alleles being able to promote immune activity against a broader range of pathogens and so being more generalist. If these generalist alleles are favoured in regions of high pathogen diversity this could explain some of the geographical variation that is known to exist in MHC alleles. Máté Manczinger and co-workers tested this idea in a paper published in 2019^1^. They collected allele prevalence data for the HLA-DRB1 MHC gene from a number of populations worldwide and used two methods to estimate the promiscuity for the gene for each population: firstly, they used bioinformatics techniques to predict the binding affinities for the alleles in question, and secondly they used empirical data on MHC binding *in vitro,* giving two separate measures. For each population they were then able to calculate the pathogen richness in the corresponding geographical area using publicly available data on the prevalence 168 extracellular pathogens (bacteria, fungi, protists and helminths). They also calculated similar data for intracellular pathogens such as viruses. We'll look at one of their two response variables the *in-vitro* promiscuity levels for the HLA-DRB1 gene, an estimate of how broad the antigen binding capabilities are at a population level. There are two explanatory variables, extracellular pathogen diversity and intracelullar pathogen diversity. Since intracellular pathogens are not exposed to the aspects of the immune response associated with the MHC, the prediction to be tested is that HLA-DRB1 promiscuity should correlate with extracellular but not intracellular pathogen diversity. The data are loaded as a dataframe called `mhc`. As always, we need to check our data have loaded properly. Use `str()` to check the structure of the dataset. ```{r mhc_structure, exercise = TRUE} ``` ```{r mhc_structure-solution} str(mhc) ``` All looks fine. `Intracellular` and `Extracellular` are the two pathogen diversity estimates. `Promiscuity_pred` is the predicted promiscuity from the bioinformatics study and `Promiscuity_in_vivo` is the promiscuity based on empirical lab tests. 
The units for promiscuity are arbitrary because the values have been normalised; the units for pathogen diversity are numbers of species. Some of the latter are fractional because of the way these were estimated. We're focussing on the *in-vitro* promiscuity, and whether this correlates with the extracellular and intracellular pathogen diversities.

As always, we'll start by drawing a graph and looking at the data. In this case, with two variables which are both continuous data, scatterplots are appropriate. Here is the first one, with *in-vitro* promiscuity plotted against extracellular pathogen diversity.

```{r correlation_plot_1}
plot(Promiscuity_in_vitro ~ Extracellular,
     data = mhc,
     pch = 16,
     col = "aquamarine4",
     xlab = "Extracellular pathogen diversity (species)",
     ylab = "Population-level promiscuity")
```

**Figure 1** *In-vitro* MHC promiscuity for 28 human populations plotted against the local diversity of extracellular pathogens.

Looking at Figure 1, we can see that there does indeed seem to be a general trend towards higher antigen binding promiscuity in regions with higher pathogen diversity. Now for the intracellular pathogen diversity. See if you can adapt the code above to draw a new plot.

```{r correlation_plot_2, exercise = TRUE, exercise.lines = 10}

```

```{r correlation_plot_2-hint-1}
# You can use the code for the first plot
# You need to change the name of the x-variable
# and the x-axis label
```

```{r correlation_plot_2-hint-2}
# This is the solution
plot(Promiscuity_in_vitro ~ Intracellular,
     data = mhc,
     pch = 16,
     col = "aquamarine4",
     xlab = "Intracellular pathogen diversity (species)",
     ylab = "Population-level promiscuity")
```

**Figure 2**. *In-vitro* MHC promiscuity for 28 human populations plotted against the local diversity of intracellular pathogens.

Looking at this plot it's much harder to see any relationship between the two variables. In both the intra- and extracellular cases, however, the data are fairly noisy and it would be helpful to firstly quantify how strong the relationship between the two variables is and secondly to ask how likely such a pattern would be to arise simply by sampling error.

<br><br>

<hr>

1\. Manczinger, M., Boross, G., Kemény, L., Müller, V., Lenz, T.L., Papp, B. & Pál, C. (2019) Pathogen diversity drives the evolution of generalist MHC-II alleles in human populations. *PLoS biology*, **17**, e3000131.

## Calculating the correlation coefficient

In this section we'll just consider the relationship between *in-vitro* promiscuity and extracellular pathogens. Let's start by calculating *r*, the correlation coefficient. The formula for *r* is somewhat intimidating:

$$ r = \left. \frac{\Sigma{\left(x-\bar{x}\right) \left(y-\bar{y}\right)}}{n-1} \right/s_xs_y, $$

but it can be broken down into more easily understood parts.

$$ \Sigma{\left(x-\bar{x}\right) \left(y-\bar{y}\right)} $$

is the sum of the products of the differences between the $x$ and $y$ values and their respective means. If there is a positive correlation this will give a positive value and if a negative correlation it will give a negative value. This number is dependent on the sample size, however, with larger samples giving larger values for the same sort of relationship just because there is more data.

$$ \frac{\Sigma{\left(x-\bar{x}\right) \left(y-\bar{y}\right)}}{n-1} $$

is this value divided by the *degrees of freedom* ($n-1$), which corrects for sample size.
This value is called the *covariance*, but it's still not particularly useful here because it will vary depending on the scale that our data are measured on: if one of our variables was human height measured in mm, for example, we would get a larger value for the covariance than if it were human height measured in m.

$$ r = \left. \frac{\Sigma{\left(x-\bar{x}\right) \left(y-\bar{y}\right)}}{n-1} \right/s_xs_y, $$

is the covariance *standardised* by the product of the standard deviations of $x$ and $y$. This takes out the effect of scale, and gives us a value for the correlation coefficient (technically in this case *Pearson's product-moment correlation coefficient* but life's too short for names that long) that will always be between -1 and 1, with -1 indicating a perfect negative correlation, 0 indicating no correlation and +1 indicating a perfect positive correlation.

Let's calculate $r$ for our data. See if you can fill in the bits marked with XXXXX.

```{r correlation_1, exercise = TRUE, exercise.lines = 25}
# Mean of x
mean_x <- mean(mhc$Extracellular)

# Mean of y
mean_y <- XXXXX

# Sample size
n1 <- length(mhc$Promiscuity_in_vitro)

# Standard deviation of x
sd_x <- sd(XXXXX)

# Standard deviation of y
sd_y <- sd(mhc$Promiscuity_in_vitro)

# Covariance
covar_xy <- sum((mhc$Extracellular - mean_x) * XXXXX)/(XXXXX -1)

# r
r1 <- XXXXX/(sd_x * sd_y)

# Print r
cat("The correlation coefficient is", r1)
```

```{r correlation_1-hint-1}
# Work your way methodically through the
# code, thinking about how it relates to
# the equations above.

# Everything you need to know is there,
# you just need to be careful about which
# variable is being used in each place.

# Remember, the x variable is mhc$Extracellular and
# the y variable is mhc$Promiscuity_in_vitro

# Finally, be careful with your brackets
```

```{r correlation_1-hint-2}
# This is the solution

# Mean of x
mean_x <- mean(mhc$Extracellular)

# Mean of y
mean_y <- mean(mhc$Promiscuity_in_vitro)

# Sample size
n1 <- length(mhc$Promiscuity_in_vitro)

# Standard deviation of x
sd_x <- sd(mhc$Extracellular)

# Standard deviation of y
sd_y <- sd(mhc$Promiscuity_in_vitro)

# Covariance
covar_xy <- sum((mhc$Extracellular - mean_x) * (mhc$Promiscuity_in_vitro - mean_y))/(n1 -1)

# r
r1 <- covar_xy/(sd_x * sd_y)

# Print r
cat("The correlation coefficient is", r1)
```

Hopefully you've got a value of 0.674. This is about what we'd expect given what we can see in the plot: a medium-strength positive correlation. What about testing this for significance?

## Testing $r$ for significance

You hopefully recall that when we calculate a *sample mean* $\bar{x}$ we are producing an estimate of the *population mean* $\mu$. In just the same way, the value of $r$ that we've calculated is the *sample correlation coefficient* and is an estimate of the *population correlation coefficient* $\rho$. The null hypothesis that we want to test in this case is that $\rho$ = 0. In other words, if the population-level pattern were that there was no correlation, we want to know the probability of seeing a sample correlation coefficient as big as the one we observed (or one even more extreme).

When we were doing t-tests to compare two sample means, we calculated the *effect*, which was the difference between means, and we standardised it by dividing by a value chosen so that, if the null hypothesis were true, we would expect the result to be distributed on a $t$ distribution.
Having done that we could ask how likely we would be to see the value that we observed if the null hypothesis of no difference were true. We can do the same with $r$, and in fact we can calculate a value based on $r$ that we would expect to follow a t-distribution if the null hypothesis were true. This is calculated as:

$$ t = \frac{r \sqrt{n-2}}{\sqrt{1-r^2}}, $$

and if the null hypothesis were true we would expect it to be distributed on a $t$ distribution with $n-2$ degrees of freedom. We know that r = 0.674 and n = 28, so try to calculate t.

```{r t_1, exercise = TRUE}
t1 <- 0.674*sqrt(XXXXX - XXXXX) / sqrt(1 - XXXXX^2)

cat("The value of t is", t1)
```

```{r t_1-solution}
# This is the solution
t1 <- 0.674*sqrt(28 - 2) / sqrt(1 - 0.674^2)

cat("The value of t is", t1)
```

All we need to do now is to calculate the probability of observing a value of 4.65 or more extreme on a *t* distribution with 26 degrees of freedom, multiplying by two to allow for both tails of the distribution.

```{r}
2*pt(4.65, 26, lower.tail = FALSE)
```

So p is roughly 0.00008, a very small number indeed, indicating that the probability of finding the pattern in the data that we have observed by sampling error alone is very, very small.

## Using R's built-in function

Of course, R has a built-in function to do this analysis, namely `cor.test()`. All you have to do is to give it the names of the two variables in question as arguments and it'll do the rest. NB because we're not implying any direction of effect in a correlation analysis --- the analysis does not make the assumption that promiscuity is caused by diversity or vice-versa --- we don't enter the variables to be analysed as a formula with a tilde `~`, just as two arguments separated by commas.

```{r cor_test_1, exercise=TRUE}

```

```{r cor_test_1-solution}
cor.test(mhc$Promiscuity_in_vitro, mhc$Extracellular)
```

This gives us the value of $r$ at the bottom, labelled `cor`, 95% confidence intervals for our estimate of $r$ (which we didn't calculate) and further up the results the output of the t-test for significance. You can see that the values here are the same as the ones we calculated, give or take a little rounding error.

What about the relationship between *in-vitro* promiscuity and intracellular pathogen diversity? If you remember, the scatterplot for these data seemed to show little correlation, but we need to check this. The variables we want to look at are now mhc\$Promiscuity_in_vitro and mhc\$Intracellular.

```{r cor_test_2, exercise = TRUE}

```

```{r cor_test_2-solution}
cor.test(mhc$Promiscuity_in_vitro, mhc$Intracellular)
```

Have a look at the results from this and compare them with the correlation with extracellular pathogen diversity. Have a go at this quiz.

```{r quiz1, echo = FALSE}
question("Which of the following statements are correct? More than one answer can be correct.",
  answer("The correlation between the MHC promiscuity and intracellular pathogen diversity is statistically significant", message = "Answer 1. Because the calculated p-value is greater than 0.05 we are unable to reject the null hypothesis. This means that we have little confidence that the pattern observed is not the result of sampling error"),
  answer("There is a weak positive correlation between MHC promiscuity and intracellular pathogen diversity, but it is not statistically significant", correct = TRUE),
  answer("Because p>0.05 we are unable to reject the null hypothesis", correct = TRUE),
  answer("Because p>0.05 we cautiously reject the null hypothesis and accept the alternative"),
  answer("The result for the correlation analysis for MHC promiscuity and intracellular pathogen diversity means that we can be sure that there is no relationship between the two", message = "Answer 5. There is not a significant correlation, but that doesn't mean that there is no relationship: there could be a weak relationship that we have not detected because our sample size wasn't large enough, or there could actually be a stronger relationship that we failed to detect because of sampling error. A non-significant result does not tell us that there is no effect or relationship, only that we have no good evidence that there is one."),
  answer("Because the 95% confidence intervals for the correlation coefficient for the first analysis are both positive and a long way from zero, and the p-value is very small indeed, we can be certain that there is a positive correlation between MHC promiscuity and extracellular pathogen diversity", message = "Answer 6. This is certainly good evidence in support of there being a positive correlation but we must always be cautious. Remember that we will get a type 1 error (false positive) 5% of the time when the null hypothesis is true."),
  answer("The confidence intervals for the second analysis tell us that the population value for the correlation coefficient could quite easily be negative", correct = TRUE)
)
```

## Assumptions of correlation analysis and Spearman's Rank Correlation Coefficient

As with all parametric analyses, the correlation analysis we've used depends on the data being analysed having certain properties. These *assumptions* are:

* Data should be approximately normally distributed. Ideally, both of the variables being analysed should be normally distributed, so symmetrical and with the majority of the data close to the mean. In practice, this analysis is not affected by small deviations from normality in the data being analysed, especially when sample sizes are large. If there is obvious skew in one or both variables, however, or if there is some other substantial deviation from normality then you should think about whether this might be impacting your analysis.

* Data should be continuously distributed. This sort of analysis works best when your data can take any value. If the data are categorical or otherwise constrained to only have a few values (satisfied/neutral/dissatisfied or alive/dead or similar) then you shouldn't use this sort of analysis. Integer data (e.g. counts of things) are OK so long as there's a decent range: so if your data go from 12 to 127 with lots of different values that's fine, but if you only have 0, 1 & 2 then probably not.

* The relationship between the two variables should be approximately linear: in other words it should be roughly a straight line rather than a curve.

* A further "assumption" that you'll often see listed is that there should be no major outliers.
This is a bit of a tricky one: anomalous data points will influence the output of the analysis, but of course we have to be very careful in designating data points "outliers" when they will often be legitimate observations that just happen to have rather extreme values. Rather than listing this as an "assumption" it's probably better to just say that you should be aware that major outliers will have a big effect on your results.

If these assumptions are violated in a significant way then there are a number of options available. If the data are positively skewed or the relationship has some types of non-linearity then you might be able to apply some sort of transformation (a log transformation or a square root transformation will often deal with skewed data). Otherwise one popular option is to use a non-parametric equivalent of the Pearson's correlation analysis, such as *Spearman's rank correlation coefficient*. This is a quite simple variation of the parametric analysis: instead of analysing the values themselves, both variables are ranked from the smallest to the largest and a correlation coefficient is computed using the ranks. If you had a dataset like this:

```{r echo = FALSE}
set.seed(2112)
dat1 <- data.frame(matrix(data = round(rnorm(10, 10, 6),1), nrow = 5))
names(dat1) <- c("X1", "Y1")

print(dat1)
```

You can extract the rankings for a variable with the `rank()` function:

```{r}
rank(dat1$X1)
```

So for the first variable the lowest value is the third one, 6.2, so this has rank 1, the second lowest is 6.3 which has rank 2 and so on until we reach the highest value, 15.5, which has rank 5. To calculate r~s~, the Spearman's rank correlation coefficient, we can just plug these ranked data into an ordinary correlation analysis:

```{r}
cor.test(rank(dat1$X1), rank(dat1$Y1))
```

or we can add the argument `method = "spearman"` without ranking the variables ourselves. We're also adding a further argument which we wouldn't normally use, `exact = FALSE`, to make R calculate the p-value in the same way as for an 'ordinary' correlation: otherwise it's calculated using a different method for this analysis.

```{r}
cor.test(dat1$X1, dat1$Y1, method = "spearman", exact = FALSE)
```

The values for r~s~ and the p-value we get are exactly the same for both methods, so you can see how the Spearman's analysis relates to the Pearson's. If you read the paper by Manczinger et al. which we used for the data we've analysed here, you'll see that they used the non-parametric Spearman's rank correlation analysis rather than the Pearson's for their data. Let's have another look and think about why that might be. Here are the two scatterplots again.

```{r fig.width = 5, fig.height = 7, echo = FALSE}
par(mfrow = c(2,1))
par(mar = c(5,4,2,1))

plot(Promiscuity_in_vitro ~ Extracellular,
     data = mhc,
     pch = 16,
     col = "aquamarine4",
     xlab = "Extracellular pathogen diversity (species)",
     ylab = "Population-level promiscuity")

plot(Promiscuity_in_vitro ~ Intracellular,
     data = mhc,
     pch = 16,
     col = "aquamarine4",
     xlab = "Intracellular pathogen diversity (species)",
     ylab = "Population-level promiscuity")
```

What might be causing the original authors' concern? Have a look at the plots and see what you think.

```{r quiz2, echo = FALSE}
question("Which of these might be a problem with these data?",
  answer("Deviation from normality", message = "Answer 1. There's nothing to suggest the sort of deviations from normality that would seriously distort the analysis, such as strong skew. Probably not this one"),
  answer("Data are not continuously distributed", message = "Answer 2. These data are continuously distributed, or close enough that we don't need to worry"),
  answer("Non-linear relationship(s) in the plot with extracellular pathogens", correct = TRUE),
  answer("Non-linear relationships in the plot with intracellular pathogens", message = "Answer 4. There's not really much of a relationship at all and it's hard to see any pattern, linear or otherwise"),
  answer("Outlying datapoints", message = "Answer 5. Maybe. There are certainly a few data points in the top graph that look a bit outside the main spread of data and could be having an effect on the results")
)
```

The main aspect of these data that might be causing a problem is possible non-linearity in the plot of MHC promiscuity versus extracellular pathogen diversity --- for low diversities, it seems that there is more of a flat relationship, which then becomes steeper at higher diversities. We can visualise this better by adding a *non-parametric smoother* to these data. Briefly, this is a technique which estimates the trend at each point using only the data near that point, rather than the whole dataset, so it can show changes in slope.

```{r}
plot(Promiscuity_in_vitro ~ Extracellular,
     data = mhc,
     pch = 16,
     col = "aquamarine4",
     xlab = "Extracellular pathogen diversity (species)",
     ylab = "Population-level promiscuity")

lines(lowess(x = mhc$Extracellular, y = mhc$Promiscuity_in_vitro))
```

You can see this better now. Is this a big enough and obvious enough departure from linearity to invalidate the correlation analysis? Maybe... one thing that we should mention is that this pattern was even stronger in the predicted MHC promiscuity data from the bioinformatics analysis, which we've not analysed here.

Let's finish up by carrying out a Spearman's analysis on MHC promiscuity as related to both intracellular and extracellular pathogen diversity.

```{r spearman_1, exercise = TRUE, exercise.lines = 6}

```

```{r spearman_1-hint-1}
# You can use the same code that we used for
# the parametric analysis, just with the
# method = "spearman" argument added
#
# Remember to check that there's a comma
# between all of your arguments and that
# all your brackets match
```

```{r spearman_1-hint-2}
# This is the solution
cor.test(mhc$Promiscuity_in_vitro, mhc$Extracellular, method = "spearman")

cor.test(mhc$Promiscuity_in_vitro, mhc$Intracellular, method = "spearman")
```

R gives us a warning message because some of the ranks are tied, but this isn't really anything to worry about. You can see that (unsurprisingly) our main conclusions are not affected: there remains a good correlation between population-level MHC promiscuity and extracellular pathogen diversity which is significant at a very low p-value. The relationship between population-level MHC promiscuity and intracellular pathogen diversity is weak at best and certainly not statistically significant.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Linear models 1: Single-factor ANOVA" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Analysis of variance explained: how to partition variance and why this is so useful for telling us about differences between means. --- ```{r setup, include=FALSE} library(learnr) library(gplots) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) load("ragwort.rda") ragwort$inoculum <- as.factor(ragwort$inoculum) ``` ## Linear models introduction This set of tutorials will teach you about one of the most important statistical methods there is, the linear model (also known as the general linear model). Linear modelling is a hugely flexible and powerful way of investigating and describing the important patterns within a dataset, and can easily cope with complex experimental designs. In this first tutorial we'll look at one of the simplest applications of linear modelling, specifically the analysis where there is a single explanatory variable which is a factor with two or more levels. This analysis is commonly referred to as an ANOVA (or more precisely a single-factor ANOVA) but it is also a subset of the analyses possible in the overall general linear model. All the principles we'll see here apply to other linear modelling situations so it's worth spending some time getting to grips with this simple example. ## Fitting an ANOVA ### Introducing the data The data we're going to be analysing come from a paper by Minggang Wang and coauthors published in the *New Phytologist* in 2018^1^. Wang *et al.* were studying the phenomenon of *plant-soil feedback* (PSF) whereby aspects of plant biology are affected by the presence of other plants in the soil before the one in question. The study organism in this case is ragwort, *Jacobea vulgaris*, an important and toxic weed in arable fields in Europe and is also highly invasive in other parts of the world. Ragwort is known to be strongly affected by PSF with plants grown in soil that has previously had plants of the same species growing in it exhibiting severely reduced growth. ![](images/ragwort.jpeg){width="400"} A ragwort plant *Jacobea vulgaris*. Photo R. Knell 2020. As part of a much larger and more wide-ranging study of the effects of PSY on ragwort, Wang *et al.* investigated the effect of soil organisms of different sizes on this PSF effect. To do this they grew individual ragwort plants in soil that was inoculated with water that had been mixed with "conditioned" soil from a pot which a ragwort plant had previously grown in. Before being added to the soil for the new plants, the water was passed through filters of either 1000, 20, 5 or 0.5 µm mesh size. The first of these would exclude soil animals over 1mm in size, the 20 µm filter would remove most of the very small animals that live in soil such as nematodes and collembola, the 5 µm mesh would exclude most fungi and the 0.2 µm mesh would filter out most bacteria. Each pot had a single seedling grown in it for 5 weeks after which they were harvested and a variety of measurements taken including the root biomass. The data are already loaded as a data frame called `ragwort`. ### Initial exploratory analysis Let's have a look, starting with a look at the overall structure of the data frame. 
```{r}
str(ragwort)
```

In this data frame we have one factor, `inoculum`, with four levels, and two continuous variables, `root_mass` and `leaf_mass`, which are hopefully self-explanatory. How much replication is there?

```{r}
table(ragwort$inoculum)
```

Most treatments have 10 replicates. The 0.2µm one only has seven, which is explained in the paper: the preparation of these samples was very time consuming. There are 18 replicates of the 5µm treatment and it's not clear why this is. Examining the data doesn't reveal any obvious problems, however, and there's nothing to indicate that some of the rows have been accidentally duplicated, so we'll not worry about this further. Let's visualise the root mass data using a boxplot.

```{r fig.cap = "**Figure 1** Boxplot showing root mass data for each treatment level"}
# Change the order of the levels in inoculum so that they make sense
ragwort$inoculum <- factor(ragwort$inoculum, levels = c("1000µm", "20µm", "5µm", "0.2µm"))

# Plot a boxplot
boxplot(root_mass ~ inoculum,
        xlab = "Treatment",
        ylab = "Root mass (g)",
        data = ragwort,
        col = "aquamarine4")
```

Looking at these boxplots, we can be happy that the data are probably reasonably well-behaved. There are no obvious outliers and the boxplots are sufficiently symmetrical that we don't need to worry about excess amounts of skew in the distribution of these data. When we come to think about the differences between groups, however, it's hard to say much with certainty: the 1000µm treatment has the lowest median and the 5µm treatment the highest, but there's a lot of overlap between them when we look at the interquartile ranges, and overall it's hard to say anything with much confidence about effects of the different filter sizes on root mass. A statistical test of the differences between the means would be helpful.

We could use a t-test to compare each mean with each other mean, but that would give us a total of 6 tests and, because of the large number of tests, an overall type 1 error rate of 0.26, or 26%. This would mean that we would be quite likely to find at least one significant effect that was in fact simply a consequence of sampling error. What we are going to do instead is use an analysis called ANalysis Of VAriance, or ANOVA. Rather than comparing means, this relies on something called *partitioning variance* to detect whether there are differences between means.

<br><br><hr>

1\. Wang, M., Ruan, W., Kostenko, O., Carvalho, S., Hannula, S.E., Mulder, P.P.J., Bu, F., van der Putten, W.H. & Bezemer, T.M. (2019) Removal of soil biota alters soil feedback effects on plant growth and defense chemistry. The New phytologist, 221, 1478–1491.

## Partitioning variance

This video explains what is meant by partitioning variance:

![](https://youtu.be/qh66ScABeM0)

## Generating an ANOVA table

Just partitioning variance doesn't answer any questions about these data --- we want to know if there is a significant difference between our means. To do this we need to take our partitioned data and use it to carry out an ANalysis Of VAriance.

<br>

![](https://youtu.be/9xyve87BJjk)

<br>

### ANOVA tables in R

In the video you saw how to calculate the total, error and treatment sums of squares from first principles and how to put those together in an ANOVA table. In R there are two functions that will do this for you: one is `aov()` and the other is `lm()`.
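Before we hand the calculation over to these functions, here is a minimal by-hand sketch of the sums of squares calculation described in the video, using the `ragwort` data that are already loaded:

```{r}
# Grand mean and the mean root mass for each treatment group
grand_mean <- mean(ragwort$root_mass)
group_means <- tapply(ragwort$root_mass, ragwort$inoculum, mean)

# Total SS: squared deviations of each value from the grand mean
SS_total <- sum((ragwort$root_mass - grand_mean)^2)

# Error SS: squared deviations of each value from its own group mean
SS_error <- sum((ragwort$root_mass - group_means[ragwort$inoculum])^2)

# Treatment SS is whatever is left over
SS_treatment <- SS_total - SS_error

c(total = SS_total, treatment = SS_treatment, error = SS_error)
```

The treatment and error sums of squares calculated here should match the `inoculum` and `Residuals` rows of the ANOVA table we are about to generate.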
`lm()` stands for linear model and refers to the fact that the analysis we're looking at here is in fact one component of the larger family of analyses which are collectively referred to as linear models. `aov()` is very similar to `lm()` but gives its output in a slightly different way. We'll just use `lm()` here because we'll be going on to use it much more with a variety of different analyses.

The way we ask `lm()` to calculate an ANOVA on data like this with a single factor as the *explanatory variable* is to use a formula with `response variable ~ explanatory variable`: so we have the response variable, in this case `root_mass`, then a *tilde* `~` which means something like "as explained by" in R formulas, and then the explanatory variable, in this case `inoculum`. We'll have to tell R to use the `ragwort` dataset as well with an argument stating `data = ragwort`. So to do our ANOVA we'll use the code `lm(root_mass ~ inoculum, data = ragwort)`. When we do this we want to save the *fitted model* (remember, what we are doing here is a straightforward linear model) to an object so that we can access the outputs in a variety of ways. Let's start by fitting the model and then just looking at the ANOVA table, which we do with the `anova()` function.

```{r}
# Calculate ANOVA and save fitted model to object R1
R1 <- lm(root_mass ~ inoculum, data = ragwort)

# Generate ANOVA table
print(anova(R1))
```

This gives us a standard ANOVA table. There isn't a row for the total sum of squares but this is not informative anyway, and the error sum of squares is titled `Residuals`, but the rest is the standard layout. The mean square values are the sums of squares divided by the appropriate df, the F-statistic is the treatment mean square (0.0283) divided by the error mean square (0.00444) and the p-value is the probability of observing an F-value of 6.38 or greater on an F-distribution with 3 and 41 degrees of freedom. Our p-value is a small number and well below 0.05 so we conclude that on the basis of this ANOVA we have at least one mean which is significantly different from at least one other mean.

## Exercise: Exploratory analysis of the leaf mass data

As we saw earlier there is a third variable in the `ragwort` data frame, namely `leaf_mass`. This gives the leaf mass (surprise!) in grams for the same plants that we have fitted an ANOVA to the root masses of. Before any analysis it's important to plot out the data and check for any anomalies, look for evidence of weird or problematic distributions and so on. As we did with the root mass data, this can be done in this case by plotting a boxplot of `leaf_mass` against `inoculum`. Have a go at doing this and remember to label your axes.

```{r boxplot, exercise = TRUE, exercise.cap = "Boxplot", exercise.lines = 6}

```

```{r boxplot-hint-1}
# You can use the code from the root mass boxplot above
# Just remember to change the variable names and the axis labels
# where necessary
```

```{r boxplot-hint-2}
# For the formula just replace root_mass with leaf_mass
# For the axis labels just change the y-axis label
```

```{r boxplot-hint-3}
# This is the solution:
boxplot(leaf_mass ~ inoculum,
        xlab = "Treatment",
        ylab = "Leaf mass (g)",
        data = ragwort,
        col = "aquamarine4")
```

Have a look at the boxplot, think about the patterns you can see and try to answer the following questions. There is a pull-down section below with some discussion of the patterns and the questions.
You might like to revise the tutorial on using boxplots in exploratory data analysis if this is difficult or unfamiliar.

```{r boxplot-quiz, echo=FALSE}
quiz(
  caption = "Boxplot questions",
  question(
    "Is there an overall pattern in these data?",
    answer("No"),
    answer(
      "As the filter size increases the leaf mass tends to decrease",
      correct = TRUE
    ),
    answer(
      "There is a suggestion of a positive correlation between filter size and leaf mass"
    ),
    answer(
      "Because the inter-quartile ranges are so large it is difficult to say"
    )
  ),
  question(
    "What can we say about the shape of the data distributions?",
    answer("There is obvious positive skew"),
    answer("It is not clear but there is some indication of negative skew"),
    answer("It's not possible to say because the sample size is too small"),
    answer(
      "The boxplots are roughly symmetrical indicating no major problems with the data distribution",
      correct = TRUE
    )
  ),
  question(
    "What can we say about the variance of our samples?",
    answer(
      "There is some variability between treatments but overall nothing to suggest a serious problem with heteroskedasticity",
      correct = TRUE
    ),
    answer("The variance is clearly declining as the median values increase"),
    answer("It's not possible to say because the sample size is too small"),
    answer(
      "The clear heteroskedasticity is likely to cause problems with our analysis"
    )
  ),
  question(
    "Are there any outliers which are obviously anomalous?",
    answer(
      "Yes, the boxplot shows three outliers which should be deleted before further analysis"
    ),
    answer(
      "The outliers indicated on the boxplot are not particularly extreme values and are unlikely to be true outliers, rather this is what we might expect to see in a dataset of this nature",
      correct = TRUE
    ),
    answer("It's not possible to say because the sample size is too small"),
    answer(
      "The outlier indicated for the 5µm treatment is clearly a value which we should be very concerned about and is likely to reflect an error in recording or similar"
    )
  )
)
```

<details><summary>**Click here for more on the boxplots and the questions**</summary>

One reason to look at this boxplot is to get an idea of what the general patterns in the data are. Here we can see that there is a general tendency for the leaf mass values to increase with smaller filter sizes, so the 1000µm treatment has the lowest median value and the 0.2µm one the highest.

Another reason to look at this boxplot is to get an idea of the shape of the data and the amount of variability. As with most parametric analyses we are assuming that our data are approximately normally distributed and that the variance is not changing a lot as the values increase or decrease. In terms of the shape of the data distributions, there is little to indicate a problem: yes, some of the boxplots are not perfectly symmetrical, but there is no systematic pattern and certainly nothing to indicate (for example) strong positive skew. The patterns in the variance between groups do hint at possible differences but there's no consistent pattern associated with, for example, the median value, and although the IQR is large for, for example, the 1000µm boxplot, the whiskers are short (compare with the 5µm whiskers) so this is probably not something to worry about.

Boxplots also allow us to check whether there are any data values which are clearly anomalous and which we might need to consider removing before analysis.
The 'outliers' indicated here are not different enough from the rest of the values to cause any worries and there's no reason to take a second look at them or consider removing them before analysis. If we had an outlier which had a value that was very unlikely (e.g. a single value of 5.5g when all the rest are less than 0.7g) or impossible (e.g. a negative value) then things would be different.

</details>

## Exercise: Fitting the ANOVA to the leaf mass data

Now fit an ANOVA to your data, save the fitted object as `L1` and generate the ANOVA table. The code we used before for the root mass ANOVA has been pasted in and you just have to change it.

```{r ANOVA, exercise = TRUE, exercise.cap = "ANOVA", exercise.lines = 6}
# Calculate ANOVA and save fitted model to object R1
R1 <- lm(root_mass ~ inoculum, data = ragwort)

# Generate ANOVA table
anova(R1)
```

```{r ANOVA-hint-1}
# Change the name of the object (R1)
# Change the response variable in the formula to leaf_mass
```

```{r ANOVA-hint-2}
# The line fitting the ANOVA should read:
L1 <- lm(leaf_mass ~ inoculum, data = ragwort)
```

```{r ANOVA-hint-3}
# The line generating the ANOVA table should read
anova(L1)
```

Take a look at the ANOVA table and try to answer these questions.

```{r ANOVA-quiz, echo = FALSE}
quiz(
  caption = "ANOVA questions",
  question(
    "What does the reported p-value for this ANOVA tell us?",
    answer("p < 0.05 so there is no significant difference between the means"),
    answer(
      "p is much less than 0.05 so all the means are significantly different from each other"
    ),
    answer(
      "p < 0.05 so we conclude that filter size and leaf mass are significantly correlated"
    ),
    answer(
      "p < 0.05 indicating that at least one mean is significantly different from at least one other",
      correct = TRUE
    )
  ),
  question(
    "How is the F-statistic calculated in the ANOVA table?",
    answer(
      "Mean Square inoculum / mean square residuals",
      correct = TRUE
    ),
    answer(
      "Df Residuals / Df inoculum"
    ),
    answer("Sum sq inoculum / Mean sq inoculum"),
    answer(
      "Difference between means divided by the standard error of the differences"
    )
  )
)
```

<details><summary>**Click here for more on the ANOVA table**</summary>

The F-statistic is the test statistic for our ANOVA and is calculated by dividing the MS treatment (in this case inoculum) by the MS error (what R calls Residuals). If the null hypothesis were true we would expect this ratio, on average, to be 1. For our F-test we compare the calculated F-statistic with an F-distribution on (in this case) 3 and 41 degrees of freedom to find out the probability of generating a value as big as, or bigger than, 24.97 if the null hypothesis were actually true. Here, that probability is a very small number and considerably below the p=0.05 cut off for statistical significance, so we cautiously conclude that this pattern is unlikely to be a consequence of random sampling.

Because in ANOVA we are not comparing means directly, but are partitioning the variance into that explained by our treatment (MS inoculum) and that which we can't explain (MS error or MS residuals), our significant result tells us nothing about which means are different from which other means. All we can tell is that at least one mean is significantly different from at least one other mean. The next section tells us how to get more detail on where the differences really lie.
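If you'd like to verify that probability for yourself, you can compute the upper tail of the appropriate F-distribution directly with `pf()`. This is just a sketch using the F-statistic and degrees of freedom quoted above (the chunk is not evaluated here):

```{r eval = FALSE}
# Probability of an F-value of 24.97 or greater on an F-distribution
# with 3 and 41 degrees of freedom
pf(24.97, df1 = 3, df2 = 41, lower.tail = FALSE)
```

This should match the p-value in the `Pr(>F)` column of the ANOVA table, give or take rounding of the F-statistic.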
</details>

## Interpreting ANOVA output

### Post-hoc testing

The statistically significant ANOVA tells us that at least one mean value in our root mass data is significantly different from at least one other mean value, but it doesn't tell us any more: we can't say, for example, whether the mean value for the 1000µm treatment is significantly different from the mean value for the 0.2µm treatment. To gain some further insight we have two options.

Firstly, we can use a *post-hoc test*. These are statistical tests which are designed to compensate for the enhanced probability of a *type 1 error* (a false positive) that arises when doing multiple tests. Because of this compensation they tend to be *conservative* --- they have a higher *type 2 error rate* than we would like (a type 2 error is when we fail to detect an effect when one is really there: a false negative). In other words, post-hoc tests have a higher probability of failing to detect an effect when they should do so than we would normally regard as acceptable, so they are usually only used once statistical significance has been established via an ANOVA or similar.

The most common post-hoc test that you'll see used is the Tukey HSD test. This will calculate the difference between means for all six possible pairwise comparisons and give us 95% confidence intervals and a p-value for each of these differences. The `TukeyHSD()` function doesn't work on ANOVA objects fitted with `lm()` but it does on ones fitted with `aov()` so our function call is a little more obscure than we might like.

```{r}
TukeyHSD(aov(R1))
```

Looking at this you can see that the 95% confidence intervals for the estimated differences between means overlap zero in all cases aside from the last two, and this is backed up by the two p-values for these comparisons being less than 0.05. On the basis of this we can conclude that the significant differences in this case are between the mean root mass for the 5µm treatment and the mean root masses for the 1000µm and 20µm treatments.

Let's plot the means and their 95% confidence intervals (for clarity, we are now talking about the confidence intervals for the means themselves, not for the differences between means which are what we get from the Tukey test) and have a look at them. We'll use the `plotmeans()` function from the gplots package.

```{r warning = FALSE, fig.cap = "**Figure 2** Mean and 95% confidence intervals for root mass data at each level of inoculum"}
# Load package gplots
library(gplots)

# Draw the plot
plotmeans(
  ragwort$root_mass ~ ragwort$inoculum,
  connect = FALSE, # No lines connecting points
  barcol = "aquamarine4",
  col = "aquamarine4",
  pch = 16,
  cex = 1.2,
  n.label = FALSE, # No sample size labels
  xlab = "Inoculum",
  ylab = "Mean root mass (g)"
)
```

This is consistent with what the Tukey test is telling us: you can see that the means for the 1000 and 20µm treatments are close to each other and that the 95% CIs for each extend further than the mean for the other, indicating that we have little confidence that the small difference we see between these two means has arisen from anything aside from sampling error. The mean root mass for the 5µm treatment, which the Tukey test indicated is significantly different from the 1000 and 20µm treatments, is quite a lot higher than the other two and importantly the 95% CIs for this mean and the other two don't overlap.
Finally, the 0.2µm treatment mean is intermediate between the two low values for the large mesh sizes and the high value for the 5µm treatment, but the 95% CIs are rather large, with a lot of overlap with all the other treatments. You might recall that this treatment has a lower sample size than the others because of the difficulty of filtering enough inoculum through such a fine filter, and consequently the 95% CIs are larger than for the other treatments and we have less confidence in our estimate of the mean root mass for these plants.

### Using the estimated model coefficients

The second option for interpreting a significant ANOVA is perhaps a little less satisfying if you want to have rigid rules about where the differences lie, but it also has some advantages over the post-hoc test we looked at above. This involves looking at the differences between means as estimated in the process of fitting the ANOVA --- the *model coefficients*. These, and some other information, can be viewed by using the `summary()` function on our `R1` object. The output you get from R when you do this is not intuitively easy to read but is hopefully explained in the following video. The video deals with some more complex cases as well but for the moment ignore these: just watch the first 11 minutes.

![](https://youtu.be/CS5ogBL-MHo)

```{r}
summary(R1)
```

Looking at the coefficients table we can see that the estimate for the first row, labelled `(Intercept)`, is 0.328. This is in fact the estimated mean for the first level of the factor, which in this case is the 0.2µm treatment --- so this is the mean root mass for the plants grown in soil filtered through the 0.2µm mesh. We can check this by calculating the mean ourselves:

```{r}
mean(ragwort$root_mass[ragwort$inoculum == "0.2µm"])
```

R gives us a standard error for this estimate, a t-value and a marginal p-value but this is not especially informative here since all it's doing is telling us that the mean root mass for plants grown in sterile soil is significantly different from zero. This is the next line:

```{r echo = FALSE}
summary(R1)$coefficients[2,,drop = FALSE]
```

This time we have the name of our factor (inoculum) and also the name of the factor level in question (1000µm). The value for the estimate is -0.0782. This is not the estimated mean now, but the estimated difference between the mean for the 0.2µm treatment and the mean for the 1000µm treatment. We can check that as well:

```{r}
# Sterile soil mean minus the estimated difference
mean(ragwort$root_mass[ragwort$inoculum == "0.2µm"]) - 0.0782

# Mean for whole soil
mean(ragwort$root_mass[ragwort$inoculum == "1000µm"])
```

You can see that the two values are the same. The standard error in the coefficients table for this row is now the standard error for the *difference* between the two means. R calculates a t-value and the marginal p-value and this is 0.0219. Interestingly this is less than 0.05, so now we have an indication that the mean root masses for the 0.2µm and 1000µm treatments are significantly different. This is at odds with the results from the Tukey test which gave us a p-value of 0.0966 for this comparison.

Moving on to the third line of the coefficients:

```{r echo = FALSE}
summary(R1)$coefficients[3,,drop = FALSE]
```

this is now telling us the difference between the estimated mean for the treatment with water filtered through a 20µm mesh and the 0.2µm filtered treatment.
This time the relatively large standard error for this difference and the associated p-value suggest that there is not a significant difference between the mean root mass for the 0.2µm treatment and that for the 20µm treatment. This at least is consistent with our Tukey test result which gave us a p-value for this comparison of 0.426. The final row of the coefficients table is for the 5µm treatment compared with the 0.2µm treatment and again this gives us a non-significant comparison which is again consistent with the results from our Tukey test.

All in all, however, the set of contrasts that we have from just using the default table given by `summary()` is not giving us the information we really need to interpret the patterns of differences and similarities in these means as we would like to. There's no indication of whether the 5µm treatment mean is significantly different from the 1000µm mean, for example. Rather than just relying on the default settings, we can ask for a different set of contrasts. One way to do this is to refit the model but with a different factor level specified as the intercept. In this case the 1000µm filtered treatment would be an appropriate choice --- we would then get a set of contrasts allowing us to compare the other means with this one which makes a bit more sense. The `relevel()` function allows us to do this.

```{r}
# Change reference level
ragwort$inoculum <- relevel(ragwort$inoculum, ref = "1000µm")

# Fit new model
R2 <- lm(root_mass ~ inoculum, data = ragwort)

# Ask for the summary table
summary(R2)
```

```{r echo = FALSE}
ragwort$inoculum <- relevel(ragwort$inoculum, ref = "0.2µm")
```

All of the details of this ANOVA are the same for both R1 and R2, the only difference is that the coefficients table is calculated with the 1000µm treatment as the intercept. All of the other filtered treatments have means which are higher than the 1000µm treatment, so the estimated coefficients are all positive numbers. Two of these (5µm and 0.2µm) are sufficiently different from the 1000µm treatment and have small enough standard errors that these differences are significantly different from zero on the marginal t-test provided. The p-value for the 0.2µm treatment is the same as we saw earlier with the default option for the intercept and is rather closer to 0.05 than that for the 5µm treatment, meaning that we have rather less confidence in this particular effect. Consistent with the Tukey test, the 20µm treatment does not appear to be significantly different from the 1000µm treatment.

On the basis of this approach to understanding the differences between our means, and focusing on the results when we have the 1000µm treatment specified for our intercept, we would conclude that the mean root mass for both the 0.2 and the 5µm treatments is significantly higher than that for the 1000µm treatment, whereas that for the 20µm treatment is not. This approach doesn't give us explicit tests for the other differences, such as those between the 0.2 and the 5µm means, but we can look at our graph of means and confidence intervals (figure 2) and conclude that given the similarity of the means and the strong overlap of the 95% confidence intervals we can have little confidence that there is a significant difference between the 0.2 and 5µm treatment means, or those for the 0.2 and 20µm treatments.
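If you would rather have interval estimates for these contrasts than marginal p-values, `confint()` will return 95% confidence intervals for each coefficient in the fitted model. A minimal sketch using the refitted model `R2` (not evaluated here):

```{r eval = FALSE}
# 95% confidence intervals for the intercept (the 1000µm mean) and
# for each treatment contrast with the 1000µm reference level
confint(R2)
```

Contrasts whose intervals exclude zero correspond to the significant marginal t-tests in the coefficients table.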
This example, using the *treatment contrasts* from the coefficients table, is an example of using *contrasts* to assess *planned comparisons* in our data: on the basis of our prior knowledge of the system and guided by the aims of our experiment we might decide before we analyse the results that the contrasts with the 1000µm treatment are what we are really interested in and focus on those as we have done here. If we were really interested in a different set of contrasts that couldn't be addressed by setting one factor level as the intercept then it is actually possible to specify our own choice of contrasts so long as there are fewer contrasts in total than there are factor levels. How to do this is something of an advanced subject and we won't address it here but the process is described in a number of textbooks including Field *et al.* (2012)^2^, Logan (2011)^3^ and Faraway (2014)^4^.

### Post-hoc tests or contrasts?

Post-hoc tests are widely used to help interpret ANOVA results but there are a number of good reasons to be cautious about using them. These include the generally conservative nature of post-hoc tests which gives a higher probability of a *type 2 error* (a false negative) than we would like, the related possibility that you can get a significant ANOVA but no significant differences on a post-hoc test and also the fact that such tests are not available for more complex linear models, where we can really only use the estimated coefficients. The problem with conservatism can be seen here: the contrast between the 1000 and 0.2µm treatments was found to be non-significant by the Tukey test but the treatment contrasts, which are less prone to type 2 errors, found the opposite. As the number of factor levels, and therefore the number of comparisons from the post-hoc test, increases this problem becomes worse. The flip side of this, of course, is that relying on contrasts as we did in the second part of this section doesn't give you as much information and we aren't able to compare every mean with every other mean.

The decision as to whether to use a post-hoc test or to use contrasts really depends on the answers to several questions:

* Based on your prior knowledge of the system and the aims of your experiment, do you need to make comparisons between every treatment mean and every other treatment mean, or are there specific comparisons which will give you the information you need? If the former, use a post-hoc test, if the latter use contrasts.
* Is it important for you to know the significance and effect size of some of the comparisons between means as accurately as possible? If so use contrasts.

Overall, biologists are often lazy and just throw a post-hoc test at an ANOVA result without really thinking about what it is that they're trying to find out (see Ruxton and Beauchamp 2008^5^ for some commentary on this). This is not best practice and much of the time we would be better served by using contrasts, either the treatment contrasts that R gives us by default or something more sophisticated. It's better to think about what you're trying to achieve with your analysis and what the important predictions you want to test are and to focus on them than to just dump everything into a post-hoc test without really getting to grips with what the important tests arising from your experiment are. This doesn't mean that you should never use a post-hoc test of course, just that you should think about how you're going to proceed if you get a significant result *before* you do your ANOVA, not afterwards.
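As an aside, base R also offers something of a middle ground in the form of pairwise t-tests with a correction for multiple comparisons. This is just a sketch of the idea applied to the root mass data, not an approach used elsewhere in this tutorial:

```{r eval = FALSE}
# Pairwise t-tests between all inoculum treatments, with
# Holm-corrected p-values to control the type 1 error rate
pairwise.t.test(ragwort$root_mass, ragwort$inoculum,
                p.adjust.method = "holm")
```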
2 Field, A., Miles, J. & Field, Z. (2012) Discovering Statistics Using R, 1st edition. SAGE Publications Ltd.

3 Logan, M. (2011) Biostatistical Design and Analysis Using R: A Practical Guide, 1st edition. Wiley-Blackwell.

4 Faraway, J.J. (2014) Linear Models with R (Chapman & Hall/CRC Texts in Statistical Science), 2nd edition. Chapman and Hall/CRC.

5 Ruxton, G.D. & Beauchamp, G. (2008) Time for some a priori thinking about post hoc testing. Behavioral Ecology 19, 690–693.

## Exercise: interpreting the leaf mass ANOVA

You've already fitted an ANOVA to the leaf mass data and the object is saved as L1. The ANOVA gave a significant p-value so now we want to know where the differences between the means lie. You'll use both a post-hoc test and also look at the results via the treatment contrasts in the coefficients table.

Let's start by carrying out a Tukey's HSD test on our fitted model. This is the code we used before for the root mass model: try to modify it for the leaf mass model.

```{r prepare-anova, echo = FALSE}
L1 <- lm(leaf_mass ~ inoculum, data = ragwort)
```

```{r tukey, exercise = TRUE, exercise.cap = "Tukey test", exercise.lines = 3, exercise.setup = "prepare-anova"}
TukeyHSD(aov(R1))
```

```{r tukey-hint-1}
# Just replace the R1 object with the
# name of the leaf mass model
```

```{r tukey-hint-2}
# This is the solution:
TukeyHSD(aov(L1))
```

Have a look at the output from this and have a go at this quiz.

```{r tukey-quiz, echo=FALSE}
quiz(
  caption = "Tukey HSD questions",
  question(
    "Which of the following statements are true? More than one answer can be correct.",
    answer("All of the means are different from all of the other means"),
    answer(
      "On average, the mean leaf mass for the 1000µm filter treatment is 0.191g less than that for the 5µm treatment",
      correct = TRUE
    ),
    answer(
      "The 5µm treatment mean is significantly different from the 1000µm and 20µm means, but not from the 0.2µm mean",
      correct = TRUE
    ),
    answer(
      "The 95% confidence intervals for the difference between the 20 and 0.2µm treatments are from -0.123 to -0.207"
    ),
    answer(
      "The biggest difference between means is between the 1000 and 0.2µm treatments",
      correct = TRUE
    ),
    answer(
      "The smallest p-value is for the difference between the 1000 and 0.2µm treatments"
    ),
    answer(
      "All of the comparisons between means are significant except for the comparison between the 0.2 and 5µm treatment means",
      correct = TRUE
    )
  )
)
```

Now let's draw a plot of the means and confidence intervals and see how this corresponds to the differences we've seen from our Tukey HSD test. Here's the code we used for the root mass data, see if you can convert it to show your leaf mass data.
```{r CI_plot, exercise = TRUE, exercise.cap = "Plotting CIs and means", exercise.lines = 15}
# Load package gplots
library(gplots)

# Draw the plot
plotmeans(
  ragwort$root_mass ~ ragwort$inoculum,
  connect = FALSE, # No lines connecting points
  barcol = "aquamarine4",
  col = "aquamarine4",
  pch = 16,
  cex = 1.2,
  n.label = FALSE, # No sample size labels
  xlab = "Inoculum",
  ylab = "Mean root mass (g)"
)
```

```{r CI_plot-hint-1}
# This is actually really easy:
# Just replace the variable for root mass
# in the plot formula with the one for leaf mass
# and change the y-axis label
# You don't need to refer to the ANOVA
# at all
```

```{r CI_plot-hint-2}
# This is the solution

# Load package gplots
library(gplots)

# Draw the plot
plotmeans(
  ragwort$leaf_mass ~ ragwort$inoculum,
  connect = FALSE, # No lines connecting points
  barcol = "aquamarine4",
  col = "aquamarine4",
  pch = 16,
  cex = 1.2,
  n.label = FALSE, # No sample size labels
  xlab = "Inoculum",
  ylab = "Mean leaf mass (g)"
)
```

Have a look at this and have a go at these questions.

```{r CIs-plot-quiz, echo=FALSE}
quiz(
  caption = "Means and CIs plot questions",
  question(
    "For the Tukey test output some of the 95% CIs are negative, but these are all positive. Why?",
    answer("These are 95% CIs for the means, the Tukey test table gives the 95% CIs for the differences between the means", correct = TRUE),
    answer(
      "The Tukey test logs all the values so low positive values can be transformed to negative ones"),
    answer(
      "This is because the means are almost all different from each other"),
    answer(
      "The 95% confidence intervals from the Tukey test need to be multiplied by the appropriate value of t to generate these values")
  ),
  question(
    "Which of these statements are true? More than one answer can be correct.",
    answer("Because the 95% CIs for the 1000 and the 20µm means do not overlap with any other CIs we would expect these means to be different from all the other means", correct = TRUE),
    answer("We have the most confidence in the location of the 20µm mean", correct = TRUE),
    answer("The differences between means and the patterns of overlap between the confidence intervals we see in this plot are what we would expect given the results of the Tukey test", correct = TRUE),
    answer("The amount of overlap between the 0.2µm mean and the 5µm mean is a consequence of the small sample size for the 0.2µm treatment"),
    answer("Because the 95% CIs for 1000µm do not overlap zero we conclude that this mean is not significant")
  )
)
```

Now we're going to contrast what we've found out from the Tukey test with what we might get from looking at the coefficients table. This is not something you'd do normally because you would do one or the other, but it is a useful exercise. The first thing to think about is whether we want to use the default intercept for our treatment contrasts. In the root mass example we used the 1000µm treatment as the intercept; this made sense because this had the lowest value and is perhaps the treatment that we would expect to have the lowest value for leaf mass. This time, though, we're going to use the default option, which is the 0.2µm treatment. This is because we've already looked at our means and 95% CIs and we're pretty confident in the differences between the 1000µm treatment and the other means, but we are not so sure about the 0.2µm treatment. This might be regarded by purists as a little too post-hoc for their liking but we'll do it anyway. The leaf mass model is, in case you've forgotten, called `L1`.
See if you can generate the summary table which includes the estimated coefficients.

```{r summary, exercise = TRUE, exercise.cap = "Generating the summary table", exercise.lines = 3, exercise.setup = "prepare-anova"}

```

```{r summary-hint-1}
# This is easy. You just need to use the summary()
# function with the L1 object as an argument
```

```{r summary-hint-2}
# This is the solution
summary(L1)
```

Have a look at this and try to complete the quiz.

```{r summary-quiz, echo=FALSE}
quiz(
  caption = "Treatment contrasts questions",
  question(
    "What does the value of 0.44073 in the first row of the Estimate column represent?",
    answer("The estimated mean value for all the data points together"),
    answer(
      "The 95% confidence interval for the 0.2µm treatment"),
    answer(
      "The difference between the mean for the 0.2µm treatment and all other means"),
    answer(
      "The mean value for the 0.2µm treatment", correct = TRUE)
  ),
  question(
    "What does the value of -0.12293 in the third row of the Estimate column represent?",
    answer("The difference between the mean for the 20µm treatment and the mean for the 0.2µm treatment", correct = TRUE),
    answer("The upper 95% confidence interval for the 20µm treatment"),
    answer("The difference between the mean for the 1000µm treatment and the mean for the 20µm treatment"),
    answer("The difference between the mean for the 20µm treatment and all the other means together")
  ),
  question(
    "Which of the following is true? More than one answer can be correct.",
    answer("The treatment contrasts show that the mean for the 20µm treatment is significantly different from the means of all other treatments"),
    answer("The treatment contrasts show that the mean for the 5µm treatment is not significantly different from the mean for the 0.2µm treatment", correct = TRUE),
    answer("The conclusions from the treatment contrasts are consistent with those for the Tukey test", correct = TRUE),
    answer("Just from looking at the treatment contrasts we can tell that the estimated mean for the 1000µm treatment has the lowest value", correct = TRUE)
  )
)
```

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Linear models 2: Linear Regression" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Linear models with a single, continuous explanatory variable. How to fit a straight line through a cloud of data, partitioning the variance for a significance test and how to interpret the output. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) load("latitude_diversity.rda") load("gabon_diversity.rda") ``` ## Fitting lines to bivariate data We very often find ourselves with sets of *bivariate data*: two separate variables have been measured for each thing we're interested in. This might be genetic variability and distance from another population, it might be nutrient intake and immune response, it might be height on a mountain and the size of a plant, or it might be any number of combinations of variables. Quite often in these circumstances we are interested in the effect that one variable (often called the *independent* variable) has on another (the *dependent* variable). Sometimes we find complex relationships between variables that have to be described by some form of curve, but sometimes we are lucky and there is a simple relationship between the two which we can describe with a straight line. In this latter case we can use a type of linear model where instead of relating our response variable to a factor with discrete levels, as you saw when we were looking at ANOVA, we use a continuous explanatory variable. This lets us describe the relationship in terms of the slope and the intercept of the straight line which best describes how the dependent variable changes with the independent variable. This form of analysis is often called *linear regression* and the basics are explained in this video: <br><br> ![](https://youtu.be/m5SjJYt6_uk) ## Example: tree diversity and latitude Let's work though an example of a linear regression. The `latitude_diversity` data set contains data on tree diversity from 24 forest plots in locations ranging from the tropics to northern Europe and the USA^1^. One of the measures we have for each plot is the Shannon diversity index, a measure of the overall diversity of trees present, and another is the latitude of the plot. Let's have a look at these data with a scatterplot. We'll use the absolute value of latitude because some values are negative if the plot is South of the Equator, but the pattern we're interested in is how diversity changes with distance from the equator. ```{r fig.cap = "**Figure 1** Shannon diversity for trees plotted against latitude for 24 forest plots"} plot(Shannon_diversity ~ abs(Latitude), data = latitude_diversity, pch = 16, col = "aquamarine4", ylab = "Shannon diversity", xlab = "Absolute latitude") ``` As you can see the Shannon diversity index decreases with increasing latitude, and just from looking at these data we can see that they might be suitable for linear regression analysis. We have a clear potential direction of causality (diversity does not cause latitude but latitude might cause diversity), the relationship is not obviously different from a straight line and the data don't have any obvious skew, outliers or other weirdness. 
If we want to find the line that best allows us to predict the Shannon diversity of a patch of forest from its latitude, we can fit a linear regression using the `lm()` (for linear model) function with a *formula* specifying exactly what we want the function to fit, much as we've seen for ANOVA and also for plotting data --- in fact the formula we'll use with `lm()` here is exactly the same as the one we used to generate the plot. We have the *response variable* (AKA the dependent variable), then a *tilde* (~), the symbol which means "as explained by" in an R formula, and then the *explanatory variable*, AKA the independent variable. If both are in the same data frame we can use the `data = ` argument to tell R where to look. To fit a linear model to these data, therefore, we can use this code:

```{r}
L1 <- lm(Shannon_diversity ~ abs(Latitude), data = latitude_diversity)

L1
```

Just inputting the name of the fitted `lm()` object returns the formula and the model coefficients. The first one, labelled *Intercept*, is the y-intercept, or the predicted value when the explanatory variable is zero. In this case the intercept corresponds to the predicted Shannon diversity at the equator, which is 4.82. The second one, labelled with the name of the *explanatory variable*, is the slope of the relationship, or in other words the amount by which we predict the Shannon diversity index to change with every degree of increase in latitude. Putting these into the equation of a straight line we get:

$$ y = -0.0741 \times |latitude| + 4.82$$

and we can draw this onto our scatterplot using the `abline()` function.

```{r fig.cap = "**Figure 2** Shannon diversity for trees plotted against latitude for 24 forest plots with a line fitted from a linear regression"}
plot(
  Shannon_diversity ~ abs(Latitude),
  data = latitude_diversity,
  pch = 16,
  col = "aquamarine4",
  ylab = "Shannon diversity",
  xlab = "Absolute latitude"
)

abline(
  a = 4.82,
  b = -0.0741,
  lwd = 1.5,
  col = "aquamarine4"
)
```

Is the line a good fit to the data? It's not perfect of course because there's a fair amount of noise in these data but just from looking at it we can see that it's going to be difficult to find anything better. Is the slope of the line significantly different from zero?

<br><br><hr>

1: Originally published in LaManna, J.A., Mangan, S.A., Alonso, A., Bourg, N.A., Brockelman, W.Y., Bunyavejchewin, S., Chang, L.-W., Chiang, J.-M., Chuyong, G.B., Clay, K., Condit, R., Cordell, S., Davies, S.J., Furniss, T.J., Giardina, C.P., Gunatilleke, I.A.U.N., Gunatilleke, C.V.S., He, F., Howe, R.W., Hubbell, S.P., Hsieh, C.-F., Inman-Narahari, F.M., Janík, D., Johnson, D.J., Kenfack, D., Korte, L., Král, K., Larson, A.J., Lutz, J.A., McMahon, S.M., McShea, W.J., Memiaghe, H.R., Nathalang, A., Novotny, V., Ong, P.S., Orwig, D.A., Ostertag, R., Parker, G.G., Phillips, R.P., Sack, L., Sun, I.-F., Tello, J.S., Thomas, D.W., Turner, B.L., Vela Díaz, D.M., Vrška, T., Weiblen, G.D., Wolf, A., Yap, S. & Myers, J.A. (2017) Plant diversity increases with the strength of negative density dependence at the global scale. Science, 356, 1389–1392.

## Testing for significance

We've fitted our best fit line using a linear regression. Now we would like to determine whether our line is significantly different from one with a slope of zero (in other words, is the effect of latitude on diversity significant?).
This video explains how to do this by partitioning variance and constructing an ANOVA table:

<br><br>

![](https://youtu.be/FWX0cI-37-M)

<br><br>

As we saw in the video, fitting a linear regression involves partitioning variance in a very similar way to an ANOVA with a factor as an explanatory variable, and just like an ANOVA we can carry out an F-test to see if the amount of variance explained by our fitted model (the treatment variance or the treatment MS) is greater than that which is unexplained (the error variance or MS error, or as R labels it the residual variance). Using the `anova()` function on our model will generate an ANOVA table just like the one we saw when we looked at single factor ANOVA.

```{r}
print(anova(L1))
```

This is calculated in the same way as the ANOVA table for a single factor ANOVA, except that instead of calculating the SS error (or the Residual Sum of Squares as R calls it) by subtracting a group mean from each data point we subtract the predicted value of the fitted line: so if we have a data point which has a latitude of 9.15 and a Shannon diversity of 4.0 (this is Barro Colorado Island, Panama in our data) the predicted value is $4.82 - 0.0741 \times 9.15 = 4.142$. $(y - \hat{y})^2$ is therefore $\left( 4.0 - 4.142 \right)^2 = 0.020$ for this particular data point. Just to make the point we can calculate our sums of squares separately.

```{r}
SSTotal <- sum((latitude_diversity$Shannon_diversity - mean(latitude_diversity$Shannon_diversity))^2)

SSError <- sum((latitude_diversity$Shannon_diversity - (4.82 - 0.0741 * abs(latitude_diversity$Latitude)))^2)

SSTreatment <- SSTotal - SSError

cat("SSTreatment = ", SSTreatment)

cat("SSError = ", SSError)
```

Compare these to the ANOVA table above and you can see that they match the entries in the Sum Sq column. The Mean square values are just the Sums of Squares divided by the degrees of freedom and the F statistic is the Mean square treatment (labelled as `abs(Latitude)` here) divided by the Mean Square error (labelled `Residuals` here).

## Summary tables for linear regression in R

In practice we don't need to calculate our sums of squares separately, nor do we even need to use `anova()` to get an ANOVA table for our linear regression. If we use `summary()` on our fitted model object we can get all the information we need. The first five minutes of this video explain how to extract the model coefficients from the summary table for a linear regression. The rest of the video (six minutes onwards) deals with more complex models that we won't concern ourselves with here.

<br><br>

![](https://youtu.be/aCOgHUSd-SI)

<br><br>

Let's apply this to our model.

```{r}
summary(L1)
```

The model summary should be somewhat familiar to you from the ANOVA tutorial. Here it follows the same layout, so we have a reminder of what the model formula is, some summary statistics for the *residuals*, then the table of coefficients. This table has the estimate for the intercept and the slope of the line, their standard errors and some p-values derived from marginal t-tests. The p-values for the intercept and the slope tell us in this case that both are significantly different from zero. That's not especially useful for the intercept except in some particular cases, but it is of course an important hypothesis test for the slope: the null hypothesis for most linear regressions is that the slope is equal to zero.
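To demystify that marginal t-test a little: the t-value is nothing more than the estimate divided by its standard error, compared against a t-distribution on $n - 2$ degrees of freedom. A minimal sketch pulling the numbers straight out of the fitted model (not evaluated here; the object names `slope`, `se` and `t_val` are just for illustration):

```{r eval = FALSE}
# Slope and its standard error from the coefficients table
slope <- summary(L1)$coefficients[2, "Estimate"]
se <- summary(L1)$coefficients[2, "Std. Error"]

# The t-value is the estimate divided by its standard error
t_val <- slope / se

# Two-tailed p-value on n - 2 = 22 degrees of freedom
2 * pt(abs(t_val), df = 22, lower.tail = FALSE)
```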
If you've ever been taught to test for the significance of a regression using a t-test this is the equivalent of what you were taught previously. After the coefficients table there are some summary statistics for our model fit. The most commonly used one of these is the R-squared value (here written as "Multiple R-squared") which is telling us the proportion of the overall variation which is explained by our model. This is the same as the R-squared value that you might have encountered in correlation analysis and we can derive it ourselves from the SSTreatment and SSTotal which we calculated earlier:

```{r}
SSTreatment / SSTotal
```

which is the same value as the one in the table. So overall, latitude explains 63% of the variation in diversity. In ecology at least that's a high value for R-squared and indicates that the relationship between latitude and diversity is a strong one. The adjusted R-squared value is adjusted by the number of explanatory variables in the model and we can ignore it because a) we've only got one explanatory variable and b) no-one uses it anyway.

The last line of the output from `summary()` gives us the same F-statistic and test that we had from our ANOVA table. You might notice that the p-value from the t-test in the coefficients table is in fact exactly the same as the one for the F-test based on partitioning the variance. This is the case for linear regression with a single explanatory variable only so be a little careful about the marginal value from the coefficients table: yes in this case it's also equivalent to a significance test for the whole model, but don't think it is when you're dealing with more complex statistical models with multiple explanatory variables. It's quite common for people to get a bit confused over this so best to make it clear now.

## Linear regression assumptions and diagnostics

As with all linear models, there are certain assumptions regarding the data we're analysing which need to be met, or at least partially met, for our analysis to be valid. We've not looked at these in detail before, but they need to be introduced. The next tutorial focusses on them in detail, but here is an introduction. The four assumptions that we make for any linear model are:

1. **Independence of data** --- we're assuming that there is no underlying structure in our data set that makes some data points more similar than others. This might arise if we had 10 measurements of mouse weight but four of the mice were siblings while the rest were unrelated. If this were the case then the four siblings would probably be more similar to each other than to the rest of the mice, which would violate our assumption of independence. This is more of an issue for experimental design than analysis but it is also arguably the most important.
2. **Normal errors** --- when we subtract the predicted values from the actual data, what's left (the *residuals*), which represents the *error* or the variance we're unable to explain, should be at least approximately normally distributed. Just as a reminder, the assumption is that the *errors* are normal, not that the response variable overall is normally distributed.
3. **Equality of variance** --- for ANOVA this means that the variance for all the groups should be approximately equal, for regression it means that the variance should not change much as we go from low to high values of the explanatory variable.
In other words the amount of spread around the fitted line should be roughly the same at all values of the explanatory variable.
4. **Linearity** --- we're fitting a straight line to describe the relationship between our response and explanatory variable, but if the relationship between them is actually better described by a curve of some sort rather than a straight line then it's going to be better to fit a curve.

We can check these assumptions using *diagnostic plots* that R will generate if we use the `plot()` function on our fitted model object. There is a whole tutorial on these later in the course but for the moment we'll have a quick look. We'll just look at the first two diagnostic plots because they're the most informative. Here's the first.

```{r}
# Plot the residuals versus fitted values
plot(L1, which = 1)
```

This plot shows us the *residuals* plotted against the *fitted values*: so for each data point we have the value that the model is predicting for that data point from the model equation on the x-axis, and the residual, or the distance from the fitted value, on the y-axis. This is a very useful diagnostic plot because it can show issues with the data such as heteroskedasticity (changes in the variance as the values increase) or non-linearity. What we're hoping to see is a cloud of points with no pattern, centred vertically around zero.

Looking at the plot, there's no obvious pattern in terms of changing variances or curves that might indicate non-linearity. There are some points with rather large negative residuals and these reflect the couple of data points with lower diversity for their latitude that you can see on the scatterplot we drew earlier. Should we be concerned about this? Maybe, maybe not. R has kindly given us their row numbers (10 and 11) and if we look at where these two points are from

```{r}
latitude_diversity$Country[10:11]
```

we find that both are from Hawaii. Let's just visualise them on the scatterplot with a fitted line which we drew before.

```{r fig.cap = "**Figure 3** Shannon diversity for trees plotted against latitude for 24 forest plots with a line fitted from a linear regression, with data from Hawaii indicated by a black circle"}
plot(
  Shannon_diversity ~ abs(Latitude),
  data = latitude_diversity,
  pch = 16,
  col = "aquamarine4",
  ylab = "Shannon diversity",
  xlab = "Absolute latitude"
)

points(latitude_diversity$Shannon_diversity[latitude_diversity$Country == "Hawaii USA"] ~
         abs(latitude_diversity$Latitude[latitude_diversity$Country == "Hawaii USA"]),
       cex = 2)

abline(
  a = 4.82,
  b = -0.0741,
  lwd = 1.5,
  col = "aquamarine4"
)
```

We have two points from the same place both of which are uncharacteristically far from the line. Because linear models are fitted by minimising the *squared* distances from each data point, data which are far from the line can have substantial effects on the overall fit of the model, and these two data points are probably pulling the line further towards the x-axis than it would otherwise be. Should we keep them in? It is possible that there is something special about Hawaii which means that the diversity here doesn't follow the same patterns as in other parts of the world --- Hawaii is more isolated than all the other locations, and is geologically relatively recent. On that basis it could be that if we excluded these data we would get a better description of the relationship between tree diversity and latitude in continental or near-continental locations.
If that was our main aim with this analysis then such an exclusion might be justified, but if we are interested in the overall patterns of diversity and are not concerned about whether our data are from isolated islands or not then we might keep these data in the analysis. In general it's best to be very cautious about excluding data from an analysis and only to do it when there is a very good reason to do so.

Let's look at the second diagnostic plot, the *qq-plot* (or quantile-quantile plot) of the residuals.

```{r}
# Plot the qq-plot
plot(L1, which = 2)
```

The qq-plot is not immediately accessible if you don't know how it's generated, but for now we can say that this is a powerful way of checking whether our residuals are following a normal distribution. The thing to remember is that if the residuals are close to normally distributed, they should all be lined up on the diagonal line across the plot.

Our qq plot is just about acceptable. The residuals with the highest and lowest values are both below the line, indicating that the lowest values are smaller than we would expect (this reflects the low values for diversity in Hawaii which we just discussed) and the highest values are also a little lower than expected. Most of the points are on the line or close to it, however, and it's not obvious that there's much we could do to make things better aside from removing the two Hawaiian data points, and you should be reluctant to do that without a really compelling reason.

As a final note, you might have spotted that these data are actually violating our assumption of independence to some extent because of their geographical locations. Two data points are from Hawaii, and we've seen that they are both rather similar to each other and different from all the others. There are also several from SE Asia and a whopping nine from the Continental USA. Because of their geographical closeness these data are more likely to be similar to each other than to data points from similar latitudes but from further away. Is this a concern? Yes and no... in practice it is often difficult or impossible to remove all dependence between our data and so, as scientists, we tend to tolerate some non-independence, with the amount we are prepared to overlook being really a judgement call that's based on intuition, how accurate we need to be with our estimates of effect size and (realistically) what we think others, including potential reviewers, will be OK with. Some non-independence between data can actually be controlled for statistically, so if we were really concerned about this *spatial autocorrelation* in our data we could use an analysis technique that took it into account, but that is a rather advanced topic so we'll leave it at that.

## Exercise 1: analysing bird abundance near villages in Gabon

In 2017, Sally Koerner and co-authors^1^ published a study of how animal abundance changes with distance from villages in Gabon. This involved establishing 24 2.5km transects at varying distances from the nearest village, monitoring them monthly and recording all of the mammals and birds encountered. We'll look at a small subset of their data, focussing on how the relative abundance of ungulates and rodents changed with the distance from the nearest village. Relative abundance was calculated for each group of animals as the percentage of all encounters on that transect which were with that particular group. The data are loaded as the dataframe `gabon_diversity`.
Whenever we start working with some data it's a good idea to look at the structure of the dataframe with the `str()` function.

```{r structure, exercise=TRUE}

```

```{r structure-hint-1}
# This is very straightforward: just use
# str() with the name of the data frame in question
```

```{r structure-hint-2}
# This is the solution:
str(gabon_diversity)
```

You should see that we have quite a few variables in this data frame of varying types, so `TransectID` and `NumHouseholds` are integers, there are quite a few numeric variables including some describing the environment around the transect, such as `Veg_canopy`, which is a measure of how closed the canopy is in the area, and also our measures of the relative abundance of the different groups of animals (`RA_Apes`, `RA_Birds` etc.) are also numeric vectors. Finally we have two factors, `LandUse` and `HuntCat`, which have been imported as factors because we set `stringsAsFactors = TRUE` in our `read.csv()` function call. We're not going to look at these further in this exercise but whenever you import a factor it's a good idea to check that the number of levels is correct because if there is any problem with the data it can often show up here. In this case we know that there should be three levels of each so that's OK.

For this exercise we're just going to use the relative abundance data and the distance from each village. We'd like to know how the relative abundance of some of these groups of animals changes with how far the transect is from the nearest village, and we'll use linear regression to do this. We're not going to do this for all the animal groups, rather for this exercise we'll just look at the birds.

If we want to look at the relationship between relative abundance and distance, it's clear which variables are response and explanatory (or dependent and independent) --- firstly we are interested in the processes affecting animal abundance, not village geography, and secondly the relative abundance of birds, rodents or ungulates is unlikely to be affecting the distance from the nearest village, but the distance might be affecting the relative abundance, so there is a fairly clear direction of causality. This means `Distance` is going to be the explanatory variable in all of our analyses.

Let's start with the birds. As always, before we do anything else we'll plot out our data and have a look at it. In this case we'd like a scatterplot with `RA_Birds` on the y-axis and `Distance` on the x-axis. Don't forget to add sensible axis labels.

```{r bird_plot, exercise = TRUE, exercise.lines = 6}

```

```{r bird_plot-hint-1}
# This is just a scatterplot like the ones we
# were looking at before. Use the plot() function
# Remember y-variable ~ x-variable for the formula,
# tell R which dataframe to use with data =
# and specify the x- and y-axis labels with
# xlab = and ylab =
```

```{r bird_plot-hint-2}
# Don't forget to check that there are commas
# between each argument and that your brackets match
```

```{r bird_plot-hint-3}
# The formula is RA_Birds ~ Distance
# The dataframe is specified with data = gabon_diversity
# The axis labels are something like xlab = "Distance from nearest village (Km)"
# and ylab = "Relative abundance of birds"
```

```{r bird_plot-hint-4}
# This is the solution
plot(RA_Birds ~ Distance,
     data = gabon_diversity,
     xlab = "Distance from nearest village (Km)",
     ylab = "Relative abundance of birds")
```

Looking at this plot you can see that there appears to be a negative relationship between distance and relative abundance of birds.
Can you see anything in the plot that might be a cause for concern with regard to fitting a linear regression to these data? There aren't any obviously problematic data points, so none with values that are clearly radically different from all of the rest, or with values which would be impossible such as a negative relative abundance or a distance of 20000 Km from the nearest village. There's maybe a hint of non-linearity if you squint and turn the plot sideways but nothing more than that, and there's nothing in the plot to suggest big changes in variance as distance increases or decreases. All in all these data look quite well behaved so let's go ahead and fit a regression.

You need to save the regression as an object and I would suggest `B1` as a suitable name. Fill in the correct variable names to replace "response" and "explanatory" in the formula to fit your regression and then add a line to bring up the summary of the B1 object.

```{r bird_regression, exercise = TRUE}
B1 <- lm(response ~ explanatory, data = gabon_diversity)
```

```{r bird_regression-hint-1}
# The response variable is RA_Birds
# The explanatory variable is Distance
```

```{r bird_regression-hint-2}
# Use the summary() function to access
# the summary of the fitted model
```

```{r bird_regression-hint-3}
# This is the solution
B1 <- lm(RA_Birds ~ Distance, data = gabon_diversity)

summary(B1)
```

You can see that we have a highly significant negative relationship between distance and the relative abundance of birds. Before we go any further we should check our diagnostics: use the `plot()` function to generate the residuals versus fitted values plot and the qq plot of the residuals. Remember that we can choose the first two diagnostic plots by using the `which = 1:2` argument.

```{r prepare-birds}
B1 <- lm(RA_Birds ~ Distance, data = gabon_diversity)
```

```{r bird_diagnostics, exercise = TRUE, exercise.setup = "prepare-birds"}

```

```{r bird_diagnostics-hint-1}
# Just use plot() with the name of your saved
# object as the first argument and which = 1:2
# as the second argument
```

```{r bird_diagnostics-hint-2}
# This is the solution
# Diagnostic plots
plot(B1, which = 1:2)
```

These diagnostic plots are pretty good. There's no suggestion of any problems in the residuals versus fitted values plot and although we have some deviation from our straight line for the highest and lowest residuals in the qq plot there's nothing that should cause us concern.

Now that we're happy with our diagnostics we can think about what our regression is telling us. Here's the summary output again.

```{r echo = FALSE}
B1 <- lm(RA_Birds ~ Distance, data = gabon_diversity)
```

```{r}
summary(B1)
```

Try to answer these questions using the summary output.

```{r bird_regression_quiz, echo=FALSE}
quiz(
  caption = "Bird regression quiz",
  question(
    "Which of the following are true? More than one answer can be correct.",
    answer("At a distance of zero the relative abundance of birds is predicted to be 16.035",
           message = "Answer 1: The estimated relative abundance of birds at a distance of zero is given by the intercept \n"),
    answer("At a distance of 10Km the relative abundance of birds is predicted to be 76.83"),
    answer("At a distance of zero the relative abundance of birds is predicted to be 76.83", correct = TRUE),
    answer("At a distance of 20Km the relative abundance of birds is predicted to be 46.21", correct = TRUE),
    answer("At a distance of 5Km the relative abundance of birds is predicted to be 84.49",
           message = "Answer 5: This is calculated incorrectly with the slope being positive rather than negative")
  ),
  question(
    "Which of the following are true?",
    answer("For every Km distance from the nearest village, the relative abundance of birds is predicted to decline by 0.281%",
           message = "Answer 1: 0.281 is the standard error of the slope, not the slope itself \n"),
    answer("For every Km distance from the nearest village, the relative abundance of birds is predicted to decline by 1.53%", correct = TRUE),
    answer("The fitted model explains 57.5% of the total variance in the response variable", correct = TRUE),
    answer("The fitted model explains 55.5% of the total variance in the response variable",
           message = "Answer 4: This value is the adjusted R-squared, which is not really informative here. You want the multiple R-squared \n"),
    answer("The intercept is significantly different from zero", correct = TRUE),
    answer("The slope of the fitted line is not significantly different from zero",
           message = "Answer 6: The null hypothesis for the test of significance of the regression is that the slope is zero. Our p-value is 0.0000177 (1.77e-05) which is well below the cut off of 0.05 so we will reject the null")
  )
)
```

Finally, let's visualise our fitted regression by replotting our scatterplot and using the `abline()` function to add a line. *Top tip* --- if you give `abline()` the name of a fitted linear regression object as an argument it will extract the intercept and slope itself and draw the fitted line for you.

```{r birds_regression_plot, exercise = TRUE, exercise.lines = 8, exercise.setup = "prepare-birds"}

```

```{r birds_regression_plot-hint-1}
# You can paste in the scatterplot code from the previous exercise
# Then add the abline() function
```

```{r birds_regression_plot-hint-2}
# This is the solution
plot(RA_Birds ~ Distance,
     data = gabon_diversity,
     xlab = "Distance from nearest village (Km)",
     ylab = "Relative abundance of birds")

abline(B1)
```

1: Koerner, S.E., Poulsen, J.R., Blanchard, E.J., Okouyi, J. & Clark, C.J. (2017) Vertebrate community composition and diversity declines along a defaunation gradient radiating from rural villages in Gabon. Journal of Applied Ecology, 54, 805–814.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license.
--- title: "Linear models 3: Assumptions and Diagnostics" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Linear models make certain assumptions about the data being analysed. Here we introduce these assumptions and then look at how to use diagnostic plots to check how well our data correspond to them.. --- ```{r setup, include=FALSE} library(learnr) knitr::opts_chunk$set(echo = TRUE, comment = NA, fig.width = 5, fig.height = 5) load("ragwort.rda") ragwort$inoculum <- as.factor(ragwort$inoculum) load("gabon_diversity.rda") ``` ## ANOVA and Regression diagnostics Not all data are suitable for analysis with linear models. The way the analysis is calculated is based on a number of assumptions about the distribution of the data and if these are not met then the conclusions from the analysis can be incorrect. This video introduces the important concepts about linear model assumptions and describes how to test these using the *diagnostic plots* that R can produce for you if you've fitted a linear model to your data. It's a bit long (sorry) so feel free to pause it and make a cup of tea as many times as you like. ![](https://youtu.be/zDD0RoZhMZ4) ## Diagnostics for the root mass ANOVA from tutorial 1 To recap, the important assumptions about the data for linear model are: 1. Data points are *independent* --- in other words, each data point is a separate measure of the effect in question. This is arguably the most important assumption of all, but it is not something that can be checked after analysis. Rather, we should be making sure that our data are independent by careful study design. 1. The *errors are normal* --- here, *error* refers to the remaining variance in the data that we cannot explain, so in this case the distribution of the data points around the estimated means should be normal, or at least approximately so. NB you might have read or been told that the important assumption is that the data overall follow a normal distribution. This is not true and in fact the response variable can have any distribution so long as the *errors* are normal. 1. The *variance is equal between groups* --- for the present analyses the groups are the different treatment groups, and for our ANOVA to be reliable the variance of each group should be approximately the same. 1. *Linearity* --- this means that when we have a continuous explanatory variable, we assume that the relationship between the explanatory variable and the response variable is best described by a straight line, rather than a curve. Fortunately, linear models are quite resilient to deviations from these assumptions. They will continue to produce reliable results when the errors are somewhat non-normal or there are moderate differences in variance between groups so long as the sample size is reasonably large and the design is roughly *balanced* (meaning that the sample sizes are the same or nearly so for each group). This means that unless our sample size is very small or the design is badly unbalanced (e.g. two groups with 30 measurements and one with 3) we do not have to be especially picky and can ignore minor violations of our assumptions. Nonetheless, major violations of these assumptions will invalidate our analysis so it's important to check these every time you run one of these analyses. Let's go back to the ANOVA we used for the root mass data in tutorial 1. 
Just to remind you, these are data from a study of how Plant-Soil Feedback (PSF) affects the growth of ragwort plants *Jacobaea vulgaris*, originally published by Wang *et al.*^1^. Ragwort plants were grown in soil which had been inoculated with water from soil which ragwort had previously grown in, and the water was passed through a series of filters to remove various components of the soil ranging from small invertebrates to bacteria. This boxplot shows the distribution of the data within the four factor levels for inoculum:

```{r echo = FALSE}
boxplot(root_mass ~ inoculum,
        xlab = "Treatment",
        ylab = "Root mass (g)",
        data = ragwort,
        col = "aquamarine4")
```

This is the ANOVA table we generated, which tells us that we have a significant effect of the factor `inoculum`.

```{r}
# Calculate ANOVA and save fitted model to object R1
R1 <- lm(root_mass ~ inoculum, data = ragwort)

# Generate ANOVA table
print(anova(R1))
```

To bring up the diagnostic plots for a linear model you can just use the `plot()` function with the model object as an argument, e.g. `plot(modelname)`. This will give four separate plots by default, but the first two of these are really the most important so we'll use the `which = ` argument to plot just these. Firstly, the diagnostic plots for the root mass ANOVA model.

```{r fig.cap = "Diagnostic plots for the root mass ANOVA", fig.width = 4, fig.height = 4}
plot(R1, which = 1:2)
```

The upper plot shows us the residuals versus the fitted values. This gives us lots of information about how good our model is at describing the patterns in the data and also lets us check for increases or decreases in variance with increases in the fitted values. Our plot has the residuals arranged in four groups. This is because there are only four fitted values, corresponding to the mean for each of the four factor levels. There are no obvious patterns in this plot that might cause concern. The degree of spread for each group is roughly the same, so the assumption of equal variances is probably OK, and there are no obvious extreme values and no indication of anything else amiss.

The lower plot gives us information about how closely the residuals conform to a normal distribution. If they were perfectly normal they would all lie on the dotted line, but in practice this never happens. Here most of our data points are on or very close to the line but the ones with the most negative residuals are somewhat above the line and the ones with the most positive residuals are somewhat below the line. This tells us that the distribution of residuals has "thin tails" (or to use the statistical jargon it is somewhat "platykurtic"). In other words, the most negative and the most positive values are closer to zero than we would expect were they following a normal distribution.

Is this a problem for our analysis? Not really. As we've discussed, linear models are robust to small violations of these assumptions, and this is only a small violation. The residuals are symmetrically distributed, with no skew, and they do at least roughly conform to a normal distribution, albeit one with somewhat skinny tails.

Finally, if you're wondering what the numbers on the plots are, R labels the three most extreme residuals on each plot with their index number. This is helpful and makes it easy to identify them if you need to do anything further.

<br><br><hr>

1 Wang, M., Ruan, W., Kostenko, O., Carvalho, S., Hannula, S.E., Mulder, P.P.J., Bu, F., van der Putten, W.H. & Bezemer, T.M.
(2019) Removal of soil biota alters soil feedback effects on plant growth and defense chemistry. The New Phytologist, 221, 1478–1491.

## Exercise 1: diagnostics for the leaf mass ANOVA

In tutorial 1 you fitted an ANOVA to another variable from the dataset on ragwort plant growth and PSF, testing whether the leaf masses of the plants varied between inoculum treatments. As a reminder here is the ANOVA:

```{r}
# Calculate ANOVA and save fitted model to object L1
L1 <- lm(leaf_mass ~ inoculum, data = ragwort)

# Generate ANOVA table
print(anova(L1))
```

We have a significant effect of the `inoculum` factor here as well, indicating that at least one mean is significantly different from at least one other mean. See if you can plot the two diagnostic plots for your fitted ANOVA on leaf mass.

```{r root_anova_setup, echo = FALSE}
# Calculate ANOVA and save fitted model to object L1
L1 <- lm(leaf_mass ~ inoculum, data = ragwort)
```

```{r diagnostics, exercise = TRUE, exercise.cap = "Diagnostics", exercise.lines = 5, exercise.setup = "root_anova_setup", fig.width = 4, fig.height = 4}

```

```{r diagnostics-hint-1}
# You can use the code as before but substitute
# the name of your ANOVA object
```

```{r diagnostics-hint-2}
# This is the solution
# plot(L1, which = 1:2)
```

Have a look at these plots. What do you see?

```{r diagnostics-quiz, echo=FALSE}
quiz(
  caption = "Diagnostic plots questions",
  question("Are the errors roughly normally distributed?",
    answer("Yes: the points on the qq-plot are mostly on or close to the line, but there is one residual with an anomalously positive value", correct = TRUE),
    answer("No: several of the points in the qq-plot are a long way from the line"),
    answer("No, there is evidence that the residuals follow a skewed distribution"),
    answer("Yes: the similar amounts of dispersion in the residuals versus fitted values plot indicates normal errors")
  ),
  question("Are the variances of the groups different enough to cause concern?",
    answer("Yes: the variance is clearly increasing with increasing fitted values"),
    answer("No: the points on the qq-plot are mostly on or close to the line, but there is one residual with an anomalously positive value"),
    answer("Yes: smaller fitted values are clearly associated with more dispersion in the residuals"),
    answer("No: the amount of dispersion in the residuals is roughly equal between groups, but there is one residual with an anomalously high value", correct = TRUE)
  )
)
```

<br><br>

<details><summary><font size = +2>**Click here for more on the diagnostics**</font></summary>

As you've probably seen, the diagnostics for the leaf mass model are mostly well behaved, but there is one data point (30) which has an anomalously high positive residual - it seems to be sufficiently different from the others that we should maybe take a closer look. Let's visualise this particular point: here we'll draw a `stripchart()` and draw in data point 30 as a solid data point using `points()` to make it clear where it is.

```{r}
stripchart(
  leaf_mass ~ inoculum,
  data = ragwort,
  pch = 1,
  vertical = TRUE,
  xlab = "Treatment",
  ylab = "Leaf mass (g)",
  col = "aquamarine4"
)

points(ragwort$leaf_mass[30] ~ ragwort$inoculum[30], pch = 16)
```

This point is in fact one of the "outliers" that showed up in the boxplot of these data that we looked at in the previous tutorial. At the time we suggested that it wasn't sufficiently different from the rest of the data to justify any special treatment such as exclusion.
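If you want to put a number on how unusual that point is, rather than just eyeballing the plots, you can look at the standardised residuals from the fitted model. This is a minimal sketch (it assumes the `L1` object fitted above; `rstandard()` is the base R function for standardised residuals from a linear model):

```{r}
# Standardised residuals scale each raw residual by its estimated
# standard deviation, so values much beyond +/- 2 stand out.
# The three largest should include point 30.
head(sort(abs(rstandard(L1)), decreasing = TRUE), 3)
```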
Now that we have this further evidence from the diagnostic plots, however, we might want to look rather more closely at whether this particular point is having any substantial effect on the conclusions from the ANOVA. We can refit our ANOVA with the data point removed by using the subset argument.

```{r}
L2 <- lm(leaf_mass ~ inoculum, data = ragwort, subset = -30)

summary(L2)
```

The ANOVA is still highly significant (the last row of the summary gives the F-statistic from the ANOVA table) but have the other details changed? Let's compare it with the coefficients table for the previous model.

```{r echo = FALSE}
L1 <- lm(leaf_mass ~ inoculum, data = ragwort)
```

```{r}
summary(L1)
```

In general the new model is a somewhat better fit all round: the R-squared values are higher, the F-statistic is larger and the marginal p-values in the coefficients table are smaller. The marginal p-value for the contrast between the 5µm and the 0.2µm treatment has also changed from 0.323, nowhere near statistical significance, to 0.0731, making this contrast now close to significance.

What to do? This single data point is having some effect on our conclusions and on our confidence in the patterns in these data, so should we just remove it and present the analysis without it? Different people might give you different answers here but your author's opinion is no: unless there is further, independent evidence that indicates a potential problem with this data point (e.g. was it noted during the experiment as being different from the rest? Was it accidentally given extra water? Was it left in a better-lit position than the others for some reason?) there is still no good reason to exclude it. The best thing to do is just to note in our report that there is one data point which is rather influential and that excluding it leads to a somewhat different result, giving us less confidence in whether there is a difference between two of the treatments or not. This way we present the reader with as much information about how certain or uncertain we are about the patterns in our data as we can, and let them make up their own mind.

</details>

<br><br>

## Exercise 2: analysing ungulate abundance near villages in Gabon

For our second exercise we'll look at another data set from the study of animal abundance and distance to villages in Gabon that we previously met in the tutorial on linear regression (data from Koerner *et al.* 2017^1^). Here we'll look at how the relative abundance of ungulates changes with distance from the nearest village. The data are loaded and the data frame is called `gabon_diversity`. Running `str()` lets us check that everything has imported properly.

```{r}
str(gabon_diversity)
```

As always, the first thing to do is to look at a scatterplot. The variable name for ungulate relative abundance is `RA_Ungulate` and that for distance is `Distance`.
```{r ungulates_scatterplot, exercise = TRUE, exercise.lines = 5}

```

```{r ungulates_scatterplot-hint-1}
# Use the plot() function and put in a formula
# with RA_Ungulate as the response variable, then
# a tilde ~ and then the explanatory variable which
# is Distance
#
# Specify appropriate x- and y-axis labels using
# xlab = and ylab =
#
# You also need to include the data = gabon_diversity argument
```

```{r ungulates_scatterplot-hint-2}
# Here's a code framework for you to adapt:

plot(response ~ explanatory,
     data = gabon_diversity,
     xlab = "",
     ylab = "")
```

```{r ungulates_scatterplot-hint-3}
# Here's the solution:

plot(RA_Ungulate ~ Distance,
     data = gabon_diversity,
     xlab = "Distance from nearest village (Km)",
     ylab = "Relative abundance of ungulates")
```

Looking at the plot, you can see that there seems to be something of a positive relationship between the variables, with higher values for ungulate relative abundance associated with longer distances from the nearest village. The pattern is less clear than for our birds, however, and the data seem to be rather wedge-shaped, with more spread associated with longer distances.

Fit a model and save it as an object called U1, and then call up a summary using `summary()`.

```{r ungulates_mod1, exercise = TRUE}

```

```{r ungulates_mod1-hint-1}
# Here is a code framework to help you

Name <- lm(response ~ explanatory, data = gabon_diversity)

summary()
```

```{r ungulates_mod1-hint-2}
# This is the solution

U1 <- lm(RA_Ungulate ~ Distance, data = gabon_diversity)

summary(U1)
```

OK, that seems to have worked and the summary is telling us that we have a significant relationship between distance and ungulate relative abundance. Let's jump straight in and look at the diagnostics. Once again we just want the first two plots so specify `which = 1:2` as an argument to `plot()`.

```{r prepare-ungulates}
U1 <- lm(RA_Ungulate ~ Distance, data = gabon_diversity)
```

```{r ungulate_diagnostics, exercise = TRUE, exercise.setup = "prepare-ungulates", fig.height = 4, fig.width = 4}

```

```{r ungulate_diagnostics-hint-1}
# Just use plot() with the name of your saved
# object as the first argument and which = 1:2
# as the second argument
```

```{r ungulate_diagnostics-hint-2}
# This is the solution

plot(U1, which = 1:2)
```

Look at these residual plots and try to answer these questions.
```{r ungulate_diagnostics_quiz, echo=FALSE}
quiz(
  caption = "Ungulate diagnostics",
  question("What do you conclude from the plot of residuals versus fitted values?",
    answer("There is no pattern and so our fitted model seems valid"),
    answer("The plot is mostly OK but there are a couple of data points with large negative residuals that could be influencing our results"),
    answer("There is strong evidence that the relationship is actually non-linear"),
    answer("There is a wedge-shaped pattern of residuals, indicating that the variance is increasing with increasing distance", correct = TRUE)
  ),
  question("What do you conclude from the qq plot of the residuals?",
    answer("Most residuals are approximately normally distributed but there are a few data points with high positive residuals which are more positive than we would expect if they were following a normal distribution", correct = TRUE),
    answer("Most residuals are approximately normally distributed but there are a few data points with high positive residuals which have smaller values than we would expect if they were following a normal distribution"),
    answer("The qq-plot shows the typical pattern found when the errors are positively skewed"),
    answer("The qq plot shows that the residuals are following a Poisson distribution")
  )
)
```

## Exercise 3: dealing with heteroskedastic data

The residuals versus fitted values plot for our ungulate regression confirms what we thought might be the case with regards to the increasing variance in the data as the distance from the nearest village increases. This means that we have *heteroskedastic* data, which are not ideal for fitting a linear model. We have a number of options now to try to deal with this. They are:

1. Ignore it on the grounds that linear models are robust to violations of these assumptions
2. Use an analysis which allows us to explicitly model the changing variance with the increasing distance (e.g. a weighted least squares approach using the `gls()` function from the nlme package).
3. Transform our response variable somehow so that it is better behaved and reanalyse.

Option 1 has its merits and we could consider this depending on what we're trying to do. If we're really just concerned about demonstrating that there's a relationship between the two variables and aren't particularly bothered about how well our line is estimated this might be OK (although if that's the case why not just do a correlation analysis?). If, however, we want to do something like compare the degree of change with distance between ungulates and other mammal groups then we want our estimates of the slope and intercept to be as good as possible, and we might not want to just leave it as it is.

Option 2 is definitely a possibility but it's a little advanced for our purposes and probably best left for another day. This leaves us with option 3: transform our data and reanalyse. There are lots of options here and the most common approach would be to repeat the analysis with log transformed data: taking logs has a bigger effect on large values than on small values, so it tends to reduce variance more for large values than for small ones. Log transforming these data isn't completely straightforward, however, because we have one data point with a value of zero and the log of this is -infinity. One solution to this is to add some constant, for example 1, to each datapoint before taking logs.
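As a sketch of what that would look like here, assuming we pick 1 as our constant (base R's `log1p()` function computes log(1 + x) directly):

```{r eval = FALSE}
# Refit the regression to log(RA_Ungulate + 1); log1p() avoids
# the -Inf we would get from log(0)
U_log <- lm(log1p(RA_Ungulate) ~ Distance, data = gabon_diversity)
plot(U_log, which = 1:2)
```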
This works and is something that is done a lot, but it does mean we're changing our data quite a lot before analysing it, and also the choice of constant (should we use 0.1? 1? 100?) can alter the distribution of the data and change the output of the analysis. An alternative to log-transformation is to use a square root transformation. This alters the distribution of the data somewhat less than a log transformation and also avoids the problem with the zero value in the dataset.

Try refitting the model to the square root of relative abundance and check the diagnostic plots again. Remember that you can do the transformation within the formula in the `lm()` function call. Call your new model object [U2](https://en.wikipedia.org/wiki/Wikipedia:Lamest_edit_wars).

```{r sqrt_lm, exercise = TRUE, fig.height = 4, fig.width = 4}

```

```{r sqrt_lm-hint-1}
# The function to calculate a square root is sqrt()
```

```{r sqrt_lm-hint-2}
# The arguments you want in your lm() function call are

sqrt(RA_Ungulate) ~ Distance, data = gabon_diversity
```

```{r sqrt_lm-hint-3}
# To plot the diagnostics you want:

plot(U2, which = 1:2)
```

```{r sqrt_lm-hint-4}
# This is the solution

U2 <- lm(sqrt(RA_Ungulate) ~ Distance, data = gabon_diversity)

plot(U2, which = 1:2)
```

You can see that the residuals versus fitted values plot no longer has that wedge shape, and also that the qq plot looks rather better. Let's look at the summary output for our new model.

```{r prepare-ungulates2}
U2 <- lm(sqrt(RA_Ungulate) ~ Distance, data = gabon_diversity)
```

```{r sqrt_lm_summary, exercise = TRUE, exercise.setup = "prepare-ungulates2"}

```

```{r sqrt_lm_summary-hint-1}
# You just need to call summary on the U2 object

summary(U2)
```

Here are some questions about the summary table. Remember this regression was fitted to the square root transformed relative abundance data, not the raw data.

```{r ungulate_regression_quiz, echo=FALSE}
quiz(
  caption = "Ungulate regression quiz",
  question("Which of the following are true? More than one answer can be correct.",
    answer("At a distance of zero the relative abundance of ungulates is predicted to be 0.490", correct = TRUE),
    answer("At a distance of zero the relative abundance of ungulates is predicted to be 0.699", message = "Answer 2: this is the square root of the predicted relative abundance of ungulates. You need to square the predicted values to get predicted relative abundances \n"),
    answer("At a distance of 10km the relative abundance of ungulates is predicted to be 1.611", message = "Answer 3: You need to square the predicted values to get predicted relative abundances \n"),
    answer("At a distance of 20Km the relative abundance of ungulates is predicted to be 6.36", correct = TRUE)
  ),
More than one answer can be correct.", answer("For every Km distance from the nearest village, the relative abundance of ungulates is predicted to increase by 0.0912", message = "Answer 1: This is the slope for the relationship between the square root of relative abundance and distance \n"), answer("For every Km distance from the nearest village, the square root of the relative abundance of ungulates is predicted to increase by 0.0912", correct = TRUE), answer("The fitted model explains 42.7% of the total variance in the response variable", correct = TRUE), answer("The intercept is not significantly different from zero", message = "Answer 4: it is significantly different from zero as you can see from the marginal p-value of 0.035 \n"), answer("The slope of the fitted line is significantly different from zero", correct = TRUE) ) ) ``` Finally, we need to visualise our data with the fitted model. We could plot the transformed data with the straight line we've fitted, but that would not allow for useful comparisons with other analyses where we haven't transformed the data, such as the birds. Furthermore, plotting the transformed data it makes it harder to get a good understanding of the pattern we've ddescribed in our data. A better option is to plot the back transformed predicted values onto the untransformed data. There are a variety of ways to do this but one option is to use the `curve()` function like this. ```{r} # Plot the data plot(RA_Ungulate ~ Distance, data = gabon_diversity, xlab = "Distance from nearest village (Km)", ylab = "Relative abundance of ungulates") # Generate a function which calculates backtransformed # predicted values U2_fitted <- function (x) (0.6997 + 0.0912 * x)^2 # Draw the function on with curve. NB add = TRUE # means that the curve is plotted over the previous plot, # otherwise it will generate a new graph. curve(U2_fitted, add = TRUE) ``` <br><br><hr> ## License This content is licensed under a [https://www.gnu.org/licenses/gpl-3.0.en.html](GPL-3) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/15_Model_assumptions/Linear_models_3_Assumptions_and_diagnostics.Rmd
--- title: "Linear models 4: Multi-factor ANOVA" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Analysis of variance with multiple explanatory factors: how to fit models, check diagnostics and interpret the output.. --- ```{r setup, include=FALSE} library(learnr) library(gplots) library(ggplot2) knitr::opts_chunk$set(echo = TRUE, comment = NA, message = FALSE, fig.width = 5, fig.height = 5) load("mouse_activity.rda") load("zebra_bacteria.rda") bacteria1 <- subset(zebra_bacteria, Line ==1) ``` ## Adding extra variables to a linear model So far we've seen how the linear model can cope with both factors and continuous variables as explanatory variables, and that the way that variance is partitioned for both types of explanatory variable is fundamentally the same. Linear models can also cope with more than one explanatory variable: so for two variables the variance explained by the first variable is partitioned as before, giving us the treatment variance for variable 1, and the error variance. The error variance is then further partitioned into the treatment variance that is accounted for by the second variable, leaving the final error variance. This allows us to analyse the effect of several different things that might influence your variable of interest at the same time, giving some important benefits over analysing each explanatory variable's effect separately: * Fitting a single model reduces the number of statistical tests which we need to do, reducing the probability of type 1 errors. * We can sometimes detect effects which we would not see in single variable analyses, because the linear model allows us to ask the question "what is the effect of variable Y when we have already taken the effect of variable X into account?". * We can statistically control for variables if we wish to: so in a study with height and weight as explanatory variables we can ask the question "What is the effect of weight when height has been taken into account?" --- in other words, what is the effect of an individual being heavier or lighter than would be expected for someone of that height. * We can look for *interactions* between explanatory variables which arise when the effect of one variable depends on the level of the other. In this tutorial we'll look at analysing linear models with more than one factor as the explanatory variables. These linear models are often called "multi-factor ANOVA" or you might see them called a "two-way ANOVA" or similar depending on the number of explanatory factors used. ## Two-factor linear model example The data we'll be looking at here come from a study on the effect that paternal exposure to nicotine has on mice, looking particularly at the behaviour of their descendants, as published by McCarthy and co-authors in 2018^1^. The dataset we will use is a small subset of what was presented in their paper, and deals with the spontaneous locomotor activity that the F1 offspring of nicotine exposed or control males mated with unexposed females exhibited over a 12 hour period. Locomotor activity was measured by placing the animals in a testing chamber with a series of infra-red beams arranged in a grid, and each time a beam was broken this was logged as a single locomotory event. Our dataset (`mouse_activity`) includes data from both male and female F1 offspring.The first thing to do is to check its structure. 
```{r}
str(mouse_activity)
```

`SLA` (Spontaneous Locomotory Activity) is our response variable and is the count of all the recorded times an infra-red beam was broken during the 12-hour period. `Sex` is the sex of the F1 mouse in question, and `Treatment` is the treatment the father of the mouse was exposed to: "Water" if the mouse was given plain drinking water and "Nicotine" if the mouse was given drinking water containing 200µg/mL nicotine for 12 weeks.

Both `Treatment` and `Sex` are character vectors but need to be specified as factors.

```{r}
mouse_activity$Treatment <- as.factor(mouse_activity$Treatment)

mouse_activity$Sex <- as.factor(mouse_activity$Sex)
```

Let's look at our sample sizes for each factor combination.

<br>

```{r}
table(mouse_activity$Sex:mouse_activity$Treatment)
```

The data are somewhat unbalanced, with sample sizes ranging from 11 to 18 and overall rather more data for the controls whose fathers were given water. Nonetheless there's no combination with a really small or large sample size by comparison to the others (i.e. no combination with 3, or 300). Now let's visualise these data with a boxplot.

```{r fig.cap = "**Figure 1.** Boxplot showing Spontaneous Locomotor Activity (SLA) as measured in IR beam triggers over 12 hours for mice with paternal exposure to either nicotine or water."}
boxplot(SLA ~ Treatment * Sex,
        data = mouse_activity,
        xlab = "Treatment & Sex",
        ylab = "SLA (movements/12h)")
```

Looking at this we can see some patterns: spontaneous movement rates seem higher in females and in animals with fathers exposed to nicotine, but as always we need to do some statistics to give us an idea of how confident we can be that these patterns aren't just a consequence of sampling error. The boxplots are also at least approximately symmetrical, which tells us that these data aren't strongly skewed. There do, however, seem to be some differences in variance between the groups, such that the mice with fathers exposed to nicotine have rather wider IQRs than those whose fathers were given plain water. It's not 100% clear how severe this is but we should keep an eye out for evidence of heterogeneous variances when we look at our diagnostic plots.

Fitting linear models with multiple explanatory variables in R simply involves adding new elements to the formula that is the first argument of the `lm()` function.

`SLA ~ Treatment` will fit a model with `Treatment` as the explanatory factor.

`SLA ~ Treatment + Sex` will fit a model with both `Treatment` and `Sex` as the explanatory factors but with no interaction between them --- in other words the *main effects* of the two factors only.

`SLA ~ Treatment + Sex + Treatment:Sex` will fit a model with both `Treatment` and `Sex` as explanatory factors but also with the interaction between the two (specified here by `Treatment:Sex`).

`SLA ~ Treatment * Sex` will, in this case, fit the same model as the previous example. The asterisk `*` means "fit all the main effects and also all the interactions". Here there can only be one interaction because there are only two explanatory factors. If we wanted to add a third factor, for example `Diet` (good or bad), we could do it like this:

`SLA ~ Treatment*Sex*Diet` would fit all three main effects, the two-way interactions between `Treatment` and `Sex`, between `Treatment` and `Diet` and between `Diet` and `Sex`, and also the three-way interaction between `Treatment`, `Sex` and `Diet`.
`SLA ~ Treatment + Sex + Diet + Treatment:Sex + Treatment:Diet` would fit all three main effects plus two interaction terms only, namely those between `Treatment` and `Sex` and between `Treatment` and our fictional `Diet` factor.

<br>

We'll fit a model with both main effects and the interaction, and then bring up an ANOVA table to get an idea of the statistical significance of our explanatory variables.

```{r eval = FALSE}
mouse1 <- lm(SLA ~ Treatment * Sex, data = mouse_activity)

anova(mouse1)
```

```{r echo = FALSE}
mouse1 <- lm(SLA ~ Treatment * Sex, data = mouse_activity)

print(anova(mouse1))
```

The basic structure here is familiar from the single factor ANOVA tutorial. Instead of simply partitioning the variance into that explained by the treatment and the remaining, error variance, however, we have now partitioned it into that explained by the two main effects of `Treatment` and `Sex`, plus a further sum of squares etc. for the interaction between the two, and then the error variance (which R calls the `Residuals` of course). The various elements such as the mean square values and the F-statistics are calculated in the same way as for any other linear model, so MS Sex = SS Sex (253190901) divided by df Sex (1), and F for Sex is MS Sex/MS Residuals (or error) = 253190901/19714072 = 12.84316 on 1 and 50 df, which is highly significant (p = 0.000767).

Before we go further with interpreting this model, of course, we should make sure that the data are well behaved and the model is a good fit.

<br><br><hr>

1: McCarthy, D.M., Morgan, T.J., Jr, Lowe, S.E., Williamson, M.J., Spencer, T.J., Biederman, J. & Bhide, P.G. (2018) Nicotine exposure of male mice produces behavioral impairment in multiple generations of descendants. PLoS Biology, 16, e2006497.

## Model diagnostics

Every time you fit a linear model you should check your diagnostic plots to make sure that the assumptions that we make about our data are met, or at least are approximately met. Let's do that for our example.

```{r}
plot(mouse1, which = 1:2)
```

The residuals versus fitted values plot shows little to concern us. The differences in variance between groups that we saw in the boxplot earlier are there, but overall these are not really substantial enough to cause us serious concern. The qq plot is a bit odd, with some deviation from the line of equality, but most of the deviations aren't too major. There are some datapoints that have high residual values (6, 48, 38) but again these are not enough to really cause a lot of concern. It's a bit of a judgement call in this case but it's most likely OK. We can do a further check by drawing a histogram of the residuals.

```{r}
hist(mouse1$residuals)
```

You can see from the histogram that the distribution of the residuals is a bit pointier than a normal distribution, with a big sharp peak in the centre of the distribution, and there's a suggestion of some positive skew. Overall, though, the distribution of the residuals is at least approximately normal and close to symmetrical. Given the general robustness of ANOVA to small to medium deviations from the fundamental assumptions about the data, these fairly small departures from the ideal distribution are not likely to be affecting our output in any substantial way.

As a side note, this sort of error distribution is one of the main reasons why these tutorials are all using real data from real science. When you fit models to real data, there is *always* something about the diagnostic plots that makes you scratch your head.
You never see residuals perfectly lined up in your qq plot, or with no pattern whatsoever in the residuals versus fitted values. The thing to ask yourself is whether whatever weirdness you're seeing is of a magnitude that might change the results of your analysis. If it is (and knowing what will and what won't is often a matter of experience) then you need to do something about it. If it isn't then don't. If you're not sure, then maybe try something (data transformation, removing highly influential and worrying data points), refit your model, compare it with the original and see if it makes a difference. If it doesn't then you know you're good.

## Significance of model terms

Now that we're reasonably happy about our diagnostics, let's look at the output of our analysis again. We've already seen that we have a non-significant interaction term but the two main effects are significant. How reliable are these p-values though? Take a look at what happens when we change the order of the explanatory variables in our model formula. Here's that first ANOVA table again.

```{r eval = FALSE}
# ANOVA table for first model
anova(mouse1)
```

```{r echo = FALSE}
print(anova(mouse1))
```

<br>

Here's the ANOVA table for the model with the explanatory variables in the opposite order --- so `Sex * Treatment` instead of `Treatment * Sex`.

```{r eval = FALSE}
# Fit model with Sex first instead of treatment
mouse2 <- lm(SLA ~ Sex * Treatment, data = mouse_activity)

# ANOVA table for second model
anova(mouse2)
```

```{r echo = FALSE}
# Fit model with Sex first instead of treatment
mouse2 <- lm(SLA ~ Sex * Treatment, data = mouse_activity)

# ANOVA table for second model
print(anova(mouse2))
```

The statistics for the two main effects change depending on their order in the formula. When treatment is first we have F~1,50~ = 15.44, p = 0.000262, but when treatment is the second factor in the model we have F~1,50~ = 13.79, p = 0.000516. This is a consequence of the way that a linear model is fitted: as explained at the start of the tutorial, the sum of squares for the first term is calculated from the raw data, but for the second term the sum of squares is calculated using the error sum of squares once the sum of squares for the first term has been removed. This method of calculating the sums of squares is called *type I sums of squares* or sometimes *sequential sums of squares*, and it is why the SS, MS, F-statistic and p-value can change depending on the order of the terms in the model. This will happen so long as there is any kind of correlation between the explanatory variables, and even when the explanatory variables are factors it will happen unless the design is perfectly balanced, with exactly equal sample sizes for every combination of treatments.

Do we need to worry about this here? Maybe. One option is to use something called the *type III sums of squares* --- a different way of calculating the sums of squares which treats every term as if it were the last term entered into a model, so in this case the SS for Sex is calculated based on data which has already had the SS for Treatment and for the interaction partitioned out, and the SS for Treatment is calculated based on data which has already had the SS for Sex and for the interaction partitioned out. Some statistical software (e.g. SAS) uses these type III sums of squares by default, so if your analysis is giving a different result from one done with different software then this is worth bearing in mind as a possible explanation.
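One further practical detail is worth flagging here, as a side note that goes beyond the original text: type III tests are generally only meaningful when the factors are coded with sum-to-zero contrasts, and R's default treatment contrasts can make type III tests of main effects misleading when an interaction is present. A hedged sketch, using the `Anova()` function from the car package which is introduced just below:

```{r eval = FALSE}
# Temporarily switch to sum-to-zero contrasts before asking for
# type III tests, then restore the defaults afterwards
op <- options(contrasts = c("contr.sum", "contr.poly"))
mouse1_sum <- lm(SLA ~ Treatment * Sex, data = mouse_activity)
car::Anova(mouse1_sum, type = "III")
options(op)
```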
There are also type II sums of squares, but these only work in models where there is no interaction term.

There are two options for calculating significance using type III sums of squares. One is to use the `drop1()` function in base R. This actually carries out a *deletion test* on the model: it refits the model with the term in question removed and then compares it with the original model using a *partial F-test*, which is a version of the F-test that can be used to compare nested models. This is the equivalent of calculating the type III sums of squares. `drop1()` will only delete terms that can sensibly be compared with the full model, so if we have both explanatory variables also present in an interaction term it will only give us a p-value for the interaction:

```{r}
drop1(mouse1, test = "F")
```

Two things to note here are 1) the p-value here is the same as the one from the ANOVA table. This is because the interaction term is entered last in the model anyway. If there were other explanatory variables which were not included in the interaction term we would also get deletion tests for these, however, and they would be different. 2) As we will discuss later, if there is an interaction then the p-values for the main effects included in the interaction term are fairly meaningless, so in this case the p-value for the interaction term is arguably the only appropriate one to look at.

For the second option, we can use the `Anova()` function from the `car` package. Note that this function has a capital A to distinguish it clearly and straightforwardly from the `anova()` function we've already used. What could possibly go wrong?

```{r eval = FALSE}
library(car)

Anova(mouse1, type = "III")
```

```{r echo = FALSE}
library(car)

print(Anova(mouse1, type = "III"))
```

This supports what we've already calculated using the type I sums which are the default option in R: both main effects are significant but the interaction term is non-significant. Since the interaction is very far from significant, and there's nothing to indicate that there is an important interaction effect here, we can consider generating a new and more parsimonious model without the interaction term. There is more in the upcoming tutorial on model selection on the reasoning behind this and when it might or might not be justified.

```{r eval = FALSE}
mouse3 <- lm(SLA ~ Treatment + Sex, data = mouse_activity)

Anova(mouse3, type = "III")
```

```{r echo = FALSE}
mouse3 <- lm(SLA ~ Treatment + Sex, data = mouse_activity)

print(Anova(mouse3, type = "III"))
```

Our main effects remain statistically significant in the new reduced model. Now that we have a fitted model and we're happy with our understanding of the significance of the various terms, let's think about interpreting it.

## `summary()` output for two-factor ANOVA

To interpret our model we'll need to use the coefficients table from the model summary. This video explains how these work. You've possibly already watched the first 11 minutes in the single factor ANOVA tutorial --- the rest of the video extends this to two-factor ANOVA. There's quite a lot of it so maybe get a cup of tea.

![](https://youtu.be/CS5ogBL-MHo)

## `summary()` output for our model

```{r}
summary(mouse3)
```

We have an ANOVA with two factors, each of which has two treatments. This gives us a fairly straightforward coefficients table. As with the single factor ANOVA models, the first row of the coefficients table is labelled `(Intercept)`.
Instead of telling us the estimated mean for the treatment that comes first in the alphabet, this is now the estimated mean for the treatment combination with the levels for each factor that are first alphabetically. Our factor levels are

```{r}
levels(mouse_activity$Treatment)

levels(mouse_activity$Sex)
```

So the intercept for this model is the estimated mean for female mice with fathers treated with nicotine. We can check this by calculating the mean directly.

```{r}
with(mouse_activity, mean(SLA[Sex == "Female" & Treatment == "Nicotine"]))
```

The estimate from the model is very close to the actual mean but not dead on. This is because we have no interaction in the model and consequently it is not estimating every single mean exactly: rather, the intercept is estimated and then the effects of `Treatment` and `Sex`. As with single factor ANOVA, we have a standard error, t-statistic and p-value for the intercept which tells us nothing more than that this mean is significantly different from zero.

The next row is labelled `TreatmentWater`.

```
                Estimate Std. Error t value Pr(>|t|)    
TreatmentWater     -4545       1212  -3.750 0.000453 ***
```

This coefficient gives the estimated difference between the mean for mice with paternal nicotine treatment (the intercept) and mice with paternal water treatment: mice whose fathers were treated with water cause 4545 fewer triggers per 12 hours. Because we have no interaction term in our model this applies to both sexes of mice equally: looking at this from the perspective of the nicotine treatment, both male and female mice whose fathers were treated with nicotine cause, on average, 4545 extra triggers per night. In other words, paternal nicotine treatment is associated with a considerable increase in spontaneous locomotor activity --- almost a third more in fact. The standard error, t-test and the small marginal p-value for this row of the coefficients table tell us that an effect this big, or bigger, is unlikely to have arisen simply by sampling error.

The last line is labelled `SexMale`.

```
         Estimate Std. Error t value Pr(>|t|)    
SexMale     -4365       1206  -3.619 0.000678 ***
```

Much as the previous line gives us the effect of paternal exposure, this coefficient gives us the effect of sex. The intercept was for females, and since the estimate for males is -4365 this tells us that on average males will cause 4365 fewer triggers per 12 hours, whether or not their fathers were exposed to nicotine.

Because we have no significant interaction between our explanatory factors, their effects are not dependent on the value of the other explanatory factor. In other words, the effects of each factor are *additive*, in that to get the model prediction for a particular combination of factor levels we can simply add the relevant effects together. So for a male with paternal exposure to nicotine the predicted value is Intercept (14554) + the coefficient for Sex (-4365) = 10189, whereas for a male with paternal exposure to water the predicted value is Intercept (14554) + the coefficient for Sex (-4365) + the coefficient for Treatment (-4545) = 5644.

Do we need to go any further with interpreting this model, for example by doing a post-hoc test to assess exactly which means are different from which other means? No. We know the effects of both `Sex` and `Treatment` and we can express those in terms of the actual behaviour of these mice. We know that these main effects are statistically highly significant, and we know that the interaction term is far from significant and can safely be ignored.
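As a quick check on that arithmetic, we can ask the fitted model for its predictions directly. This is a minimal sketch (`mouse3` is the additive model fitted above; `expand.grid()` just builds the four treatment combinations):

```{r}
# Predicted SLA for every combination of Treatment and Sex from the
# additive model; these should match the hand calculations above
newdat <- expand.grid(Treatment = levels(mouse_activity$Treatment),
                      Sex = levels(mouse_activity$Sex))
cbind(newdat, predicted_SLA = predict(mouse3, newdata = newdat))
```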
Further post-hoc testing would not really be helpful. Let's finish by plotting out our means and confidence intervals. This is a fairly complicated piece of code because we're also plotting the data over the top, and drawing the x-axis labels in using the `axis()` function twice, once for each line.

```{r fig.cap = "**Figure 2.** Spontaneous Locomotor Activity (SLA) as measured in IR beam triggers over 12 hours for mice with paternal exposure to either nicotine or water. Diamonds indicate means and error bars are 95% confidence intervals."}
# Load the gplots package so we can use
# the plotmeans() function
library(gplots)

# Plot means and 95% CIs. Note no x-axis (xaxt = "n") or x-axis label
# (xlab = "") plotted, no lines connecting the means (connect = FALSE),
# no sample size indicators (n.label = FALSE)
# and we are drawing the means and CIs larger and thicker
# than the defaults (cex = 2.5, barwidth = 2).
plotmeans(
  SLA ~ interaction(Treatment, Sex),
  data = mouse_activity,
  n.label = FALSE,
  pch = 18,
  cex = 2.5,
  col = "darkblue",
  barcol = "darkblue",
  barwidth = 2,
  connect = FALSE,
  xlab = "",
  ylab = "SLA (triggers per 12 hours)",
  xaxt = "n",
  ylim = c(0, 24000)
)

# Use points function to draw in the data.
# The extra 88 at the end of the hex code for
# the colour makes the points semitransparent

# This horror show of nested parentheses:
# jitter(as.numeric(interaction(Treatment,Sex)), 0.6),
# uses the interaction() function to generate a factor
# with all the combinations of levels from the interaction
# of sex and treatment, then converts it to numerical
# values, then adds some noise with the jitter() function
# in order to reduce the amount of overplotting for the
# individual data points in the final graph
points(
  SLA ~ jitter(as.numeric(interaction(Treatment, Sex)), 0.6),
  data = mouse_activity,
  pch = 16,
  col = "#00777788",
  cex = 1.2
)

# Draw in the axis, the axis ticks and the top line of
# the tick labels
axis(
  side = 1,
  at = 1:4,
  labels = c("Nicotine", "Water", "Nicotine", "Water")
)

# Use axis to draw in the "Female" and "Male" labels
# at the appropriate place. Line = 1.5 moves the axis down
# and lwd = 0 means the axis line is not drawn.
axis(
  side = 1,
  at = c(1.5, 3.5),
  line = 1.5,
  labels = c("Female", "Male"),
  lwd = 0
)
```

## Exercise: multi-factor ANOVA

Here we will use some data from a study on bacterial adaptation to host gut environments, originally published in 2018 by a group of researchers at the University of Oregon in Eugene and the Canadian Institute for Advanced Research^2^. They looked, among other things, at the competitive ability of a bacterium called *Aeromonas veronii* after either 4 or 18 passages through the gut of an otherwise germ-free larval zebrafish. Competitive ability was measured *in vivo* by inoculating a larval zebrafish with a dose of both the experimental line of bacteria and an ancestral line tagged with Green Fluorescent Protein. After 3 days the gut contents were homogenised and the homogenate plated onto tryptone soy agar. After a suitable incubation period the colonies of each strain were counted, using a fluorescence microscope to distinguish them, and a "competitive index" calculated as the ratio of the number of colonies for the adapted line to the colonies from the ancestral line.
The particular experiment which we will look at here is one in which the experimenters compared the competitive ability of bacteria in the host strain in which they had been evolving (WT hosts) with the competitive ability of the same bacteria but competing in hosts of a different genetic background (an immunodeficient myd88^-^ mutant). They also compared the competitive abilities of bacteria which had had a short period of adaptation (4 passages) with bacteria which had a longer period (18 passages).

The data are loaded as the data frame `zebra_bacteria`. We can check its structure with `str()`.

```{r}
str(zebra_bacteria)
```

The response variable is `CI` (Competitive Index) and we have three other variables: `Line`, `Host` and `Passage`. `Line` is a number from 1 to 3 and refers to the fact that the experimenters actually repeated the experiment three times with three independently evolved lines of bacteria. For the present we'll just analyse the data from line 1, but we will look at a model including this factor in the tutorial on model selection. Let's start by using `subset()` to select just the data associated with Line 1. We'll call our new data frame `bacteria1`.

```{r}
bacteria1 <- subset(zebra_bacteria, Line == 1)
```

None of our explanatory vectors are currently factors but we need both `Host` and `Passage` to be specified as such. You can do this using the `as.factor()` function. I've done one, you'll need to do the other. Once you've done that check the data frame again using `str()`.

```{r make_factors, exercise = TRUE}
bacteria1$Host <- as.factor(bacteria1$Host)
```

```{r make_factors-hint-1}
# This is the solution

bacteria1$Host <- as.factor(bacteria1$Host)

bacteria1$Passage <- as.factor(bacteria1$Passage)

str(bacteria1)
```

All looks good so far. You can see that we've gone from 208 observations to 64 as a consequence of subsetting out just one line's worth of data. There are two factors to include in our analysis: `Host` has two treatments, `WT` and `myd88`, and `Passage` also has two treatments, `4` and `18`.

Before we jump in and fit a model, though, we should have a look at our data. See if you can draw a boxplot of `CI` as explained by `Host` and `Passage`.

```{r bacteria_boxplot, exercise = TRUE, exercise.lines = 6}

```

```{r bacteria_boxplot-hint-1}
# You can use the boxplot() function and put in
# the variable names in a formula as you would
# for a linear model. You need to specify the dataframe
# using the data = argument
```

```{r bacteria_boxplot-hint-2}
# Don't forget to put some sensible axis
# labels in.
# Make sure there are commas between each argument
```

```{r bacteria_boxplot-hint-3}
# Here's a code framework which might help

boxplot(response ~ explanatory1 * explanatory2,
        data = dataframe,
        ylab = "",
        xlab = "")
```

```{r bacteria_boxplot-hint-4}
# Here's a solution

boxplot(CI ~ Passage * Host,
        data = bacteria1,
        ylab = "Competitive index",
        xlab = "Treatment combination")
```

What do you see?
```{r boxplot-quiz, echo=FALSE}
question(
  "What can we say about the shape of the data distributions?",
  answer("There is strong positive skew", correct = TRUE),
  answer("It is not clear but there is some indication of negative skew"),
  answer("It's not possible to say because the sample size is too small"),
  answer("The boxplots are roughly symmetrical indicating no major problems with the data distribution")
)
```

<details><summary>**Click here for more: what to do with these data?**</summary>

As you have hopefully seen, these data look like they are very positively skewed. Almost all of the data have values between zero and about 1000, but there are also some datapoints with much higher values. Given that the linear model assumes normality in the errors, or the residuals, rather than in the raw data, we should be cautious about worrying about apparent violations in the raw data. In this case, however, it's clear that if we were to fit a model to these data our residuals would be exceedingly skewed, and also we can imagine that those extreme values would have some disproportionate effects on our fitted model.

We can use a data transformation to try to fix this extreme skew, with the two easy options being a square root transformation and a log transformation. Just checking:

```{r}
summary(bacteria1$CI)
```

There aren't any zeros in the data so we can use a log transformation without having to add a constant to the dataset. Let's compare what we see with the two different transformations: see if you can modify this code to plot one boxplot of square root transformed data and then another one with log transformed data.

```{r transformations, exercise = TRUE}
boxplot(CI ~ Passage * Host,
        data = bacteria1,
        ylab = "Competitive index",
        xlab = "Treatment combination")

boxplot(CI ~ Passage * Host,
        data = bacteria1,
        ylab = "Competitive index",
        xlab = "Treatment combination")
```

```{r transformations-hint-1}
# The function for a square root transformation is sqrt()
# The function for a log transformation is log()
```

```{r transformations-hint-2}
# This is the solution

boxplot(sqrt(CI) ~ Passage * Host,
        data = bacteria1,
        ylab = "Square root of competitive index",
        xlab = "Treatment combination")

boxplot(log(CI) ~ Passage * Host,
        data = bacteria1,
        ylab = "Log competitive index",
        xlab = "Treatment combination")
```

This is a nice illustration of the effect of the two different transformations. You can see that the square root transformation is not quite sufficient to get rid of the skew in these data, but the log transformation gives us some nicely symmetrical boxplots. We'll do our analysis on log transformed data.

</details>

<br><br><hr>

2. Robinson, C.D., Klein, H.S., Murphy, K.D., Parthasarathy, R., Guillemin, K. & Bohannan, B.J.M. (2018) Experimental bacterial adaptation to the zebrafish gut reveals a primary role for immigration. PLoS Biology, 16, e2006893.

## Model fitting and diagnostics

We have two explanatory factors and we'd like to fit both of these plus their interaction. Save the fitted model object as `M1`, and then check the significance of the interaction term by bringing up an ANOVA table using `anova()`. We won't worry about type I versus type III sums of squares for the moment since we're really only concerned with the significance of the interaction term. Don't forget to log the response variable and tell `lm()` that you're using data from `bacteria1`.
As a consequence of some weirdness that happens with the learnr package, please enclose your `anova()` function call in a `print()` function to keep the ANOVA table formatting consistent. Sorry about this.

```{r model_fit_1, exercise = TRUE}

```

```{r model_fit_1-hint-1}
# Use lm() to fit the model with a formula which
# is the same as you used for the boxplots, and
# remember to specify what the dataframe is with data =
# for the print() stuff it's just print(anova(M1))
```

```{r model_fit_1-hint-2}
# Here is the solution

M1 <- lm(log(CI) ~ Passage * Host, data = bacteria1)

print(anova(M1))
```

You can see from this that we have a significant interaction between `Host` and `Passage` --- what this means is that the effect of `Passage` depends on whether the level of `Host` is `WT` or `myd88`, or alternatively, the effect of `Host` depends on whether `Passage` is `4` or `18`. Before we look into this further we should check the diagnostics for our model. Remember that you can bring up just the first two diagnostic plots with `which = 1:2`.

```{r prepare-M1, echo = FALSE}
bacteria1$Passage <- as.factor(bacteria1$Passage)

M1 <- lm(log(CI) ~ Passage * Host, data = bacteria1)
```

```{r bacteria_model1_diagnostics, exercise = TRUE, exercise.setup = "prepare-M1"}

```

```{r bacteria_model1_diagnostics-hint-1}
# Just use the plot() function on your model object
# and specify which = 1:2 as a second argument
```

```{r bacteria_model1_diagnostics-hint-2}
# This is the solution

plot(M1, which = 1:2)
```

What do the diagnostic plots tell us? More than one answer can be correct.

```{r diagnostics-quiz, echo=FALSE}
question(
  "Which of the following is correct? More than one answer can be correct.",
  answer("Data points 25, 33 & 44 are identified as outliers and should be removed", message = "Answer 1. R will always identify the three residuals with the largest absolute values. That doesn't mean they are problematic and in this case there's no reason to be concerned. "),
  answer("Because the points in the qq plot are all close to the line of unity we have a residual distribution which is very close to a normal distribution", correct = TRUE),
  answer("The systematic deviation from the line in the qq plot could be because the data are not independent", message = "Answer 3. The qq plot can't tell you anything about the independence of your data points, it only gives you information on how similar your residuals are to a normal distribution "),
  answer("The plot of residuals versus fitted values indicates potentially important problems with heteroskedasticity", message = "Answer 4. The amount of dispersion in the residuals is roughly the same across the plot, indicating that there's no substantial change in the variance as the fitted values change "),
  answer("The plot of residuals versus fitted values indicates potentially important problems with non-independence of data as shown by the data being divided into four groups", message = "Answer 5. The data are divided into four groups because the model is estimating four means "),
  answer("The diagnostic plots are fine and give no reason for concern", correct = TRUE)
)
```

## Interpretation

We have a fitted model with a highly significant interaction term and we're happy that the diagnostic plots aren't showing us anything that gives any cause for concern. Next question: what does this mean? The next thing we should do is bring up the summary for the model. Have a look and see if you can answer the questions.
Remember that higher numbers for the competitiveness index mean that the experimentally evolved strain is more competitive.

```{r summary_M1, exercise=TRUE, exercise.setup = "prepare-M1"}

```

```{r summary_M1-hint-1}
# You just need to use the summary function
# on the fitted model object
```

```{r summary_M1-hint-2}
# This is the solution

summary(M1)
```

```{r summary_quiz1, echo = FALSE}
quiz(
  caption = "Model summary quiz",
  question("The line labelled (Intercept) gives the estimated mean value for what?",
    answer("WT Host and 4 Passages"),
    answer("WT Host and 18 Passages"),
    answer("myd88 Host and 4 Passages", correct = TRUE),
    answer("myd88 Host and 18 Passages")
  ),
  question("What is the estimated mean for myd88 Host and 18 Passages?",
    answer("2.616"),
    answer("2.616 + 1.3122", correct = TRUE),
    answer("2.616 - 1.5341"),
    answer("2.616 + 1.3122 - 1.5341")
  ),
  question("Considering _only the myd88 treatments_, which statement is true?",
    answer("Bacteria passaged for 18 generations are significantly more competitive than those passaged for 4"),
    answer("Bacteria passaged for 18 generations are significantly less competitive than those passaged for 4"),
    answer("The competitiveness of bacteria passaged for 18 generations is significantly similar to that of bacteria passaged for 4 generations"),
    answer("Bacteria passaged for 18 generations are slightly more competitive than those passaged for 4, but we have little confidence that this pattern has arisen by anything other than random chance", correct = TRUE)
  )
)
```

<br><br>

### Understanding the interaction

For the bacteria competed in the myd88^-^ hosts the summary table tells us that there is little effect of the number of passages the bacteria went through in their experimental evolution: the estimated mean for 4 passages is similar to that for 18, and the marginal p-value for the row of the coefficients table in question is considerably greater than 0.05. So far all is fine; now what about the bacteria competing in the wild type (WT) hosts? This is where the interaction term shows itself. Here's that coefficients table again:

```
Coefficients:
                 Estimate Std. Error t value Pr(>|t|)    
(Intercept)        2.6160     0.5420   4.826 9.95e-06 ***
Passage18          1.3122     0.7666   1.712   0.0921 .  
HostWT            -1.5341     0.7666  -2.001   0.0499 *  
Passage18:HostWT   2.8503     1.0841   2.629   0.0109 *  
```

The `(Intercept)` is the estimated mean for myd88^-^ hosts and 4 passages. The `Passage18` row gives the estimated change in the mean log CI associated with changing the bacteria from those that had 4 passages to those that had 18. If there were no interaction, as in the example that was worked through earlier in the tutorial, this would apply to all levels of the other factor. Because there is an interaction, however, this applies to the myd88^-^ treatments but things get a bit more complicated with the WT treatments.

The `HostWT` row is the effect of changing `Host` to WT from myd88^-^, when the bacteria have been through 4 passages. You can see that this is a negative effect and that it is associated with a small marginal p-value, meaning that we're somewhat confident that this isn't just something that arose through sampling error --- the p-value is only fractionally below 0.05, however, so we have to be cautious. The estimated mean log CI for WT hosts and 4 passages is 2.616 - 1.5341 = 1.0819.

Now, what about the mean for WT hosts and bacteria which have had 18 passages? If there were no interaction term this would be 2.616 (intercept) + 1.3122 (effect of passage number) - 1.5341 (effect of host).
It's not though, because we have an interaction term, so the effect of passage number depends on the host. The last row of the coefficients table, `Passage18:HostWT`, gives us the effect of this interaction which is 2.8503. This is the difference in the effect of passage number when the host is WT as opposed to an myd88^-^ host: so the estimated mean for WT hosts and bacteria that have had 18 passages is (wait for it) 2.616 (intercept) + 1.3122 (effect of passage number in myd88^-^ hosts) - 1.5341 (effect of host) + 2.8503 (difference in the effect of passage number depending on the host) = 5.2444. We can check this against the actual mean. ```{r} with(bacteria1, mean(log(CI[Passage == "18" & Host == "WT"]))) ``` They're the same. Now that we have an idea of what the coefficients table means, how about trying to come up with a useful interpretation of what all this is telling us. This will be easier if we have a plot of our means and confidence intervals. This is some of the code we used above to draw the means and confidence intervals for the mouse movement data: we'll skip drawing in the jittered raw data for the moment. See if you can adapt this code to give us a plot of means and confidence intervals for the bacterial competition data. ```{r confidence_plot, exercise = TRUE, exercise.lines = 14} library(gplots) plotmeans( SLA ~ interaction(Treatment,Sex), data = mouse_activity, n.label = FALSE, pch = 18, col = "darkblue", barcol = "darkblue", connect = FALSE, xlab = "", ylab = "SLA (triggers per 12 hours)" ) ``` ```{r confidence_plot-hint-1} # You need to: # Change the variable names in the formula # Change the name of the data frame in data = # Change the y-axis label # # You can also, if you wish: # Change the plot symbol # Change the colours # # Don't forget you need to log the response variable ``` ```{r confidence_plot-hint-2} # The formula should read log(CI) ~ interaction(Passage,Host) ``` ```{r confidence_plot-hint-3} # The data = argument should read data = bacteria1 ``` ```{r confidence_plot-hint-4} # Here is a solution library(gplots) plotmeans( log(CI) ~ interaction(Passage,Host), data = bacteria1, n.label = FALSE, pch = 16, col = "darkblue", barcol = "darkblue", connect = FALSE, xlab = "", ylab = "Log competitive index" ) ``` The x-axis labels are not marvellous but we can see what's going on. Let's summarise what we've got now. * When the host is myd88^-^, passage number does not significantly affect the competitive ability of the bacteria * When the host is WT, the competitive ability increases substantially with passage number such that bacteria that have undergone 18 passages are much better competitors than those that have undergone 4 passages. This is what the interaction term means in this case: the effect of one variable depends on the value of the other. Finally we can also note that * Bacteria passaged 4 times were less competitive in WT hosts than in myd88^-^ hosts. ### Two final points Let's look at the ANOVA table for this model (it doesn't matter whether we use type I or type III sums of squares, the result is qualitatively the same). ```{r echo = FALSE} M1 <- lm(log(CI) ~ Passage * Host, data = bacteria1) ``` ```{r} print(anova(M1)) ``` We can see that in addition to the highly significant interaction term we have a significant main effect of Passage but not one of Host. Does that mean much?
Since we know that the effect of passage number is completely dependent on the host (and *vice versa*), is it even meaningful to talk about the statistical significance of the main effect? Most statisticians would say no --- when a main effect is also present in a higher-order interaction term in a model, as is the case here, there is little meaning to the statistical significance of the main effect. Many researchers nowadays would not report these p-values, and would only report the significance of the interaction term since that is the only one that has much meaning. A second point arises when we think about ways to test the idea that passage number has an effect in one group of hosts but not in another. You could think about testing this, not by fitting an ANOVA and looking for an interaction, but by doing two t-tests, one for each host type. ```{r} t1 <- t.test(log(bacteria1$CI) ~ bacteria1$Passage, subset = bacteria1$Host == "WT") t2 <- t.test(log(bacteria1$CI) ~ bacteria1$Passage, subset = bacteria1$Host == "myd88") cat("P-value for WT hosts = ", t1$p.value) cat("P-value for myd88- hosts = ", t2$p.value) ``` The t-test for WT hosts finds a highly significant difference, whereas the one for myd88^-^ hosts does not. Aha! you could say, there is a significant difference in one host but not the other, so the effect varies between hosts. OK... but this is not really a good test for whether the effect varies between hosts. The most important problem with this approach is that a non-significant result does not indicate that there is no effect, rather that we are unable to say with confidence that there is an effect. Furthermore, the analysis is not asking directly whether there is a difference between hosts and it could be affected by (for example) smaller sample sizes in one host than the other. Fitting a model and testing for an interaction is a far, far better way of asking whether an effect varies between groups. Nonetheless, this is a common statistical error and you are likely to see analyses reported where tests like this are used in this way --- don't fall into this trap yourself. ### Publication standard plot The plot we drew above is OK but not really up to scratch for a publication. Here's some code, similar to that which we used earlier for the mouse movement data, which will do a rather better job. It's fairly complex but feel free to have a look through it if you'd like to know more. I've also added some code for producing a similar figure in ggplot2 below. Again, this is only really here as an example and if you're not familiar with ggplot2 then just ignore it. ```{r fig.width = 6, fig.height = 6} library(gplots) # Plot means and 95% CIs. Note no x-axis (xaxt = "n") or x-axis label # (xlab = "") plotted, no lines connecting the means (connect = FALSE), # no sample size indicators (n.label = FALSE) # and we are drawing the means and CIs larger and thicker # than the defaults (cex = 2.5, barwidth = 2). plotmeans( log(CI) ~ interaction(Passage,Host), data = bacteria1, n.label = FALSE, pch = 16, cex = 2.5, col = "darkblue", barcol = "darkblue", barwidth = 2, connect = FALSE, xlab = "", ylab = "Log Competitive Index", xaxt = "n", ylim = c(-3, 10) ) # Use points function to draw in the data. # The extra 55 at the end of the hex code for # the colour makes the points semitransparent. # In the previous plot we had a value of 88 for this # but because there is more overplotting here I made # the points more transparent.
# This horror show of nested parentheses: # jitter(as.numeric(interaction(Passage,Host)), 0.6), # uses the interaction() function to generate a factor # with all the combinations of levels from the interaction # of Host and Passage, then converts it to numerical # values, then adds some noise with the jitter() function # in order to reduce the amount of overplotting for the # individual data points in the final graph points( log(CI) ~ jitter(as.numeric(interaction(Passage,Host)), 0.6), data = bacteria1, pch = 16, col = "#00777755", cex = 1.2 ) # Draw in the axis, the axis ticks and the top line of # the tick labels # padj = moves the text down a little # The \n in the labels is an 'escape character' that # adds a carriage return axis( side = 1, at = 1:4, padj = 0.5, labels = c("4 \nPassages", "18 \nPassages", "4 \nPassages", "18 \nPassages") ) # Use axis to draw in the "myd88" and "Wild Type" labels # at the appropriate place. line = 2.2 moves the axis down # and lwd = 0 means the axis line is not drawn. # expression('myd88' ^ "\u2212" ) gives us the label with a # superscript minus sign. The \u2212 is a way of using the # unicode symbol for a minus sign which looks a bit better. axis( side = 1, at = c(1.5, 3.5), line = 2.2, labels = c(expression('myd88' ^ "\u2212" ), "Wild Type"), lwd = 0 ) ``` ### ggplot2 example `ggplot()` won't calculate the 95% CIs for us like `plotmeans()` does so we'll need to do these ourselves. ```{r} # Function to calculate the confidence interval distance ci95 <- function(x) { return(qt(0.975, df = length(x) - 1) * sqrt(var(x)) / sqrt(length(x))) } # Calculate means and the size of the CIs for each factor level combination means <- with(bacteria1, tapply( X = log(CI), INDEX = interaction(Host,Passage), FUN = mean )) CIs <- with(bacteria1, tapply( X = log(CI), INDEX = interaction(Host,Passage), FUN = ci95 )) # Calculate upper and lower confidence intervals # for each mean UpperCIs <- means + CIs LowerCIs <- means - CIs # Assemble it into a data frame Host <- c("myd88", "WT", "myd88", "WT") Passage <- c(4,4,18,18) data1 <- data.frame(Host, Passage, means, UpperCIs, LowerCIs) # Clean up rm(means, CIs, UpperCIs, LowerCIs, Host, Passage) # Check the data frame print(data1) ``` Now that we have our mini data frame with the important values we can plot a graph with `ggplot()`. ```{r} # set up the plot basics p1 <- ggplot(data = data1, aes( x = Host, y = means, colour = as.factor(Passage) )) + # Specify the y axis limits ylim(-3, 10) + # Add the points for the means geom_point(position = position_dodge(width = 0.75), size = 3) + # Add the errorbars geom_errorbar( aes( x = Host, ymin = LowerCIs, ymax = UpperCIs, colour = as.factor(Passage) ), position = position_dodge(width = 0.75), width = 0.1 ) + # Add in the data with some transparency and jitter geom_point( data = bacteria1, aes( x = Host, y = log(CI), colour = as.factor(Passage) ), alpha = 0.4, position = position_jitterdodge(), show.legend = FALSE ) + # Set the colours scale_color_manual(values = c("firebrick4", "steelblue")) + # Avoid the nasty default theme theme_bw() + # Y axis label and the caption for the legend labs(y = "Log competitive index", colour = "Passage \n number") # Plot the graph p1 ``` <br><br><hr> ## License This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
--- title: "Linear models 5: Multiple Regression" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Linear models with more than one continuous explanatory variable: how to fit models, check diagnostics and interpret the output. --- ```{r setup, include=FALSE} library(learnr) library(gplots) library(ggplot2) knitr::opts_chunk$set(echo = TRUE, comment = NA, message = FALSE, fig.width = 5, fig.height = 5) load("finch_colours.rda") load("cricket_song.rda") ``` ## Multiple continuous explanatory variables. In much the same way that we can extend the simple ANOVA type linear model to include multiple explanatory factors, we can extend the simple regression type linear model to include multiple continuous explanatory variables --- an analysis that's often called *multiple regression*. This has the same advantages as fitting multiple explanatory factors: we carry out our analysis as a single model fitting exercise rather than as a series of individual analyses; we can detect effects that would otherwise not be found; we can control for the effects of variables statistically; and we can check for interaction terms between our variables. Much of the procedure for dealing with multiple continuous explanatory variables is similar to that for multiple factors so we can jump straight into some exercises. Note that some of the material here is a bit advanced and it's assumed that you've done the earlier tutorials in the series or are otherwise familiar with basic linear modelling in R. If that's not the case then you might want to do the first three tutorials before attempting this one. ## Exercise 1: house finch colouration and mitochondrial function The data we will analyse here come from a study published in 2019 by Geoffrey Hill of Auburn University and co-workers^1^. This research was based on the observation that many animals use visual signals with carotenoids as the pigment in question, and it's now well known that in many cases the redder the visual signal the better the signaller performs in terms of acquiring mates or competing for resources. The mechanism linking red carotenoid signalling to individual quality is not known, however, but Hill *et. al.* followed previous work which had found that many bird species ingest yellow carotenoids but then oxidise these to red pigments, probably in the mitochondria. One part of Hill *et al.*'s investigation into this involved measuring the hue of the red feathers and a series of measures of mitochondrial function in 36 male house finches *Haemorhous mexicanus* at a time when they were moulting and therefore actively producing red carotenoids. ![](images/house_finches.jpg){width="400"} **Figure 1** Male and female house finches showing the sexually dimorphic red colouration of the male. Photo by Donald Willin, released under a [creative commons attribution 2.0 licence](https://creativecommons.org/licenses/by/2.0/). The dataset which we'll analyse is a subset of a larger dataset which was analysed by Hill *et al.*. The dataframe is loaded and called `finch_colours`. We'll start by checking its structure. ```{r} str(finch_colours) ``` We have `Hue` as a response variable. Higher values of `Hue` indicate yellower plumage and lower values redder for a particular bird. 
There are then three measures of mitochondrial function: firstly, `C1RCR` which refers to the "Respiratory Control Ratio", calculated by dividing the maximum respiration rate by the basal rate. Secondly we have `C1MMP` which refers to the Mitochondrial Membrane Potential, and finally there is `PGC_1a`, a measure of the levels of a protein which activates transcription in mitochondrial biogenesis, so a measure of the rate at which mitochondria are replaced. As always we'll start with some exploratory graphs. In this case we'll draw scatterplots of Hue against our three measures of mitochondrial function. See if you can fill in the missing variables in this code block ```{r fig.height = 10, scatterplots, exercise = TRUE, exercise.lines = 27} par(mfrow = c(3,1)) plot(Hue ~ C1RCR, data = finch_colours, pch = 16, col = "aquamarine4", ylab = "Hue", xlab = "Respiratory control ratio") plot(XXXXX ~ XXXXX, data = XXXXX, pch = 16, col = "aquamarine4", ylab = "Hue", xlab = "Mitochondrial membrane potential") plot(XXXXX ~ XXXXX, data = XXXXX, pch = 16, col = "aquamarine4", ylab = "Hue", xlab = "PGC 1a") par(mfrow = c(1,1)) ``` ```{r scatterplots-hint-1} # What you need to do is to put the relevant # variable names into the formulas in the # second and third plot function calls, and # fill in the name of the data frame in the # data = argument ``` ```{r scatterplots-hint-2} # This is the solution par(mfrow = c(3,1)) plot(Hue ~ C1RCR, data = finch_colours, pch = 16, col = "aquamarine4", ylab = "Hue", xlab = "Respiratory control ratio") plot(Hue ~ C1MMP, data = finch_colours, pch = 16, col = "aquamarine4", ylab = "Hue", xlab = "Mitochondrial membrane potential") plot(Hue ~ PGC_1a, data = finch_colours, pch = 16, col = "aquamarine4", ylab = "Hue", xlab = "PGC 1a") par(mfrow = c(1,1)) ``` Looking at these plots we can see that there might be relationships between some of these variables and the redness of the bird's plumage. Recalling that high values for `Hue` mean yellower feathers, there might be a positive relationship between Hue and PGC 1a expression, and there might be negative relationships with respiratory control ratio and mitochondrial membrane potential (meaning in the last two cases that "better" individuals with redder feathers have higher values for RCR and MMP). More usefully, there's no indication of anything that might cause problems in our analysis: no wildly implausible or potentially problematic data points and everything looks quite well behaved. One data point for MMP does seem a little low given the distribution of the others but not to the extent that we need to worry much about it. Let's fit a model. Fill in the formula in the `lm()` function call. We want to explain the patterns in `Hue` and we'll fit just the main effects of `C1RCR`, `C1MMP` and `PGC_1a`: we won't test for interaction terms here. We'll use `drop1()` to check on the significance of each explanatory variable. Note that we're using `print()` to get the same output that you'd get from R in the console --- if you were doing this analysis outside a tutorial like this it would not be necessary. ```{r model_1, exercise = TRUE} M1 <- lm(, data = finch_colours) print(drop1(M1, test = "F")) ``` ```{r model_1-hint-1} # Hue is the response variable so that goes on the left of # the tilde. 
The three explanatory variables go on the # right hand side of the tilde, separated by plus symbols ``` ```{r model_1-hint-2} # This is the solution: M1 <- lm(Hue ~ C1RCR + C1MMP + PGC_1a, data = finch_colours) print(drop1(M1, test = "F")) ``` According to our deletion test two of our three explanatory variables have statistically significant effects on the redness of the finch's feathers. The third, mitochondrial membrane potential, does not. Depending on our opinion of the various arguments surrounding different model selection techniques, we could potentially choose to remove this non-significant term from our model to give us a *minimal adequate model* to describe the patterns in these data (see the tutorial on model selection for more on this). Since `C1MMP` is not a variable which we have introduced into the model to allow us to control for an effect, and it is not a variable that is in the model because it represents some important element of experimental design (e.g. if the design were a blocked one we should include `block` as a factor) we can argue that removing it is justified. Fit a second model called `M2` and assess the significance of the explanatory variables using `drop1()`. ```{r model_2, exercise = TRUE} ``` ```{r model_2-hint-1} # Just use the code from the previous fit with C1MMP # removed and M1 replaced by M2 # # Don't forget to change the name of the model in the # drop1() function call ``` ```{r model_2-hint-2} # This is the solution M2 <- lm(Hue ~ C1RCR + PGC_1a, data = finch_colours) print(drop1(M2, test = "F")) ``` This leaves us with a model with two significant explanatory variables. We should check our diagnostic plots now. ```{r echo = FALSE} M2 <- lm(Hue ~ C1RCR + PGC_1a, data = finch_colours) ``` ```{r} plot(M2, which = 1:2) ``` Have a think about the diagnostic plots and try to answer the questions. ```{r diagnostics-quiz, echo=FALSE} question( "Which of the following statements is correct? More than one answer can be correct.", answer("The residuals versus fitted values plot shows evidence of strong heteroskedasticity", message = "Answer 1. Heteroskedasticity means that the variance is varying across the range of fitted values, often leading to a plot with a characteristic wedge shape. There is no suggestion of that here"), answer("It is not clear but there is some indication of negative skew", message = "Answer 2. There's nothing here to indicate negative skew, which would show as more very negative residuals and a convex curve of points on the qq-plot"), answer("The qq-plot shows that there is some positive skew in the residuals", correct = TRUE), answer("The qq-plot shows that the relationship between at least one of the explanatory variables and the response variable is non-linear", message = "Answer 4. The qq-plot can only tell you about the distribution of the residuals, not about whether the shape of the relationship between response and explanatory variables is linear"), answer("The presence of some residuals with high positive values which are not mirrored in the negative residuals indicates that there might be some positive skew in the residuals", correct = TRUE) ) ``` <br><br> <details><summary>**Click here for more on the diagnostics**</summary> Rule 1 of dealing with real data is that your diagnostic plots are rarely going to give you a clear answer about the distributions of your residuals. This is a prime example.
Both of these plots show us that there is a small amount of positive skew in our residuals: in the plot of residuals versus fitted values you can see some residuals with rather high positive values, and the qq-plot has the characteristic shape associated with positive skew: the points follow a curve, with the smallest and the largest residuals both having more positive values than would be predicted if the residuals were following a normal distribution. What isn't clear is whether this amount of skew is likely to make a serious impact on our fitted model. We can check a histogram of the residuals to see if this gives any further enlightenment. ```{r} hist(M2$residuals, breaks = 10) ``` These are clearly somewhat positively skewed. Overall the amount of skew in the residuals is not especially severe, but we should probably try to do something about it. We could try a square root transformation of these data but, to cut a long story short, a square root transformation doesn't really rectify the problem so a log transformation is necessary. See if you can change this code to log transform the response variable so we can see what this does to the model with all three explanatory variables. There are no zeros in the `Hue` variable so there's no need to add a constant. ```{r log_model1, exercise = TRUE} L1 <- lm(Hue ~ C1RCR + C1MMP + PGC_1a, data = finch_colours) print(drop1(L1, test = "F")) ``` ```{r log_model1-hint-1} # Just use the log() function on the Hue variable ``` ```{r log_model1-hint-2} # This is the solution L1 <- lm(log(Hue) ~ C1RCR + C1MMP + PGC_1a, data = finch_colours) print(drop1(L1, test = "F")) ``` `C1MMP` remains non-significant but `PGC_1a` is now slightly the wrong side of 0.05. Let's simplify the model by removing the variable with the least support, `C1MMP`, and see what that looks like. ```{r} L2 <- lm(log(Hue) ~ C1RCR + PGC_1a, data = finch_colours) print(drop1(L2, test = "F")) ``` OK, since we removed the less well supported term from the model, `PGC_1a` is statistically significant in this model, although not especially so. Let's re-check the diagnostics to see if the log transformation has sorted out the error distribution. ```{r} plot(L2, which = 1:2) ``` Things are better. The residuals versus fitted values plot no longer has any indication of skew, but there is a hint of a curve from the non-parametric smoother that R kindly draws across it in red. Is that indicative of a serious issue? Probably not in this case since the appearance of a curve is really being caused by a few negative residuals at the highest and lowest fitted values. If you were concerned about this you could try fitting a model with a quadratic term for one of the explanatory variables but we'll leave it there. The qq-plot shows that the log transform has fixed the issues with the most positive residuals although there is still some deviation from the line from the most negative residuals. </details> <br><br><hr> 1. Hill, G.E., Hood, W.R., Ge, Z., Grinter, R., Greening, C., Johnson, J.D., Park, N.R., Taylor, H.A., Andreasen, V.A., Powers, M.J., Justyn, N.M., Parry, H.A., Kavazis, A.N. & Zhang, Y. (2019) Plumage redness signals mitochondrial function in the house finch. Proceedings of the Royal Society B: Biological Sciences, 286, 20191354. ## Interpreting the fitted model Take a look at the `summary()` output for the L2 model.
```{r prepare-L2} L2 <- lm(log(Hue) ~ C1RCR + PGC_1a, data = finch_colours) ``` ```{r summary_log_model2, exercise = TRUE, exercise.setup = "prepare-L2"} ``` ```{r summary_log_model2-hint-1} # This is the solution summary(L2) ``` Much as the coefficients table for a simple linear regression gives us values we can use to generate the equation for a straight line, the coefficients here can give us a more complex equation relating our explanatory variables to the response variable. For a model with two continuous explanatory variables, the equation for the fitted model is: $$y = a + b \times x_1 + c \times x_2$$ where $a$ is the intercept, $b$ is the slope for variable $x_1$ and $c$ is the slope for variable $x_2$. This equation allows us to generate predicted values from our model. You will often see linear model equations written with a somewhat different notation: $$y_i = \beta_0 + \beta_1 x_{1i} + \beta_2 x_{2i} + \epsilon_i, \quad i = 1...n$$ This is the formal notation for a linear model with two explanatory variables. It looks rather more complicated but the fundamentals are the same: $\beta_0$ is the intercept and $\beta_1$ and $\beta_2$ are the slopes for the two variables $x_1$ and $x_2$. The subscript $i$ you can see refers to the "i-th" value in the data set. As an example, if we were thinking about the third observation then $i$ would be three. $y_i$ in this case would refer to the value of the third observation in the data set, $x_{1i}$ means the third value of $x_1$ and $x_{2i}$ the third value of $x_2$. Finally, $\epsilon_i$ is the *error* term: so each of the $n$ values of $y$ in the dataset will deviate from the predicted value by some degree, and this part of the equation represents that deviation. For a standard linear model this error is assumed to be drawn from a normal distribution with a mean of zero and a fixed standard deviation, hence the assumptions that the errors in a dataset should be normal and the variance should be equal across the dataset. For the moment we'll stick with the simpler version which will generate the predicted values. We can extract the coefficients from our table above and generate an equation to relate `Hue` to `C1RCR` and `PGC_1a`: $$log(Hue) = 3.03 - 0.262 \times C1RCR + 7.10 \times PGC\_1a.$$ Whereas simple linear regression gives us a predicted straight line, in this case the equation describes a flat surface, and more complicated models with more than two explanatory variables describe more complex surfaces. These are not easy to visualise: as a general rule even "3D" graphs are poor ways to display data and plotting graphs in higher dimensions tends to be problematic. One option you'll often see is to plot the response variable against each explanatory variable separately and then to show a fitted line from a simple linear regression as an illustration of the way the two are related. We'll do this for our example: I've given you the code for the first plot; see if you can generate the code for the second.
```{r plot_variables1, exercise = TRUE, exercise.lines = 35, fig.width = 6, fig.height = 4} # Two plots side by side par(mfrow = c(1,2)) # plot log hue against C1RCR plot(log(Hue) ~ C1RCR, data = finch_colours, pch = 16, col = "steelblue", ylab = "Log of Hue", xlab = "C1RCR level") # use abline() to draw the line from a # simple linear regression abline(lm(log(Hue) ~ C1RCR, data = finch_colours), col = "blue", lwd = 2) # plot log hue against PGC_1a # use abline() to draw the line from a # simple linear regression # reset plot window to a single plot par(mfrow = c(1,1)) ``` ```{r plot_variables1-hint-1} # You can use the code for the first plot # and the abline function # but you need to change the x-variable # name and also the x-axis label ``` ```{r plot_variables1-hint-2} # You can use the code for the first plot # and the abline function # but you need to change the x-variable # name and also the x-axis label # # Make sure you have commas between each argument # and that you have matching brackets ``` ```{r plot_variables1-hint-3} # This is the solution # Two plots side by side par(mfrow = c(1,2)) # plot log hue against C1RCR plot(log(Hue) ~ C1RCR, data = finch_colours, pch = 16, col = "steelblue", ylab = "Log of Hue", xlab = "C1RCR level") # use abline() to draw the line from a # simple linear regression abline(lm(log(Hue) ~ C1RCR, data = finch_colours), col = "blue", lwd = 2) # plot log hue against PGC_1a plot(log(Hue) ~ PGC_1a, data = finch_colours, pch = 16, col = "steelblue", ylab = "Log of Hue", xlab = "PGC 1a level") # use abline() to draw the line from a # simple linear regression abline(lm(log(Hue) ~ PGC_1a, data = finch_colours), col = "blue", lwd = 2) # reset plot window to a single plot par(mfrow = c(1,1)) ``` This is certainly illustrative and in this case it gives a reasonable impression of the way these two variables relate to feather hue, but it's not a great representation of the actual fitted model. Because variables in a linear model are fitted to the data with effects of other variables *partialled out*, you can find that including your variables in linear models can reveal effects which are substantially different from those that you would see if you analysed your variables separately, so be careful if you present your data like this. If you want to show the fitted model and the data more accurately, you can plot the data with the effect of the first variable removed. Our equation for our fitted model in this case is $$log(Hue) = 3.03 - 0.262 \times C1RCR + 7.10 \times PGC\_1a.$$ So we can generate a new variable which is $$log(Hue) - (3.03 - 0.262 \times C1RCR).$$ To understand what this new variable represents, recall that when a linear model is fitted with more than one explanatory variable, the sums of squares are calculated sequentially. So for a two variable model, the variance is firstly divided into the treatment variance for the first variable in the model formula (the variance in the data explained by that variable) and the remaining variance which that first variable can't explain. This can be thought of as being equivalent to what the residuals would be from fitting that first variable. The second variable is then fitted to these data, so you can think of this as fitting the second variable to the response variable once it's been adjusted to remove the effect of the first variable --- statisticians would say that the effects of the first variable have been *partialled out* from the response variable.
This allows us to partition the remaining variance into the treatment variance for the second variable and the error variance. What we're calculating here is essentially those adjusted values for `log(Hue)` with the effects of `C1RCR` partialled out. Now that we have these adjusted data we can plot the effect of PGC 1a against that, which will give a better visualisation of the effect of this variable as predicted by our model. Here's a code framework: see if you can fill in the missing parts to generate a new plot showing the effect of PGC 1a. To draw in the effect of PGC 1a you need a line with an intercept of zero and a slope equal to the coefficient for PGC 1a in the model. ```{r plot_variables2, exercise = TRUE, exercise.lines = 15} # Generate new variable with the effects of C1RCR removed newvar <- XXXXX # plot the new variable against PGC 1a plot(XXXX ~ XXXX, data = finch_colours, pch = 16, col = "steelblue", ylab = "Log of Hue minus the effect of C1RCR", xlab = "XXXX") # use abline() to draw the line from a # simple linear regression abline(a = X, b = X, col = "blue", lwd = 2) ``` ```{r plot_variables2-hint-1} # To generate newvar: newvar <- log(finch_colours$Hue) - (3.03 - 0.262 * finch_colours$C1RCR) ``` ```{r plot_variables2-hint-2} # For the plot plot(newvar ~ PGC_1a, data = finch_colours, pch = 16, col = "steelblue", ylab = "Log of Hue minus the effect of C1RCR", xlab = "PGC 1a") ``` ```{r plot_variables2-hint-3} # For the line abline(a = 0, b = 7.1, col = "blue", lwd = 2) ``` ```{r plot_variables2-hint-4} # This is the solution newvar <- log(finch_colours$Hue) - (3.03 - 0.262 * finch_colours$C1RCR) plot(newvar ~ PGC_1a, data = finch_colours, pch = 16, col = "steelblue", ylab = "Log of Hue minus the effect of C1RCR", xlab = "PGC 1a") abline(a = 0, b = 7.1, col = "blue", lwd = 2) ``` Finally, we need to interpret these results in terms of the biology of the system. Bearing in mind that low scores of Hue indicate male birds with redder plumage, and redness seems to be acting as an indicator of 'condition' in these males, what do these results tell us? For the full story you're best off reading the paper, but briefly: * Overall there does seem to be a link between mitochondrial functioning and feather colour. The data we have here are correlational, of course, so we have to be cautious in our interpretation because we don't know much about causality. * More yellow plumage is associated with high levels of PGC 1a. This protein is associated with mitochondrial biogenesis and this result suggests that perhaps birds with redder feathers have lower rates of mitochondrial turnover. * High RCR values are associated with redder males. RCR is the ratio of maximum respiratory rate to resting rate, and further analysis by the paper's authors showed that the increased RCR in redder males was a consequence of lower resting rate rather than higher maximum rate, suggesting that "better" birds with redder feathers are paying less of a cost for supporting basal respiration. ## Exercise 2: interactions and curves **Warning** This example gets quite complex and technical. In 2017 Tom Houslay and co-workers^1^ published a study of the relationship between, among other things, condition, resource availability (food quality) and sexual signalling (calling song) in the decorated cricket, *Gryllodes sigillatus*. Male crickets "sing" to attract females by rubbing specialised areas of their wings together, an activity which is energetically costly. 
As part of their study, Houslay *et al.* fed freshly eclosed^2^ adult male crickets on synthetic food with a 1:8 protein to carbohydrate ratio which varied in total nutritional content from 12% to 84%, with the amount of nutritional content increasing in 12% increments between these limits. The crickets were weighed at the start of the experiment and again after a week, and the amount of time they spent singing during the week was recorded. The dataframe is loaded as `cricket_song`. We'll begin by checking its structure. ```{r} str(cricket_song) ``` The important variables from our point of view are `Delta_smi` which is the change in weight over the first week, `Diet` which gives the percentage nutritional content of the food the animal was fed and `Song_week1` which is the total amount of time the cricket spent singing in the first week of the experiment. We're interested in how the change in weight of these animals relates to their investment in signalling and to their diet. As always, we'll start by looking at some plots of our data. We'd like a scatterplot of `Delta_smi` against `Diet`, and a second one of `Delta_smi` against `Song_week1`. ```{r scatterplot_1, exercise = TRUE, fig.width = 7} # Two plots side by side par(mfrow = c(1,2)) # Plot of Delta_smi versus diet # Plot of Delta_smi versus Song_week1 par(mfrow = c(1,1)) ``` ```{r scatterplot_1-hint-1} # You need to use the plot() function with the # variables specified by a formula, so the # y-axis variable first, then a tilde ~, then # the x-axis variable. Tell plot() which # dataframe to look in with the data = # argument and label the x- and y- axes # with xlab = "NAME" and ylab = "NAME". # Don't forget to put commas between all # your arguments ``` ```{r scatterplot_1-hint-2} # Here is a code framework. You'll # need one plot() command for Delta_smi # versus Diet, and a second one for # Delta_smi versus Song_week1 plot(Y-VARIABLE ~ X-VARIABLE, data = DATAFRAME, xlab = "X AXIS", ylab = "Y AXIS") ``` ```{r scatterplot_1-hint-3} # Here is the code for the first plot plot(Delta_smi ~ Diet, data = cricket_song, xlab = "Diet nutritional content (%)", ylab = "Week1 Change in weight (g)") ``` ```{r scatterplot_1-hint-4} # Here is the solution # Two plots side by side par(mfrow = c(1,2)) # Plot of Delta_smi versus diet plot(Delta_smi ~ Diet, data = cricket_song, xlab = "Diet nutritional content (%)", ylab = "Week1 Change in weight (g)") # Plot of Delta_smi versus Song_week1 plot(Delta_smi ~ Song_week1, data = cricket_song, xlab = "Time spent singing (s)", ylab = "Week1 Change in weight (g)") par(mfrow = c(1,1)) ``` Have a look at these scatterplots and try to answer these questions. ```{r cricket_scatterplot_quiz1, echo = FALSE} quiz( caption = "Cricket data exploratory graphs quiz", question("Which of the following statements about the graph of weight change versus diet is correct? More than one answer can be correct.", answer("Some crickets lost weight and some gained weight no matter what the diet", correct = TRUE), answer("Change in weight is positively correlated with the nutritional content of the diet", correct = TRUE), answer("Weight change increases with diet nutritional content in a simple straight-line relationship", message = "Answer 3. Weight change does increase with diet, but it's not clear from this plot whether a straight line will be adequate to describe the relationship. 
Looking at the graph it seems that the slope relating the two variables is less for higher values of diet, but it's not clear."), answer("There are no obviously anomalous data points and the distribution of weight change data doesn't appear heavily skewed or otherwise problematic", correct = TRUE), answer("Because the x-variable consists of discrete values we should fit it as a factor rather than as a continuous variable", message = "Answer 5. There's nothing wrong with fitting a variable like this as a continuous variable. We could fit it as either a factor or a continuous variable, but since we're interested in the way that weight change changes with nutrition it makes more sense to fit it as a continuous variable, rather than as a factor which would give us a fitted mean for each value but no indication of the strength or direction of change in weight with increases in nutrition") ), question("Which of the following statements about the graph of weight change versus time spent singing is correct?", answer("There's a negative relationship between time spent singing and weight change", message = "Answer 1. You can't really see what form, if any, the relationship takes because the time spent singing is so heavily skewed"), answer("There's a positive relationship between time spent singing and weight change", message = "Answer 2. You can't really see what form, if any, the relationship takes because the time spent singing is so heavily skewed"), answer("There are some data points with anomalously high values for the time spent singing and these outliers should be removed before further analysis", message = "No, the variable shows obvious positive skew and these high-value datapoints are not obviously anomalous given the nature of these data"), answer("The strong positive skew in the time spent singing makes it hard to see any patterns in these data", correct = TRUE) ) ) ``` <br><br> <details><summary>**Click for more on the patterns detected in the scatterplots**</summary> Looking at these scatterplots we've seen two issues that might concern us. Firstly, there is a suggestion that the relationship between diet and weight change might not conform to a straight line. We'll bear this in mind when we fit our model --- if this is the case we should see evidence for it in the diagnostic plots. Secondly, one of our explanatory variables, `Song_week1`, is strongly positively skewed. We can confirm this by plotting a histogram: ```{r} hist(cricket_song$Song_week1, breaks = 10) ``` Is this a problem? It's important to understand that there's no requirement for explanatory variables to follow, for example, a normal distribution: this only applies to the residuals of the response variable once an appropriate model has been fitted. Strong skew like this, however, tends to obscure any relationship between the response and the explanatory variable, and those few data points with very large values are likely to have a disproportionate effect on the overall model fit. It's best in this case to transform these data to reduce the skew, and with this degree of skew a log transformation is appropriate. There are some zeros in the data so we will add a constant, in this case 1, to our variable before transforming it. Let's see what this does: ```{r} hist(log(cricket_song$Song_week1+1), breaks = 10) ``` This distribution is still pretty weird, with a small amount of negative skew in the non-zero values and a good number of zeros as well.
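If you want to put a number on those zeros, a quick check along these lines (just counting the zeros in the raw variable) will tell you how many crickets never sang at all and what proportion of the sample that is:

```{r}
# Count the crickets with no singing recorded in week 1,
# and the proportion of the sample that they make up
sum(cricket_song$Song_week1 == 0)
mean(cricket_song$Song_week1 == 0)
```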
You could argue that this is evidence that there are really two classes of cricket: 'singers' and 'non-singers' and that it's not appropriate to class them all together, but that's something for a different analysis. Is this OK as a distribution for an explanatory variable? Yes, probably. It would be nicer to have a more even distribution, but the linear model doesn't make any assumptions about the distribution of the explanatory variables and we've no longer got those few datapoints with very large values so we're probably OK using these data. </details> <br><br><hr> 1. Houslay, T.M., Houslay, K.F., Rapkin, J., Hunt, J. & Bussière, L.F. (2017) Mating opportunities and energetic constraints drive variation in age‐dependent sexual signalling (ed C Miller). Functional Ecology, 31, 728–741. 2. Eclosion refers to the moult that an insect undergoes when it becomes an adult. ## Fitting a model In the previous example we fitted a model with only the main effects of our explanatory variables. In this case we would like to know whether the relationship between weight gain and diet depends on the amount of time the cricket spent singing --- in other words, does the change in weight from having a good or a poor diet depend on the amount of energy the cricket invested into signalling? To test this we need to fit a model with an interaction between `Diet` and `Song_week1` as well as the main effects. Try to do this, and then check the significance of the interaction using `drop1()`. Call your model `C1`. <br> Don't forget you need to add 1 to the Song_week1 variable and log transform it. <br> ```{r cricket_model1, exercise = TRUE, paged.print=FALSE} ``` ```{r cricket_model1-hint-1} # To fit a model with an interaction term you # can either use the format # # variable1 + variable2 + variable1:variable2 # or # variable1 * variable2 # # The second one in this case is # exactly the same as the first but if there # are more variables it will fit all of the # possible interaction terms. ``` ```{r cricket_model1-hint-2} # Don't forget to specify the data frame with # data = # # For the drop1() function call you need to # say what test to use with test = "F" # # Make sure there's a comma between your arguments # # To log transform the Song_week1 variable it's # log(Song_week1 +1) ``` ```{r cricket_model1-hint-3} # Here is the solution C1 <- lm(Delta_smi ~ Diet * log(Song_week1 +1), data = cricket_song) print(drop1(C1, test = "F")) ``` Hopefully you've got a highly significant interaction term. If you haven't then check whether you log transformed the `Song_week1` variable. Recall that `drop1()` only carries out a test on the model variables where it makes sense to do so, and since both our variables are included in the interaction term it is not meaningful to do a statistical test on the main effects. The next thing we need to do is to check the diagnostic plots. As usual we're really mostly interested in the first two so use the `which = 1:2` argument. ```{r prepare-model1, echo = FALSE} C1 <- lm(Delta_smi ~ Diet * log(Song_week1 +1), data = cricket_song) ``` ```{r cricket_diagnostics1, exercise = TRUE, exercise.setup = "prepare-model1"} ``` ```{r cricket_diagnostics1-hint-1} # Run plot() with your model object # as the first argument and which = 1:2 # as the second ``` ```{r cricket_diagnostics1-hint-2} # This is the solution plot(C1, which = 1:2) ``` Have a look at the diagnostic plots and then try to answer the questions.
```{r cricket_diagnostics_quiz1, echo = FALSE} quiz( caption = "Cricket data diagnostic plots quiz", question("Which of the following statements about the residuals versus fitted values is correct? More than one answer can be correct.", answer("The plot shows clear evidence for positive skew in the residuals", message = "Answer 1. There's nothing in this plot to suggest any skew in the residuals"), answer("Datapoints 442, 447 & 565 have the largest absolute residual values but there is nothing to indicate that there is anything anomalous about them", correct = TRUE), answer("The plot shows no indication of heteroskedasticity", correct = TRUE), answer("Based on this plot we can conclude that there is no problem with non-independence between data points", message = "Answer 4. Diagnostic plots such as this cannot tell us about independence or otherwise of data points. This is something that you have to consider when thinking about study design."), answer("There is a slight tendency for the residuals for intermediate fitted values to have more positive values than those for low or high fitted values, indicating that the relationship between the explanatory variables and the response variable might be curved to some degree", correct = TRUE) ), question("Which of the following statements about the qq-plot of residuals is correct?", answer("The residuals appear to follow a normal distribution very closely"), answer("The pattern of residuals on the qq-plot indicates some degree of positive skew", message = "Answer 2. Positive skew is indicated by both the larger positive and negative values plotting above the line of unity"), answer("There are some data points with anomalously large residuals and these outliers should be removed before further analysis", message = "No, there's nothing here to indicate that any of these data are anomalous"), answer("The larger positive and negative residuals are both plotting below the line of unity, indicating some negative skew in the residuals", correct = TRUE) ) ) ``` <details><summary> **Click here for more on what to do about the patterns seen in the diagnostic plots**</summary> The first plot, the residuals versus the fitted values, is mostly fine and shows, for example, no heteroskedasticity. There is a suggestion of curvature in the distribution of residuals, however, with high and low fitted values tending to be slightly more negative and intermediate fitted values being slightly more positive. It's not a big effect but it is there and it's also visible in the smoother line that R draws in the plot (the red line). The second diagnostic plot, the qq-plot, shows us that the residuals are mostly as expected from a normal distribution but the highest positive and lowest negative values are both a bit below what might be expected. This tells us that there is a small degree of negative skew in the residuals. Given our large sample size and the general robustness of the linear model this isn't a big concern. Our major concern about our fitted model, then, is that there might be some curvature in the relationship between our explanatory variables and the response variable. What to do? Since we already noticed a hint of this earlier in the scatterplot of change in weight versus diet we will see if this can be addressed by adding a *quadratic* term for diet: we will add an extra explanatory variable which is simply diet^2^.
To do this we need to use the `I()` function within our model formula to tell R to treat what's contained in the brackets as standard R code rather than part of a formula. This is because the `^2` notation means something different when it's part of a model formula than when it's used in standard R. Here's the model with the quadratic term added. ```{r} C2 <- lm(Delta_smi ~ Diet + I(Diet^2) + log(Song_week1 +1) + Diet:log(Song_week1 +1), data = cricket_song) print(drop1(C2, test = "F")) ``` You can see that the quadratic term is highly significant on the deletion test, indicating that a model without it has significantly less explanatory power. Has this fixed the curvature in the diagnostic plot? ```{r} plot(C2, which = 1:2) ``` Not quite... but it's much better and we can probably not worry about this any more. The qq plot is also somewhat improved, especially for the high positive residuals. </details> ## Interpreting the model Our final model, C2, with the quadratic term, is quite a complex one and will need a fair amount of interpretation. Let's have a look at the summary output. ```{r prepare-C2, echo = FALSE} C2 <- lm(Delta_smi ~ Diet + I(Diet^2) + log(Song_week1 +1) + Diet:log(Song_week1 +1), data = cricket_song) ``` ```{r cricket_model_summary, exercise = TRUE, exercise.setup = "prepare-C2"} ``` ```{r cricket_model_summary-hint-1} # Use the summary() function ``` ```{r cricket_model_summary-hint-2} # Here's the solution summary(C2) ``` <br> What does that coefficients table mean? It's complicated by the presence of `Diet` in no fewer than three of the five estimates presented here. That doesn't really matter to the model fitting process though: it just treats each term as a separate explanatory variable. You can see this if we write down the equation for this model, which is: $$y_i = \beta_0 + \beta_1 x_{1i} + \beta_2 x_{2i} + \beta_3 x_{3i} + \beta_4 x_{4i} + \epsilon_i, \quad i = 1...n$$ The five coefficients in the table correspond to $\beta_0$ to $\beta_4$, and the model terms are $x_1$ to $x_4$, with: <br><br> `Diet` = $x_1$, `log(Song_week1 + 1)` = $x_2$, `I(Diet^2)` = $x_3$ and `Diet:log(Song_week1 + 1)` = $x_4$. To better understand the interaction term, it's worth knowing that fitting an interaction between two continuous variables is equivalent to fitting a model with a term which is simply the two variables multiplied by each other. To illustrate this, we can generate some new variables to take the place of the `I(Diet^2)` and interaction terms and compare the coefficient estimates. ```{r echo = FALSE} C2 <- lm(Delta_smi ~ Diet + I(Diet^2) + log(Song_week1 +1) + Diet:log(Song_week1 +1), data = cricket_song) ``` ```{r} # Generate new variables diet_squared <- cricket_song$Diet^2 int <- cricket_song$Diet * log(cricket_song$Song_week1 + 1) # Generate coefficients for the model with new variables coefs1 <- coef(lm( Delta_smi ~ Diet + diet_squared + log(Song_week1 + 1) + int, data = cricket_song )) # Generate coefficients for model C2 coefs2 <- coef(C2) # Put them together and make a table coefs_table <- cbind(coefs1, coefs2) colnames(coefs_table) <- c("Example coefficients", "C2 coefficients") rownames(coefs_table) <- c("Intercept", "Diet", "diet_squared / I(Diet^2)", "log(Song_week1 + 1)", "int / Diet:log(Song_week1 + 1)") knitr::kable(coefs_table) ``` You can see that the coefficients are exactly the same whether we enter our interaction as such in the formula or whether we generate our own interaction term by multiplying the two variables together.
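If you'd rather not compare the two columns by eye, a quick check along these lines (reusing the `coefs1` and `coefs2` vectors we just made) confirms that the numbers really are identical:

```{r}
# The coefficient names differ between the two models
# (e.g. diet_squared versus I(Diet^2)) so strip the names
# before comparing the values themselves
all.equal(unname(coefs1), unname(coefs2))
```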
Hopefully that's helped you understand how the fitted model works. Now let's look at those coefficients again and work out what the model is telling us. ``` Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -1.524e-02 4.186e-03 -3.642 0.000297 *** Diet 7.974e-04 1.695e-04 4.704 3.25e-06 *** I(Diet^2) -6.789e-06 1.721e-06 -3.946 9.03e-05 *** log(Song_week1 + 1) -7.879e-04 6.435e-04 -1.224 0.221372 Diet:log(Song_week1 + 1) 4.950e-05 1.146e-05 4.318 1.88e-05 *** ``` ### Interaction between two continuous variables Our model can be thought of as having two separate things going on. The first is the relationship between log(Song_week1 + 1) and Delta_smi (for ease of reading I'll refer to these as "song effort" and "weight change" from now on). This is less complicated than the relationship between weight change and diet, which includes the diet^2^ variable, so we'll deal with it first. The crucial thing to remember about interactions between two continuous variables is that the interaction means that *the slope relating one variable to the response variable depends on the value of the other variable*. We can illustrate this by calculating the slope relating song effort and weight change for different values of diet. The coefficient for the interaction term tells us how much the slope for song effort changes with each unit of diet, so for any given value of diet, the slope for song effort is equal to: *Coefficient for Song effort + Coefficient for the interaction x the value of diet* or $$slope = -0.0007879 + diet \times 0.0000495$$ So for a low value of diet, for example 12: $$slope = - 0.0007879 + 12 \times 0.0000495 = - 0.0007879 + 0.000594 = -0.0001939$$ for an intermediate value, say 48, the slope is: $$slope = - 0.0007879 + 48 \times 0.0000495 = - 0.0007879 + 0.002376 = 0.001588$$ and for a high value, say 84: $$slope = - 0.0007879 + 84 \times 0.0000495 = - 0.0007879 + 0.004158 = 0.00337.$$ You can see that the slope is changing from slightly negative to positive and then becoming more steeply positive as the diet becomes better. We can visualise this by plotting scatterplots of weight change versus song effort with each value of Diet in separate panels and fitting a line to each. We'll do this in ggplot2 because if there's one thing that's easier in ggplot2 than base R graphics it's plotting graphs like this. As we discussed in the previous example I've adjusted the weight change values by subtracting the predictions from the parts of the model that aren't involved with the interaction that we're interested in, which means we can plot the predicted values and relate them to these data in a meaningful way. ```{r} library(ggplot2) # Remove the effects of diet resids <- cricket_song$Delta_smi - ( -0.01524 + 0.0007974 * cricket_song$Diet - 0.000006789 * (cricket_song$Diet ^ 2) + 0.0000495 * log(cricket_song$Song_week1 + 1) ) # Plot the graph p2 <- ggplot(data = data.frame(cricket_song, resids), aes(x = log(Song_week1 +1), y = resids)) + geom_point(pch = 1, colour = "steelblue") + theme_bw() + labs(x = "Log singing effort", y = "Residual change in weight (g)") + facet_wrap(facets = as.factor(cricket_song$Diet), nrow = 2) + geom_abline(aes(intercept = 0, slope = -0.0007879 + Diet * 0.0000495)) p2 ``` Here you can clearly see the way the slope changes with different values of diet. ### Interaction with a curve What about the relationship between diet and weight change?
This is more complicated because the relationship between weight change and diet is described by a curve, rather than a straight line. Whereas the previous interaction meant that the slope relating weight change to song effort was changing with diet, here the interaction means that the shape of the curve is changing depending on the value of log(Song_week1 + 1). When log(Song_week1 + 1) = 0, the effect of the interaction is zero (Diet * 0 = 0) and the curve describing how weight change relates to diet is $$Delta\_smi = -0.01524 + 0.0007974 \times Diet - 0.00000679 \times Diet^2$$ This is a curve with a decreasing slope, as we can tell from the negative coefficient for the quadratic term. When log(Song_week1 + 1) = 1, then the relationship between diet and weight change is adjusted by 0.0000495 (the coefficient for the interaction) so the curve is $$Delta\_smi = -0.01524 + (0.0007974 + 0.0000495) \times Diet - 0.00000679 \times Diet^2$$ When log(Song_week1 + 1) = 2, then the relationship between diet and weight change is adjusted by 2 * 0.0000495 = 0.000099 (the coefficient for the interaction multiplied by 2) so the curve is $$Delta\_smi = -0.01524 + (0.0007974 + 0.000099) \times Diet - 0.00000679 \times Diet^2$$ ... and so on. To visualise this we can plot scatterplots of weight change against diet, just as we did for song effort, with separate panels for different values of song effort. It's not as neat because song effort doesn't come in discrete values like diet, so we have to get a bit involved. What we'll do is split log(Song_week1 +1) into quartiles by making a dummy variable for each quartile, then a second one with the value of log(Song_week1 +1) from the midpoint of each quartile to calculate the fitted curve from. ```{r} # Generate dummy variable to divide song effort into quartiles song_quartiles <- character(length = dim(cricket_song)[1]) song_quartiles[which(log(cricket_song$Song_week1 +1) <= 4.307)] <- "1st quartile" song_quartiles[which(log(cricket_song$Song_week1 +1) > 4.307 & log(cricket_song$Song_week1 +1) <= 6.957)] <- "2nd quartile" song_quartiles[which(log(cricket_song$Song_week1 +1) > 6.957 & log(cricket_song$Song_week1 +1) <= 8.61)] <- "3rd quartile" song_quartiles[which(log(cricket_song$Song_week1 +1) > 8.61)] <- "4th quartile" song_quartiles <- as.factor(song_quartiles) # Midpoint values for each quartile midpoints <- numeric(length = dim(cricket_song)[1]) midpoints[which(song_quartiles == "1st quartile")] <- 4.307/2 midpoints[which(song_quartiles == "2nd quartile")] <- 4.307 + (6.957 - 4.307)/2 midpoints[which(song_quartiles == "3rd quartile")] <- 6.957 + (8.61 - 6.957)/2 midpoints[which(song_quartiles == "4th quartile")] <- 8.61 + (11.351 - 8.61)/2 # Generate predicted values predicted <- (0.0007974 + 0.0000495 * midpoints) * cricket_song$Diet - 0.000006789 * cricket_song$Diet^2 # Remove the effects of song effort resids <- cricket_song$Delta_smi - ( -0.01524 - 0.0007879 * log(cricket_song$Song_week1 + 1) + 0.0000495 * cricket_song$Diet ) # Plot the graph p2 <- ggplot(data = data.frame(cricket_song, song_quartiles, predicted, resids), aes(x = Diet, y = resids)) + geom_point(pch = 1, colour = "steelblue") + geom_line(aes(x = Diet, y = predicted)) + theme_bw() + facet_wrap(facets = song_quartiles, nrow = 2) p2 ``` <br><br><hr> ## License This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
```{r}
# Generate dummy variable to divide song effort into quartiles
song_quartiles <- character(length = dim(cricket_song)[1])

song_quartiles[which(log(cricket_song$Song_week1 + 1) <= 4.307)] <- "1st quartile"
song_quartiles[which(log(cricket_song$Song_week1 + 1) > 4.307 &
                       log(cricket_song$Song_week1 + 1) <= 6.957)] <- "2nd quartile"
song_quartiles[which(log(cricket_song$Song_week1 + 1) > 6.957 &
                       log(cricket_song$Song_week1 + 1) <= 8.61)] <- "3rd quartile"
song_quartiles[which(log(cricket_song$Song_week1 + 1) > 8.61)] <- "4th quartile"

song_quartiles <- as.factor(song_quartiles)

# Midpoint values for each quartile
midpoints <- numeric(length = dim(cricket_song)[1])

midpoints[which(song_quartiles == "1st quartile")] <- 4.307 / 2
midpoints[which(song_quartiles == "2nd quartile")] <- 4.307 + (6.957 - 4.307) / 2
midpoints[which(song_quartiles == "3rd quartile")] <- 6.957 + (8.61 - 6.957) / 2
midpoints[which(song_quartiles == "4th quartile")] <- 8.61 + (11.351 - 8.61) / 2

# Generate predicted values
predicted <- (0.0007974 + 0.0000495 * midpoints) * cricket_song$Diet -
  0.000006789 * cricket_song$Diet ^ 2

# Remove the effects of song effort (the intercept and the main effect)
resids <- cricket_song$Delta_smi - (
  -0.01524 -
    0.0007879 * log(cricket_song$Song_week1 + 1)
)

# Plot the graph
p2 <- ggplot(data = data.frame(cricket_song, song_quartiles, predicted, resids),
             aes(x = Diet, y = resids)) +
  geom_point(pch = 1, colour = "steelblue") +
  geom_line(aes(x = Diet, y = predicted)) +
  theme_bw() +
  facet_wrap(facets = song_quartiles, nrow = 2)

p2
```

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/17_Multiple_regression/Linear_models_5_multiple_regression.Rmd
--- title: "Linear models 6: Factors and continuous variables" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > Linear models with both factors and continuous variables as explaantory variables: how to fit models, check diagnostics and interpret the output --- ```{r setup, include=FALSE} library(learnr) library(ggplot2) library(RColorBrewer) knitr::opts_chunk$set(echo = TRUE, comment = NA, message = FALSE, fig.width = 5, fig.height = 5) load("pinniped.rda") # pinniped$Mate_type <- as.factor(pinniped$Mate_type) load("gnatocerus.rda") palette1 <- brewer.pal(n = 6, name = 'Dark2') ``` ## Models combining factors and continuous explanatory variables You should now be starting to feel familiar with fitting linear models with multiple explanatory variables. Up to this point, however, we've focussed on either multiple factors (tutorial 4) or multiple continuous variables (tutorial 5). The next step, of course, is to fit models with both factors and continuous variables. People often find these difficult to interpret, but the trick is to remember that a model with a factor and a continuous explanatory variable is essentially fitting a series of lines, one for each factor level, with the slope being determined by the relationship between the continuous variable and the response variable. If there is no interaction then the lines will all share a common slope, with the differences in intercept being determined by the factor levels. An interaction term in a model with a factor and a continuous variable means that the lines are fitted with a different slope for each level of the factor. ## Exercise 1: pinniped brains and mating systems Let's start with an fairly straightforward example. This comes from a study of brain size and mating type in pinnipeds (seals, sealions and walruses) published in 2012 by John Fitzpatrick and coauthors^1^. As part of a study of how mating system might impact on the evolution of brain and body size, Fitzpatrick *et al.* collected data on brain and body size and mating system for males and females from 33 species of pinniped. The data are loaded as a dataframe called `pinniped`, and you should start by checking it with the `str()` function. ```{r pinniped_import, exercise = TRUE} ``` ```{r pinniped_import-hint-1} # This is the solution str(pinniped) ``` We're interested here in the relationship between body mass and brain mass for males, and how it relates to the intensity of sexual selection each species experiences. The latter is indicated here by a variable called `Mate_type` which has two values. `mono` indicates species where the males are *monogynous*, pairing with a single female at a time, whereas `poly` indicates species with *polygynous* males which defend groups of females against rival males during the breeding season. Polygynous males are expected to experience stronger sexual selection during the breeding season. <br> ![](images/Cape_fur_seals.jpg){width="500"} Two male pinnipeds (Cape Fur Seals *Arctocephalus pusillus*, actually a species of sealion) competing for territory in which to defend a group of females. Photo copyright Rob Knell 2019. <br> Before going any further we should do some exploratory analysis. 
## Exercise 1: pinniped brains and mating systems

Let's start with a fairly straightforward example. This comes from a study of brain size and mating type in pinnipeds (seals, sealions and walruses) published in 2012 by John Fitzpatrick and coauthors^1^. As part of a study of how mating system might impact on the evolution of brain and body size, Fitzpatrick *et al.* collected data on brain and body size and mating system for males and females from 33 species of pinniped. The data are loaded as a dataframe called `pinniped`, and you should start by checking it with the `str()` function.

```{r pinniped_import, exercise = TRUE}

```

```{r pinniped_import-hint-1}
# This is the solution
str(pinniped)
```

We're interested here in the relationship between body mass and brain mass for males, and how it relates to the intensity of sexual selection each species experiences. The latter is indicated here by a variable called `Mate_type` which has two values. `mono` indicates species where the males are *monogynous*, pairing with a single female at a time, whereas `poly` indicates species with *polygynous* males which defend groups of females against rival males during the breeding season. Polygynous males are expected to experience stronger sexual selection during the breeding season.

<br>

![](images/Cape_fur_seals.jpg){width="500"}

Two male pinnipeds (Cape Fur Seals *Arctocephalus pusillus*, actually a species of sealion) competing for territory in which to defend a group of females. Photo copyright Rob Knell 2019.

<br>

Before going any further we should do some exploratory analysis. In this case a scatterplot of `Male_brain_g` versus `Male_mass_Kg` would be appropriate, and you can colour-code it for `Mate_type` by firstly declaring `Mate_type` to be a factor and then specifying `col = Mate_type` as an argument in your `plot()` function call. Since we're just doing some exploratory analysis we won't worry about a legend --- because `poly` comes after `mono` in the alphabet the `poly` data will be coded red and the `mono` data black. Don't forget to label the axes appropriately.

```{r pinniped_scatterplot1, exercise = TRUE, exercise.lines = 10}

```

```{r pinniped_scatterplot1-hint-1}
# Make `Mate_type` into a factor by using the
# as.factor() function:

pinniped$Mate_type <- as.factor(pinniped$Mate_type)
```

```{r pinniped_scatterplot1-hint-2}
# This is the solution
pinniped$Mate_type <- as.factor(pinniped$Mate_type)

plot(Male_brain_g ~ Male_mass_Kg,
     col = Mate_type,
     data = pinniped,
     xlab = "Male mass (Kg)",
     ylab = "Brain mass (g)")
```

<br><br>

What do you conclude from this plot?

```{r pinniped_scatterplot_quiz, echo=FALSE}
question("Based on the scatterplot above, which of the following do you think are correct?",
         answer("The relationship between brain mass and body mass is curved and it will be necessary to use a quadratic term in our model", message = "Answer 1: It's hard to be sure about the nature of this relationship because of the strong positive skew in both variables. It is possible that any curvature will be removed if an appropriate transformation is used. Although there does seem to be some curvature, therefore, it's not clear that we need to fit a curved model."),
         answer("Both variables show strong negative skew", message = "Answer 2. No, these data are showing positive skew. Negative skew would show as the opposite pattern, with data points clustered towards the higher values."),
         answer("There are a number of data points with extreme values which should be removed as outliers", message = "Answer 3. Without any other good reason (data point known to be problematic for reasons other than its value), outliers should only be cautiously removed if they are clearly anomalous and not within the range of values that might be expected given the distribution of the data. Given the strong positive skew evident here there is no reason to remove these extreme values"),
         answer("Both variables show strong positive skew and a data transformation such as a log transformation will be necessary before further analysis", correct = TRUE),
         answer("The heaviest species are all polygynous so the analysis will be invalid unless we remove these", message = "There's no requirement that the explanatory variable should have the same range for both groups. Nonetheless, we should bear this in mind in the analysis: for each mating type we can only really draw conclusions regarding the range of data that we have represented")
)
```

<br>

<details>

<summary>**Click here for more on the exploratory analysis**</summary>

<br>

Both variables are strongly positively skewed and it's going to be necessary to correct this with a transformation before proceeding further. Given the nature of the data and the strong positive skew a log transformation would be appropriate: try to generate a new plot like the previous one but with both variables logged. NB neither variable has any zeros so no need to add a constant.
```{r pinniped_setup, echo = FALSE}
pinniped$Mate_type <- as.factor(pinniped$Mate_type)
```

```{r pinniped_scatterplot2, exercise = TRUE, exercise.setup = "pinniped_setup", exercise.lines = 8}
plot(Male_brain_g ~ Male_mass_Kg, col = Mate_type, data = pinniped)
```

```{r pinniped_scatterplot2-hint-1}
# Just use the log() function on each variable.
# Alternatively, use the log = "xy" argument
```

```{r pinniped_scatterplot2-hint-2}
# This is the solution
plot(log(Male_brain_g) ~ log(Male_mass_Kg),
     col = Mate_type,
     data = pinniped,
     xlab = "Log of male mass (Kg)",
     ylab = "Log of brain mass (g)")
```

The log transformation seems to have sorted out the problems associated with the very skewed data. Now to fit a model. We'd like the main effects of both `Male_mass_Kg` and `Mate_type`, plus their interaction. Don't forget to log both of the variables that require transformation, and call your fitted model object `P1`. Once the model is fitted, check the significance of the interaction term using `drop1()`. Note that to get the equivalent output to normal you'll need to use `print(drop1(... ))` because otherwise the formatting applied in the tutorial will alter it.

```{r pinniped_model1, exercise = TRUE, exercise.setup = "pinniped_setup"}

```

```{r pinniped_model1-hint-1}
# To fit both main effects and the interaction
# you can use var1 + var2 + var1:var2 or
# var1 * var2.
# You need to specify data = pinniped as an argument
```

```{r pinniped_model1-hint-2}
# For the drop1() don't forget to specify
# test = "F" as an argument.
# Make sure all arguments are separated by a comma
# for both functions
```

```{r pinniped_model1-hint-3}
# Here is the solution
P1 <- lm(log(Male_brain_g) ~ log(Male_mass_Kg) * Mate_type, data = pinniped)

print(drop1(P1, test = "F"))
```

A model with the interaction present has higher explanatory power than one with just the main effects so we'll keep the present model.

</details>

<br><br>

<hr>

1. Fitzpatrick, J.L., Almbro, M., Gonzalez-Voyer, A., Hamada, S., Pennington, C., Scanlan, J. & Kolm, N. (2012) Sexual selection uncouples the evolution of brain and body size in pinnipeds. Journal of evolutionary biology, 25, 1321--1330.

## Diagnostics for the pinniped model

Before we go any further with trying to interpret the model we need to check the diagnostics for our model. Use the `plot()` function and the `which = 1:2` argument to bring up only the first two diagnostic plots.

```{r pinniped_model, echo = FALSE}
P1 <- lm(log(Male_brain_g) ~ log(Male_mass_Kg) * Mate_type, data = pinniped)
```

```{r pinniped-diagnostics, exercise.setup = "pinniped_model", exercise = TRUE}

```

```{r pinniped-diagnostics-hint-1}
# This is the solution
plot(P1, which = 1:2)
```

Take a look at these diagnostics and try to answer these questions.
```{r pinniped-diagnostics-quiz, echo=FALSE}
question(
  "Which of the following statements is correct?",
  answer("The residuals versus fitted values plot shows evidence of strong heteroskedasticity which needs to be corrected", message = "Answer 1. Heteroskedasticity means that the variance is varying across the range of fitted values, often leading to a plot with a characteristic wedge shape. This plot has some suggestions of a wedge shape but it's not clear and is unlikely to be causing problems with the model fit."),
  answer("It is not clear but there is some indication of negative skew", message = "Answer 2. There's nothing here to indicate negative skew, which would show as more very negative residuals and a concave curve of points on the qq-plot."),
  answer("The qq-plot shows that there is some positive skew in the residuals", message = "Answer 3. The qq-plot is rather well-behaved with all the residuals plotting close to the line. There's nothing to indicate skew in these data."),
  answer("The qq-plot shows that the relationship between at least one of the explanatory variables and the response variable is non-linear", message = "Answer 4. The qq-plot can only tell you about the distribution of the residuals, not about whether the shape of the relationship between response and explanatory variables is linear."),
  answer("The qq-plot indicates that the residuals are approximately normally distributed", correct = TRUE)
)
```

<br>

<details>

<summary>**Click here for more on the diagnostics**</summary>

The diagnostic plots do not indicate anything particularly concerning. The qq-plot shows no serious deviation from what would be expected if the residuals were normally distributed. The residuals versus fitted values plot shows some hint of increasing variance with higher fitted values but this is not clear. When sample sizes are relatively small it becomes more difficult to see real patterns on diagnostic plots. Furthermore, when sample sizes are small we quite often see patterns that are more a consequence of the chance distribution of a few large positive or negative residuals than representative of a real issue with the data. Given the uncertainty about whether there really is heteroskedasticity, and the general robustness of the linear model, we will leave it as it is.

</details>

## Interpretation of the pinniped model

This video deals with model summaries when there are both factors and continuous explanatory variables. You might want to skip the first 6 minutes which are focussed on simple linear regression but if you're still at all confused about coefficients and summary tables I'd recommend watching the whole thing for a bit of revision.

![](https://youtu.be/aCOgHUSd-SI)

Now check the `summary()` output for the model P1.

```{r pinniped_model2, echo = FALSE}
P1 <- lm(log(Male_brain_g) ~ log(Male_mass_Kg) * Mate_type, data = pinniped)
```

```{r pinniped_summary1, exercise.setup = "pinniped_model2", exercise = TRUE}

```

```{r pinniped_summary1-hint-1}
# This is the solution
summary(P1)
```

Since we have a model with one continuous explanatory variable and one factor with two levels, and we have an interaction term in our model, we are fitting a model that consists of two lines with different slopes as well as different intercepts. On the basis of the explanation given in the video, try to work out what the equations for the two lines relating log male mass to log brain mass are. Just as a reminder the two factor levels in `Mate_type` are `mono` and `poly` and `mono` comes first alphabetically.
```{r pinniped-summary-quiz, echo=FALSE}
quiz(caption = "Pinniped model interpretation quiz",
     question(
       "Which of the following is the equation of the line relating log male mass to log brain size for **monogynous** species?",
       answer("log(Brain mass) = 2.0779 + 1.9273 x log(Male mass)", message = "Answer 1. 1.9273 is the difference in intercepts and not slope"),
       answer("log(Brain mass) = 2.0779 + 0.7544 x log(Male mass)", correct = TRUE),
       answer("log(Brain mass) = 2.0779 + 1.9273 + 0.7544 x log(Male mass)", message = "Answer 3. 1.9273 is the difference in intercepts between the two mating systems and should be added on for polygynous species"),
       answer("log(Brain mass) = 2.0779 + 1.9273 + (0.7544 - 0.3767) x log(Male mass)", message = "Answer 4. The coefficients for the monogynous species are the first two in the coefficients table.")
     ),
     question(
       "Which of the following is the equation of the line relating log male mass to log brain size for **polygynous** species?",
       answer("log(Brain mass) = 2.0779 + 1.9273 x log(Male mass)", message = "Answer 1. 1.9273 is the difference in intercepts and not slope"),
       answer("log(Brain mass) = 2.0779 + 0.7544 x log(Male mass)", message = "This is the equation for monogynous species"),
       answer("log(Brain mass) = 2.0779 + 1.9273 + 0.7544 x log(Male mass)", message = "Answer 3. Partly right - but you need the difference in slopes as well as the difference in intercepts"),
       answer("log(Brain mass) = 2.0779 + 1.9273 + (0.7544 - 0.3767) x log(Male mass)", correct = TRUE)
     ),
     question(
       "Which of the following statements is true?",
       answer("Brain mass increases more with body mass in polygynous species", message = "The slope is lower in polygynous species so brain mass increases more slowly"),
       answer("Brain mass decreases with body mass in polygynous species", message = "Brain mass increases with body mass in polygynous species, but it does so less than in monogynous species"),
       answer("On average, for every Kg increase in body mass, the brain increases in mass by 0.7544g in monogynous species", message = "The model was fitted to log transformed data so this is not correct"),
       answer("In polygynous species, on average, for an increase of 1 in the log body mass, log brain mass increases by 0.3777", correct = TRUE)
     )
)
```

<br>

<details>

<summary>**Click here for more on the model interpretation**</summary>

Hopefully you've been able to get a good idea of what the model is telling us but if not:

- The model overall can be thought of as fitting two separate lines to the relationship between log male mass and log brain mass, one for monogynous males and one for polygynous males.
- The interaction term defines the difference in slopes between the two groups defined by the factor levels. Because the interaction term is statistically significant we can interpret this as telling us that there is a significant difference in slopes.

</details>
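You can also assemble the two lines directly from the fitted coefficients rather than reading them off the summary table --- a quick sketch (`P1` was fitted in an earlier chunk, and the coefficient names are the ones `summary(P1)` prints):

```{r}
# Building the two fitted lines from the coefficients of P1
b <- coef(P1)

# Monogynous species (the reference level): intercept, then slope
c(b["(Intercept)"], b["log(Male_mass_Kg)"])

# Polygynous species: add on the differences estimated by the
# Mate_typepoly and interaction coefficients
c(b["(Intercept)"] + b["Mate_typepoly"],
  b["log(Male_mass_Kg)"] + b["log(Male_mass_Kg):Mate_typepoly"])
```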
## Visualising the fitted model using `predict()`

We could visualise the data by plotting the data with the two lines derived from the coefficients table, either using `abline()` or using a custom function like the `goodline()` function that was referred to in the video to keep the lines within the data they are fitted to. This will give an acceptable graph, but it won't give any indication of the amount of uncertainty around each line. To do this we can use a function called `predict()` which will allow us to generate fitted values and either confidence intervals or prediction intervals for a range of data.

To use predict we need to set up a dummy data frame with the values that we'd like to generate fitted values and (in this case) confidence intervals for. The first thing we'll need to know is the range of values for `Male_mass_Kg` for the two levels of `Mate_type`. You can find this out using `summary()` --- this is done for the `mono` level of `Mate_type`, try to do the same for `poly`.

```{r Male_mass_summary, exercise = TRUE}
summary(pinniped$Male_mass_Kg[pinniped$Mate_type == "mono"])
```

```{r Male_mass_summary-hint-1}
# Just replace "mono" with "poly"
```

```{r Male_mass_summary-hint-2}
# This is the solution
summary(pinniped$Male_mass_Kg[pinniped$Mate_type == "mono"])

summary(pinniped$Male_mass_Kg[pinniped$Mate_type == "poly"])
```

Now we use `predict()` to generate predicted values and confidence intervals.

```{r echo = FALSE}
P1 <- lm(formula = log(Male_brain_g) ~ log(Male_mass_Kg) * Mate_type, data = pinniped)
```

```{r}
# Generate predicted values and confidence
# intervals for Mate_type = "mono"
mono_pred <- predict(P1, # 1st argument is the fitted model
                     newdata = list( # New data is the range over which we want to predict
                       Male_mass_Kg = seq(70.5, 343.2, length.out = 100),
                       Mate_type = rep("mono", times = 100)
                     ),
                     int = "confidence") # predicted values plus 95% CIs

# Add the range of male masses to the mono_pred object
mono_pred <- data.frame(Male_mass_Kg = seq(70.5, 343.2, length.out = 100),
                        mono_pred)

# Generate predicted values and confidence
# intervals for Mate_type = "poly"
poly_pred <- predict(P1,
                     list(
                       Male_mass_Kg = seq(64.5, 3510, length.out = 100),
                       Mate_type = rep("poly", times = 100)
                     ),
                     int = "confidence")

poly_pred <- data.frame(Male_mass_Kg = seq(64.5, 3510, length.out = 100),
                        poly_pred)
```

Let's check the mono_pred object

```{r}
head(mono_pred)
```

All looks good. What we can do now is to plot the data and draw in the lines representing the fitted values and confidence intervals using `lines()`. See if you can replace the XXXXX parts in the code to generate the plot. We're going to use a nicer palette than the default, and add a legend.

```{r pinniped_plot_setup, echo = FALSE}
pinniped$Mate_type <- as.factor(pinniped$Mate_type)

P1 <- lm(formula = log(Male_brain_g) ~ log(Male_mass_Kg) * Mate_type, data = pinniped)

# Generate predicted values and confidence
# intervals for Mate_type = "mono"
mono_pred <- predict(P1, # 1st argument is the fitted model
                     newdata = list( # New data is the range over which we want to predict
                       Male_mass_Kg = seq(70.5, 343.2, length.out = 100),
                       Mate_type = rep("mono", times = 100)
                     ),
                     int = "confidence") # predicted values plus 95% CIs

# Add the range of male masses to the mono_pred object
mono_pred <- data.frame(Male_mass_Kg = seq(70.5, 343.2, length.out = 100),
                        mono_pred)

# Generate predicted values and confidence
# intervals for Mate_type = "poly"
poly_pred <- predict(P1,
                     list(
                       Male_mass_Kg = seq(64.5, 3510, length.out = 100),
                       Mate_type = rep("poly", times = 100)
                     ),
                     int = "confidence")

poly_pred <- data.frame(Male_mass_Kg = seq(64.5, 3510, length.out = 100),
                        poly_pred)
```

```{r pinniped_plot2, exercise = TRUE, exercise.setup = "pinniped_plot_setup", exercise.lines = 33}
# Set custom palette
col1 <- c("chocolate1", "cadetblue4")

# Plot data
plot(
  log(XXXXX) ~ XXXXX,
  data = XXXXX,
  pch = 16,
  col = col1[Mate_type],
  xlab = "Log Male Mass (Kg)",
  ylab = "XXXXX"
)

# Add lines for predicted values and CIs
# for Mate_type = mono
lines(mono_pred$fit ~ log(mono_pred$Male_mass_Kg), col = col1[1], lwd = 2)
lines(mono_pred$lwr ~ log(mono_pred$Male_mass_Kg), col = col1[1], lwd = 1)
lines(mono_pred$upr ~ log(mono_pred$Male_mass_Kg), col = col1[1], lwd = 1)

# Add lines for predicted values and CIs
# for Mate_type = poly
lines(poly_pred$fit ~ log(poly_pred$Male_mass_Kg), col = col1[2], lwd = 2)
lines(XXXXX ~ XXXXX, col = col1[2], lwd = XXXXX)
lines(XXXXX ~ XXXXX, col = col1[2], lwd = XXXXX)

# Add legend
legend("topleft", fill = col1, legend = c("Monogynous", "Polygynous"))
```
col1, legend = c("Monogynous", "Polygynous")) ``` ```{r pinniped_plot2-solution} # This is the solution # Set custom palette col1 <- c("chocolate1", "cadetblue4") # Plot data plot( log(Male_brain_g) ~ log(Male_mass_Kg), data = pinniped, pch = 16, col = col1[Mate_type], xlab = "Log Male Mass (Kg)", ylab = "Log Male Brain Mass (g)" ) # Add lines for predicted values and CIs # for Mate_type = mono lines(mono_pred$fit ~ log(mono_pred$Male_mass_Kg), col = col1[1], lwd = 2) lines(mono_pred$lwr ~ log(mono_pred$Male_mass_Kg), col = col1[1], lwd = 1) lines(mono_pred$upr ~ log(mono_pred$Male_mass_Kg), col = col1[1], lwd = 1) # Add lines for predicted values and CIs # for Mate_type = poly lines(poly_pred$fit ~ log(poly_pred$Male_mass_Kg), col = col1[2], lwd = 2) lines(poly_pred$lwr ~ log(poly_pred$Male_mass_Kg), col = col1[2], lwd = 1) lines(poly_pred$upr ~ log(poly_pred$Male_mass_Kg), col = col1[2], lwd = 1) # Add legend legend("topleft", fill = palette1, legend = c("Monogynous", "Polygynous")) ``` ### Final point: independence of data As we've discussed in earlier tutorials, independence of data is a fundamental assumption behind this sort of analysis and also something that you can't really check: rather you have to think a out it when designing your study. Are the data points in this study independent? No --- because some of the species are more cloesly related to some of the other species, and so these are likely to be more similar than we'd expect by chance even in the absence of any effect of mating system. As an example of this take a look at the species in this dataset with male body masses of less than 100Kg. ```{r} pinniped[which(pinniped$Male_mass_Kg < 100),] ``` Six of these are from the genus *Phoca*, and all of these have monogynous males. What's more, these are all the species from the genus *Phoca* on the dataset. There's an obvious capacity here for apparent patterns in the dataset to be driven by evolutionary history rather than other factors. It could simply be the case that the common ancestor of the genus *Phoca* was small and monogynous, and that any apparent tendency for smaller males to be monogynous is a consequence of that rather than any evolutionary tendency towards small size in monogynous species. This is not something that we can correct for in this analysis here, but there are techniques for correcting for phylogenetic non-independence such as Phylogenetic Generalised Least Squares or PGLS. This is what Fitzpatrick et al. used in their paper: I'm pleased to say that the fundamental conclusions were unchanged. ## Exercise 2: Insulin-like peptides and beetle weaponry In many species of animal one sex (usually the male, but not always) carries weapons such as horns, enlarged mandibles or similar which are used in contests with other animals from the same species. These weapons are often extremely variable in size between individuals, and an interesting question is why this should be and how the growth of these weapons is regulated. In 2019 a team of scientists based in Japan and the USA^2^ published the results of an in-depth study on the regulation of weapon size in a beetle called the broad-horned flour beetle, *Gnatocerus cornutus*. Males of this species have enlarged mandibles which they use in fights with other males to gain access to females. There is considerable variation in the length of these mandibles, much of which is known to be associated with the diet of the males during development. 
<br>

![](images/G_cornutus.jpg){width="500"}

Male *Gnatocerus cornutus* showing enlarged mandibles. Photo by Udo Schmidt, released under a [Creative Commons Attribution-ShareAlike license](https://creativecommons.org/licenses/by-sa/2.0/).

Insulin-like peptides (ILPs) have been suggested to be important in the regulation of weaponry in insects, and Okada and co-workers identified five ILP genes in the *G. cornutus* genome. To test whether they were involved in weapon growth, they silenced these genes using RNA interference (RNAi) in final instar larvae, and then measured the mandible size and also a number of other measures of size in the adult males. RNAi coding for green fluorescent protein (GFP) was used as a control.

Here, we're going to model the relationship between RNAi treatment and mandible length. Because mandible length increases with body size, we'll also include elytron width in our model as a measure of body size. The dataframe is loaded as `gnatocerus`. Let's start by checking its structure.

```{r}
str(gnatocerus)
```

There are three variables: `treatment` which is the RNAi treatment the beetle received, `EW` which is Elytra Width in µm and `ML` which is Mandible Length, also in µm. `ML` is our response variable and `EW` is a continuous explanatory variable. `treatment` has discrete values and we will be wanting to use this as a factor in our model. Before we do anything else, then, we need to ask R to make it into a factor using the `as.factor()` function. Check the conversion has gone correctly with the `summary()` function.

```{r treatment_factor, exercise = TRUE}

```

```{r treatment_factor-hint-1}
# Remember that you have to make treatment
# into a factor and then allocate it to the
# correct variable in the gnatocerus data frame.
#
# Then run summary() on gnatocerus$treatment
```

```{r treatment_factor-hint-2}
# This is the solution
gnatocerus$treatment <- as.factor(gnatocerus$treatment)

summary(gnatocerus$treatment)
```

`GFP` refers to the beetles given RNAi for Green Fluorescent Protein (a jellyfish protein) as a control, and `1ILP` to `5ILP` are the five insulin-like protein genes which were suppressed using RNAi in the final instar larvae.

Let's draw a scatterplot of `ML` against `EW`, colour coded by the levels of `treatment` to get an idea of what our data look like. Because we have lots of factor levels we'll use a colour palette from the RColorBrewer package. This package gives R users access to the colour palettes from [the color brewer website](https://colorbrewer2.org/). The package will be automatically installed if you are using a recent version of R, otherwise you might need to install it yourself using `install.packages("RColorBrewer")`. We can generate a palette with 6 colours like this:

```{r}
library(RColorBrewer)

palette1 <- brewer.pal(n = 6, name = 'Dark2')
```

Now to draw our scatterplot. See if you can fill in the XXXXX parts.

```{r beetle_setup, echo = FALSE}
gnatocerus$treatment <- as.factor(gnatocerus$treatment)
```

```{r beetle_scatterplot1, exercise = TRUE, exercise.setup = "beetle_setup", exercise.lines = 15}
plot(XXXXX ~ XXXXX,
     data = XXXXX,
     col = palette1[treatment],
     pch = 16,
     xlab = "XXXXX",
     ylab = "XXXXX"
)

legend("topleft",
       col = XXXXX,
       pch = XXXXX,
       legend = XXXXX
)
```
```{r beetle_scatterplot1-hint-1}
# The plot should be relatively easy:
# we want ML plotted against EW with the
# dataframe as gnatocerus. For the axis labels
# don't forget the units are µm
#
# For the legend col and pch should
# be the same as for the plot
#
# The legend text (the legend = argument)
# needs to be the levels of the treatment
# factor
```

```{r beetle_scatterplot1-hint-2}
# This is the solution
plot(ML ~ EW,
     data = gnatocerus,
     col = palette1[treatment],
     pch = 16,
     xlab = "Elytron width (µm)",
     ylab = "Mandible length (µm)"
)

legend("topleft",
       col = palette1,
       pch = 16,
       legend = levels(gnatocerus$treatment)
)
```

Have a look at the plot and the patterns in the data.

```{r beetle_scatterplot1_quiz, echo=FALSE}
question("Based on the scatterplot above, which of the following do you think are correct? More than one question can be correct",
         answer("The relationship between elytron width and mandible length is curved and it will be necessary to use a quadratic term in our model", message = "Answer 1: It's hard to be absolutely sure about this but there's no clearly curved relationships there."),
         answer("The beetles treated with 1ILP seem to have much shorter mandibles than do the others.", correct = TRUE),
         answer("There is one data point with an extreme value for mandible length which we should consider possibly removing as an outlier", correct = TRUE),
         answer("Both variables show strong positive skew and a data transformation such as a log transformation will be necessary before further analysis", message = "Answer 4: There is nothing here to indicate positive skew"),
         answer("The two data points for beetles with an elytron width of less than 1200 µm appear to be anomalously small and we should consider removing these as outliers", message = "These were small beetles but there's nothing to indicate that their size is sufficiently small that we should consider them as being very unlikely values or as potentially having been generated by a different process from the ones we're interested in.")
)
```

<details>

<summary>**Click here for more on what to do with these data**</summary>

As a rule of thumb, it's always best to avoid removing data from an analysis for any reason. Nonetheless, we do sometimes get data points that just look like they shouldn't be there, and sometimes they really shouldn't: errors can be made during measurement and recording data, and other errors can be made such as misidentification of individuals, mistakes in care of experimental plants, animals or cultures or in the preparation of reagents and so on. Detecting and dealing with such anomalies is fraught with risk because the potential for bias and unconsciously altering the results to fit a particular hypothesis is so high. In the mind of your author, the only time when it's really justified is when there is obviously something anomalous about a data point which means that you can be sure that it should not be there. Impossible values such as negative values when the variable in question cannot be negative, or fractions when the data can only be integers, are clearly anomalous and an indicator that an error has been made in measuring or recording the value. When it comes to data points with values that are just unlikely, however, things are much more difficult. In these cases it's good practice to think about four questions.
Is the data point clearly so far outside the range that would be expected given the distribution of the rest of the data that in all probability an error has been made? Based on your knowledge of the system, is the value of the data point one that is possible? Is it possible that the experimental protocol might have caused this extreme value? Is this single data point likely to have a disproportionate effect on the outcome of the analysis?

In this case, the individual with the mandible length of 921 µm has a mandible length that is very much higher than all of the rest. Furthermore, there's no indication of positive skew in the remainder of the data which could lead to a few very high values. Overall then, we're probably justified in thinking there's something fishy about this particular data point. What you would do now if you were running this project is to go back and check the lab notebooks and see what was written down when this experiment was done. With any luck you'll find the correct value then. If you're unable to correct the value then you can cautiously consider removing the data point, but only do this when all other avenues have been explored. **If you do remove an outlier you MUST say so in your report.** We'll just analyse the data without the outlier here but the **best practice is to present analysis results both with and without the outlier present**.

To find out which row of the data frame corresponds to this data point we can use `which()`.

```{r}
which(gnatocerus$ML > 900)
```

See if you can remove the outlier from these data, and allocate the edited data frame to a new object called `gnatocerus2`. Check it's worked by calling `summary` on the new `ML` variable in `gnatocerus2` --- the new maximum value should be more sensible.

```{r outlier_removal, exercise = TRUE}

```

```{r outlier_removal-hint-1}
# You know the row number for the data is 24
# so you can remove it using a subscript
#
# Remember subscripts for matrices and dataframes
# go [row number, column number] and that to choose
# all columns you leave it blank after the first
# comma
```

```{r outlier_removal-hint-2}
# This is the solution
gnatocerus2 <- gnatocerus[-24, ]

summary(gnatocerus2$ML)
```

OK. Now we can get on with the analysis. Just to make it clear, this anomalous data point was not in the data set made public by Okada *et al.* --- it was added to give an example of what an outlying data point looks like.

</details>

<hr>

2. Okada, Y., Katsuki, M., Okamoto, N., Fujioka, H. & Okada, K. (2019) A specific type of insulin-like peptide regulates the conditional growth of a beetle weapon. PLoS biology, 17, e3000541.

## Fitting the model

```{r beetle_setup1, echo = FALSE}
gnatocerus2 <- gnatocerus[-24, ]
gnatocerus2$treatment <- as.factor(gnatocerus2$treatment)
```

```{r echo = FALSE}
gnatocerus2 <- gnatocerus[-24, ]
gnatocerus2$treatment <- as.factor(gnatocerus2$treatment)
```

The model we will fit will have two explanatory variables, `EW`, elytron width, included as a measure of overall beetle size, and `treatment` which tells us the experimental treatment the beetles received. It's necessary to include a measure of body size because mandible length, our response variable, increases with body size, and because the experimental treatment we are using might well change the relationship between these two variables. Rather than just asking "does mandible length change with treatment?", therefore, we really need to ask "does the relationship between mandible length and body size change with treatment?".

Before we fit our model, however, we should think a little about how we can best ask the questions that we're really interested in.
As we discussed way back in the first tutorial on single factor ANOVA, what the coefficients table you get in R gives us is a set of *treatment contrasts* which allow us to make certain comparisons directly without using things like post-hoc tests. As you've hopefully gathered by now, the default for these contrasts is to make the first factor level in alphabetical order into the *reference level* which is used for the intercept and which the other levels are compared with. In the case of `treatment` the reference level is the first one listed by `levels()`.

```{r}
levels(gnatocerus2$treatment)
```

`1ILP` is first because numbers come before letters in alphabetical order, so `1ILP` is the reference level. We don't want to see contrasts between the `1ILP` factor level and the others, however. The animals given the GFP RNAi treatment are the controls, and the question we're interested in is whether treatment with our candidate insulin-like peptides changes the mandible length - elytron width relationship relative to the `GFP` treatment. To see the contrasts with `GFP` we can use the `relevel()` function to change the reference level. `relevel()` takes two arguments, the name of the factor and then `ref =` followed by the name of the new reference level, typically in quote marks. Don't forget to allocate the new factor returned by `relevel()` to the `gnatocerus2$treatment` variable. Once it's done you can check the reference level by using `levels()` again.

```{r relevel, exercise = TRUE, exercise.setup = "beetle_setup1"}

```

```{r relevel-hint-1}
# Here's a code framework to help you

# dataframe$factor <- relevel(dataframe$factor, ref = "factor level")
#
# levels(dataframe$factor)
```

```{r relevel-hint-2}
# Here's the solution
gnatocerus2$treatment <- relevel(gnatocerus2$treatment, ref = "GFP")

levels(gnatocerus2$treatment)
```

Now that we've done something that 95% of biology academics don't do before jumping straight into the stats, namely thinking about the reason why we're doing the analysis and what the questions we're trying to answer are, we can go ahead and fit our model. Main effects plus interaction please, and call the model `B1`. Check for significance using `drop1()`. To get the standard R formatted output please enclose your `drop1()` function call in `print()`.

```{r beetle_setup2}
gnatocerus2 <- gnatocerus[-24, ]
gnatocerus2$treatment <- as.factor(gnatocerus2$treatment)
gnatocerus2$treatment <- relevel(gnatocerus2$treatment, ref = "GFP")
```

```{r beetle_model1, exercise = TRUE, exercise.setup = "beetle_setup2", paged.print=FALSE}

```

```{r beetle_model1-hint-1}
# You want to fit a linear model with ML as the response variable
# and EW and treatment as the explanatory variables
```

```{r beetle_model1-hint-2}
# Here's a code framework to help you

B1 <- lm(Response ~ Explanatory1 * Explanatory2, data = dataframe)

print(drop1(Model, test = "F"))
```

```{r beetle_model1-hint-3}
# Here's the solution
B1 <- lm(ML ~ EW * treatment, data = gnatocerus2)

print(drop1(B1, test = "F"))
```

Looks like a very highly significant interaction term: what does that mean?
What does that mean?", answer("The various ILP treatments have slopes different from the reference level"), answer("There is a significant effect of ILP on mandible length"), answer("Treatment with ILP interferes with mandible growth"), answer("At least one of the slopes of the lines corresponding to the factor levels is significantly different from at least one other", correct = TRUE), answer("At least one of the intercepts of the lines corresponding to the factor levels is significantly different from at least one other") ) ``` We'll get back to that when we look at the interpretation of the model in more detail. Before we go any further, of course, we need to check the diagnostics. ## Diagnostics for the beetle model As usual we'll just look at the first two diagnostic plots. ```{r beetle_setup3} gnatocerus2 <- gnatocerus[-24, ] B1 <- lm(ML ~ EW * treatment, data = gnatocerus2) ``` ```{r beetle_model_diagnostics, exercise = TRUE, exercise.setup = "beetle_setup3"} ``` ```{r beetle_model_diagnostics-hint-1} # Here's the solution plot(B1, which = 1:2) ``` What do the diagnostic plots tell you? ```{r beetle-diagnostics-quiz, echo=FALSE} question( "Which of the following statements is correct? More than one answer can be correct.", answer("The residuals versus fitted values plot shows evidence of heteroskedasticity which might need to be corrrected", message = "Answer 1. Heteroskedascticity means that the variance is varying across the range of fitted values, often leading to a plot with a characteristic wedge shape. There's nothing to indicate that the variance is changing across the range of fitted values here."), answer("The qq-plot shows some evidence of positive skew", message = "Answer 2. There's nothing here to indicate negative skew, which would show as a convex curve of points on the qq-plot."), answer("The qq-plot shows that the residuals mostly have the values expected if they were following a normal distribution", correct = TRUE), answer( "The plot of residuals versus fitted values has two groups of points indicating non-independence in the data which invalidates the analysis", message = "Answer 4. The two groups of points on the plot are showing up because there is one treatment level that has much lower fitted values than the others (see the scatterplot). This is not a problem. Diagnostic plots like this cannot give an indication of how independent the individual data points are."), answer("The apparent structure in the plot of residuals versus fitted values is a consequence of one treatment level having lower fitted values than the others and is not a problem", correct = TRUE) ) ``` <br> <details> <summary>**Click here for more on the diagnostics**</summary> Overall the diagnostic plots are remarkably well behaved and don't show anything to cause us any concern. We can go on and interpret our model output. </details> ## Interpreting the beetle model Let's check the output from running `summary()` on our model. ```{r beetle_setup4} gnatocerus2 <- gnatocerus[-24, ] gnatocerus2$treatment <- as.factor(gnatocerus2$treatment) gnatocerus2$treatment <- relevel(gnatocerus2$treatment, ref = "GFP") B1 <- lm(ML ~ EW * treatment, data = gnatocerus2) ``` ```{r beetle_summary, exercise = TRUE, exercise.setup = "beetle_setup4"} ``` ```{r beetle_summary-hint-1} # This is the solution summary(B1) ``` Looking at the coefficients table, the value for `EW` gives the slope for the fitted line relating elytron width to mandible length for the control `GFP` treatment. 
Looking at the coefficients table, the value for `EW` gives the slope for the fitted line relating elytron width to mandible length for the control `GFP` treatment. This is very close to 1 indicating that for the control beetles mandible length increases by 1µm for every 1µm increase in elytron width. The five lines at the end of the coefficients table labelled `EW:treatment1ILP` and so on give the estimated differences in slope between the line fitted for the `GFP` treatment and the lines fitted for the five ILP treatments.

Similarly, the `(Intercept)` row gives us the estimated value for the intercept of the line relating mandible length to elytron width for the controls, and the rows labelled `treatment1ILP` and so on give us the differences in intercept for each of the five levels of `treatment` that are not controls. Note that differences in intercept are almost inevitable when there are differences in slope: if the data are in approximately the same space, as the slope of a line decreases so its intercept is likely to increase.

Have a look at the values of the estimated coefficients and have a go at the questions.

```{r beetle-coefficients-quiz, echo=FALSE}
question(
  "Which of the following statements is correct? More than one answer can be correct.",
  answer("Both 4ILP and 5ILP have fitted lines that do not appear to differ significantly from the GFP control.", correct = TRUE),
  answer("The intercept for 1ILP is much higher than that for the GFP control, indicating that beetles treated with 1ILP have larger mandibles than controls", message = "Answer 2. The intercept is indeed higher for 1ILP but the slope is much lower: in fact these animals have smaller mandibles."),
  answer("The estimated slope for 2ILP is 0.585", correct = TRUE),
  answer("The estimated slope for 3ILP is 0.239", message = "Answer 4. The estimated slope is the coefficient for EW (1.00466) plus the estimated coefficient for the interaction EW:treatment3ILP, which is -0.34641, so the slope for this line is 1.00466 - 0.34641 = 0.65825 or 0.658."),
  answer("3ILP has a fairly small effect on mandible growth compared to the control, which is just statistically significant", correct = TRUE),
  answer("The slope relating mandible length to elytron width is significantly different between 2ILP and 5ILP", message = "Answer 6. We can't really tell whether these slopes are different from the treatment contrasts we have in this table since there isn't a contrast between these two treatment levels. We can speculate that given the very small difference between 5ILP and the control and the fact that 2ILP is significantly different from the control we might well expect these to be different, but we don't really know.")
)
```

<details>

<summary>**Click here for more on interpreting these results**</summary>

What our model is telling us is that the 1ILP treatment has a strong effect on mandible growth, with the relationship between body size and mandible length being substantially reduced compared to controls. The 2ILP and 3ILP treatments also have some effect, with the slopes of the relationships between mandible length and body size being reduced by both treatments, but the magnitude of these effects and also our confidence in them is rather less than for 1ILP. 4ILP and 5ILP seem to have no real detectable effect on mandible growth.

</details>
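If you'd like to see all five slopes at once rather than doing the additions in your head, you can pull them out of the fitted model. This is a sketch rather than part of the original analysis (`B1` and the releveled `gnatocerus2` come from the setup chunks above):

```{r}
# The slope for each treatment level: the EW coefficient is the slope
# for the GFP reference level, and each EW:treatment coefficient is a
# *difference* in slope from that reference level
b <- coef(B1)

b["EW"] + c(GFP = 0, b[grep("^EW:treatment", names(b))])
```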
## Visualising the model output using ggplot

We could visualise the model output by just plotting the fitted lines onto a scatterplot using `abline()` or by using `predict()` to generate fitted values and confidence intervals. There is a much easier way, however, which is to use ggplot2. This graphics package is very good at plotting graphs with multiple factor levels visualised, and it would be silly not to at least show you how to do it in this case. We're not going to go through it in detail because that would take a lot of explanation, but if you want to know more there are a lot of useful resources available on how to use ggplot2 - Winston Chang's [R Graphics Cookbook](https://r-graphics.org/) is very good in my opinion.

To visualise our data with lines fitted for each level of treatment using ggplot2 we can just use this code. We could have plotted the confidence intervals for each line in as well but with more than a few lines this just makes a mess.

```{r}
library(ggplot2)

p1 <- ggplot(data = gnatocerus2) +           # tell it which data frame to use
  aes(x = EW, y = ML, colour = treatment) +  # specify the 'aesthetics'
  geom_point() +                             # add layer with points plotted
  geom_smooth(method = "lm", se = FALSE) +   # add lines
  scale_color_brewer(palette = "Dark2") +    # use the Dark2 palette
  theme_bw() +                               # use a better theme than the nasty default one
  labs(x = "Elytron width (µm)",
       y = "Mandible length (µm)")           # axis labels

p1
```

Here you can see the very large effect that 1ILP has, with all of the points well below the remainder. Looking at the lines for 2ILP and 3ILP you can see that they do have different slopes when compared to GFP but the elevation is roughly the same and so if these insulin-like proteins are having an effect it is quite subtle. The 4ILP and 5ILP lines are very close to the GFP line as we would expect from our model output.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/18_Factors_and_continuous_variables/Linear_models_6.Rmd
--- title: "Linear models 7: Model selection" output: learnr::tutorial: theme: default css: http://research.sbcs.qmul.ac.uk/r.knell/learnr_data/test2.css runtime: shiny_prerendered author: Rob Knell description: > When there are multiple potentially important explanatory variables there are many possible models that we could fit to our data. How do we choose an appropriate set of models, and how do we select the best ones? --- ```{r setup, include=FALSE} library(learnr) library(ggplot2) library(RColorBrewer) options(scipen = 5) knitr::opts_chunk$set(echo = TRUE, comment = NA, message = FALSE, fig.width = 5, fig.height = 5) load("gabon_diversity.rda") gabon2 <- gabon_diversity[ ,c(2,4,5,6,7,10)] ``` ## Principles of Model Selection The question of which model to select when we have a variety of potentially important explanatory variables is a complex one and there are differing opinions as to what constitutes the best approach. This video gives an introduction to the topic. It's a long one so take it easy. ![](https://youtu.be/4pgaVtVBVjk) ## Exercise one: Animal Abundance in Gabon For this exercise we'll be using the dataset which we previously used in the linear regression tutorial. Briefly, these are data published by Koerner *et al.* (2017)^1^ which are summaries of a series of repeated animal surveys on transects at varying distances from villages in Gabon. For each animal group Koerner *et al.* calculated the "relative abundance": the percentage of all encounters on that transect which were with that particular group. We will use the relative abundance of birds as our response variable. Koerner *et al.* collected data on a number of other aspects of each transect, so in addition to the distance from the nearest village we have the size of the nearest village (number of households), the type of land use (Park, Logging or Neither) the vegetation species richness measured as the number of tree species present in a series of plots along the transect, and the canopy cover as the percentage of the sky blocked by canpoy in each plot. Number of lianas, hunting intensity and land use were also recorded but we won't include these measures in our analysis for simplicity. The data are loaded as `gabon2`, which is a version of the `gabon_diversity` dataset we used before with some variables removed for simplicity. Check the gabon2 dataset using `str()` ```{r gabon1, exercise = TRUE} ``` ```{r gabon1-solution} str(gabon2) ``` Assuming the import is OK we will need to make LandUse into a factor using the `as.factor()` function. ```{r gabon2, exercise = TRUE} ``` ```{r gabon2-solution} gabon2$LandUse <- as.factor(gabon2$LandUse) ``` ```{r gabon_setup, echo = FALSE} gabon2$LandUse <- as.factor(gabon2$LandUse) model1 <- lm(RA_Birds ~ Distance + LandUse + NumHouseholds + Veg_Rich + Veg_Canopy, data = gabon2) model2 <- update(model1,~.-LandUse) model3 <- update(model2,~.-Veg_Canopy) ``` Check the conversion to a factor went well using `str()` again. ```{r gabon3, exercise = TRUE, exercise.setup = "gabon_setup"} ``` ```{r gabon3-solution} str(gabon2) ``` Now we should do some exploratory analysis. Rather than going thorugh a full version of this, for the sake of brevity we'll just use a quick method of visualising a complex dataset. This involves using the `pairs()` function on a data frame, which will plot a matrix with a small scatterplot for each pairwise comparision of variables. 
```{r echo = FALSE}
gabon2$LandUse <- as.factor(gabon2$LandUse)

model1 <- lm(RA_Birds ~ Distance + LandUse + NumHouseholds + Veg_Rich + Veg_Canopy, data = gabon2)
model2 <- update(model1,~.-LandUse)
model3 <- update(model2,~.-Veg_Canopy)
```

```{r fig.width = 7, fig.height = 7}
pairs(gabon2)
```

Looking at this gives us a quick check that there are no seriously anomalous data points, and we can get an idea of where there might be some important relationships. You can see the negative relationship between relative bird abundance and distance from the nearest village, for example. We can also see if any of our potential explanatory variables are highly correlated which might indicate potential issues with multicollinearity. None of them look as though we need to start getting concerned at this point.
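If you'd like a number to go with that visual impression, here's a quick sketch (this check isn't part of the original analysis): the pairwise correlations between the numeric variables in the dataset, response included.

```{r}
# Pairwise correlations between the numeric variables in gabon2
# (LandUse is dropped because it isn't numeric). Very high correlations
# between explanatory variables would be a warning sign for multicollinearity
round(cor(gabon2[, sapply(gabon2, is.numeric)]), 2)
```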
<br><br>

<hr>

1: Koerner, S.E., Poulsen, J.R., Blanchard, E.J., Okouyi, J. & Clark, C.J. (2017) Vertebrate community composition and diversity declines along a defaunation gradient radiating from rural villages in Gabon. The Journal of applied ecology, 54, 805–814.

## Initial model and diagnostics

We need to produce a minimal adequate model to try to explain relative bird abundance in terms of the various potential explanatory variables that are available. As you will have gathered from the video, there are a number of ways of doing this (e.g. all subsets analysis, 'enlightened' model choice) and also a number of ways of deciding which is our preferred model (e.g. AIC, significance testing). To run through examples of all of these would make this tutorial exceedingly long, so we'll just use a single approach for our data. We have little *a-priori* knowledge about this system, and since this is a largely exploratory analysis we will take a model reduction approach, and for simplicity's sake we'll use significance tests to choose between models. If you'd like to see how to analyse these data using AIC then Koerner *et al.* took that approach in their original paper. They also used a few more variables than we are dealing with here.

Start by fitting a model called `model1` with `RA_Birds` as the response variable and the main effects of each of our potential explanatory variables (`Distance`, `LandUse`, `NumHouseholds`, `Veg_Rich`, and `Veg_Canopy`). We have no particular reason to look for interaction terms and since we have a lot of main effects and the sample size is not especially large we will not investigate these.

```{r gabon5, exercise = TRUE, exercise.lines = 7, exercise.setup = "gabon_setup"}

```

```{r gabon5-solution}
model1 <- lm(RA_Birds ~ Distance + LandUse + NumHouseholds + Veg_Rich + Veg_Canopy, data = gabon2)
```

The first thing to do is to check our diagnostic plots. We'll just look at the first two for the sake of simplicity --- remember you can do this by adding the argument `which = 1:2` to the `plot()` function.

```{r gabon5a, exercise = TRUE, exercise.setup = "gabon_setup"}

```

```{r gabon5a-solution}
plot(model1, which = 1:2)
```

Have a look at these plots. What do you see?

```{r diagnostics-quiz1, echo=FALSE}
question(
  "Which statements do you agree with? More than one answer can be correct.",
  answer("The qq-plot shows positive skew", message = "Answer 1: this would be the case if the qqplot had a convex shape"),
  answer("The residuals versus fitted values plot has a hint of curvature but this is probably due to one datapoint with a large negative residual", correct = TRUE),
  answer("It's not possible to draw any conclusions because the sample size is too small"),
  answer("The qq-plot is about as good as we might expect for a dataset of this size. One point has a rather larger negative residual than we might expect", correct = TRUE),
  answer("There is evidence for heteroskedasticity in the residuals versus fitted values plot", message = "Answer 5. This would show up as a wedge shape")
)
```

<details>

<summary>**Click here for more on the diagnostics**</summary>

The diagnostics mostly look OK aside from the one data point (8) with a somewhat large negative residual. We'll proceed with our model simplification and see if this remains the case.

</details>

## Model simplification

Now we will proceed with simplifying our model to generate a minimal adequate model. The first thing to do is to assess the significance of each term in the model using `drop1()`. This compares the goodness of fit of models with and without each term that can be tested without violating the principle of marginality, which in this case is all of them since there are no interaction terms. So `drop1` compares our full model with a reduced model with all the terms except `Distance` to give us an assessment of whether a model with `Distance` has significantly better explanatory power, then the same for a model with all the terms except `LandUse`, and so on.

Don't forget you need to specify `test = "F"` as an argument for `drop1()`. Finally, because the rendering engine that formats these tutorials has a thing for trying to make tables pretty that is sometimes actually a hindrance, please enclose your `drop1()` function call in a `print()` function. This is to make the output that you see look like the standard R output and is just something you need to do within the tutorial.

```{r gabon6, exercise = TRUE, exercise.setup = "gabon_setup"}

```

```{r gabon6-hint-1}
# Just give drop1 the name of our model
# as the first argument and then test = "F"
# as the second
#
# Please remember to use print() as well
```

```{r gabon6-hint-2}
# This is the solution
print(drop1(model1, test = "F"))
```

So one model term, `Distance`, is significant but the rest are not, although two are close. The one which is furthest from significance is `LandUse` so we'll fit a second model (`model2`) without that variable and run `drop1()` on our new model.

```{r gabon7, exercise = TRUE, exercise.lines = 8, exercise.setup = "gabon_setup"}

```

```{r gabon7-hint-1}
# Just use the code you used for model 1
# without the LandUse term, and then run drop1
# on the new model. Don't forget to include
# the test = "F" argument for drop1
#
# Please remember to use print() as well
```

```{r gabon7-hint-2}
# This is the solution
model2 <- lm(RA_Birds ~ Distance + NumHouseholds + Veg_Rich + Veg_Canopy, data = gabon2)

print(drop1(model2, test = "F"))
```

Removing `LandUse` has changed things a lot. We now have three apparently significant terms, and only `Veg_Canopy` remains as non-significant. As an aside it's quite normal for p-values to change like this as we remove terms, because the error variance will change, leading to different F-statistics.
Sometimes it goes the other way and terms which are significant in more complex models lose their significance as the model simplifies.

We still need to remove `Veg_Canopy` and check the model that is produced. Our next model should be called `model3` and you'll need to run `drop1()` on it to assess the significance of the remaining terms. Just to avoid any confusion, we're removing `Veg_Canopy` from `model2` so our new model should have neither `LandUse` nor `Veg_Canopy` as explanatory variables.

```{r gabon8, exercise = TRUE, exercise.lines = 7, exercise.setup = "gabon_setup"}

```

```{r gabon8-hint-1}
# Just use the code you used for model 2
# without the Veg_Canopy term, and then run drop1
# on the new model. Don't forget to include
# the test = "F" argument for drop1
#
# Please remember to use print() as well
```

```{r gabon8-hint-2}
# This is the solution
model3 <- lm(RA_Birds ~ Distance + NumHouseholds + Veg_Rich, data = gabon2)

print(drop1(model3, test = "F"))
```

All three of the remaining explanatory variables are significant so we've arrived at our *Minimal Adequate Model*: a model lacking any of the three remaining terms does significantly less well at explaining the patterns in the RA_Birds variable.

We should probably check those diagnostic plots again.

```{r}
plot(model3, which = 1:2)
```

These look very similar to before. That pesky data point 8 still has a larger negative residual than we'd like and there's a worry that it might be having a strong effect on the overall model. One option if you're worried about a datapoint like this is to run the analysis again without it and see if it makes a material difference. For the sake of brevity we won't go through this process, but if we were to do it we would end up with a very similar final model, which tells us that we don't really need to worry about this point.
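As an aside, you don't have to retype the whole model formula each time you drop a term: `update()` will do it for you, and this is in fact how the reduced models are built in this tutorial's setup code. The `~ . - LandUse` notation means "the same formula, minus LandUse".

```{r, eval = FALSE}
# The same simplification path written compactly with update()
model2 <- update(model1, ~ . - LandUse)
model3 <- update(model2, ~ . - Veg_Canopy)
```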
That would only be the case if we were dealing with a single factor ANOVA")
)
```

The effect of village size (`NumHouseholds`) is worth a closer look because this is not significant if we analyse the variable by itself:

```{r}
cor.test(gabon2$NumHouseholds, gabon2$RA_Birds)
```

This is an example of how including an explanatory variable in a more complex model can sometimes reveal patterns that are not clear when we consider the variable by itself.

<br><br><hr>

## License

This content is licensed under a [GPL-3](https://www.gnu.org/licenses/gpl-3.0.en.html) license
/scratch/gouwar.j/cran-all/cranData/Biostatistics/inst/tutorials/19_Model_selection/Linear_models_7_Model_selection.Rmd
--- title: "Biostatistics vignette" author: "Rob Knell" date: "January 2021" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Biostatistics vignette} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup, echo = FALSE} library(Biostatistics) ``` # The Biostatistics Package This package consists of a series of learnr tutorials for use in teaching statistics to biologists. They were written for use in undergraduate and postgraduate teaching in the UK but they could also be used for individual, self-directed learning. The subjects covered range from basic data visualistion and description through to reasonably advanced linear modelling. There are obviously many subjects which are not currently covered such as generalised linear models, mixed effects models and multivariate statistics and it is hoped that these will be incorporated in the future. There is a strong emphasis throughout the tutorial on analysing real data sets. This is much better for learning statistics than using synthetic example data because with real data comes all of the issues and uncertainty associated with real science. The data used here have mostly been made publicly available by the authors of papers published in the biological literature, mostly via the [Dryad data repository](https://datadryad.org/stash), and I would like to thank all of them for this. The tutorials are written for the [learnr](https://rstudio.github.io/learnr/) package which uses an [rmarkdown](https://rmarkdown.rstudio.com/) framework to render tutorials into [shiny](https://shiny.rstudio.com/) webapps. The rmarkdown files for all of the tutorials are available on the [author's github page](https://github.com/rjknell). ## Running tutorials There are two ways of running these tutorials. The easy way assumes you are using a recent version of RStudio. If this is the case then once you have installed the package the tutorials will show up in the 'Tutorial' tab in the RStudio pane that also includes the Environment and History tabs. Click the "Start Tutorial" button and the tutorial will render, which can take a few seconds, and then appear in the Tutorial tab. You'll probably want to maximise the pane within your RStudio window. If you want to finish the tutorial click on the 'Stop' sign button at the top left of the tab. If you would rather run your tutorial in a separate browser window then you can use the `run_tutorial()` function from the learnr package. You need to specify the name of the tutorial and the package, so `learnr::run_tutorial("02_Descriptive_statistics", package = "Biostatistics")` will run the Descriptive Statistics tutorial and `learnr::run_tutorial("17_Multiple_Regression", package = "Biostatistics")` will run the Multiple Regression tutorial. In my experience the first method, with the Tutorial pane, seems more stable and sometimes tutorials won't render using `run_tutorial()` for reasons that are not clear. ## List of tutorials The tutorials currently in the package are: 00_Introduction 01_Frequency_histograms 02_Descriptive_statistics 03_Boxplots 04_Scatterplots 05_Sampling_distributions 06_Standard_errors 07_Confidence_intervals 08_CIs_comparing_two_means 09_Paired_sample_t_tests 10_Two_sample_t_tests 11_Chi_square_tests 12_Correlation 13_Single_factor_ANOVA 14_Linear_Regression 15_Model_assumptions 16_Multi_factor_ANOVA 17_Multiple_regression 18_Factors_and_continuous_variables 19_Model_selection
/scratch/gouwar.j/cran-all/cranData/Biostatistics/vignettes/Biostatistics.Rmd
#' Outputs related adverse event timeline plots including just system organ class
#' (AE category), or system organ class and lowest level term (AE detail). This
#' function can fit up to 5 different attributions. Modify width, height and scale
#' parameters in ggsave() to customize fit for large plot.
#'
#' @param subjID key identifier field for participant ID in data sets
#' @param subjID_ineligText character text that denotes participant IDs to exclude,
#'   for example, c("New Subject") (if provided)
#' @param baseline_datasets list of data frames that contain baseline participant characteristics,
#'   for example, list(enrollment_DF,demography_DF,ineligibility_DF)
#' @param ae_dataset data frame that contains subject AEs
#' @param ae_attribVars field(s) that denotes attribution to intervention under study,
#'   for example, c("CTC_AE_ATTR_SCALE","CTC_AE_ATTR_SCALE_1") (if provided)
#' @param ae_attribVarsName character text that denotes name of interventions under study,
#'   for example, c("Drug 1", "Drug 2") (if provided)
#' @param ae_attribVarText character text that denotes related attribution, for example
#'   c("Definite", "Probable", "Possible") (if provided)
#' @param startDtVars field(s) that denotes participant start date (e.g. 10MAY2021). For example,
#'   it could be enrollment date or screening date. If more than one field given
#'   (unique names are required), each field is assumed to be specific start date
#'   for attribution in corresponding field order
#' @param ae_detailVar field that denotes participant AE detail (lowest level term)
#' @param ae_categoryVar field that denotes participant AE category (system organ class)
#' @param ae_severityVar field that denotes participant AE severity grade (numeric)
#' @param ae_onsetDtVar field that denotes participant AE onset date
#' @param time_unit character text that denotes time unit for desired timeline,
#'   for example, could be one of c("day","week","month","year") (if provided)
#' @param include_ae_detail boolean that denotes if AE detail should be included
#'   in timeline plot. Default is TRUE
#' @param legendPerSpace parameter that denotes proportion of vertical image space
#'   dedicated to legend at bottom. Default is 0.05 for AE detail and 0.1 for AE Category
#' @param fonts character text that denotes font for AE category, AE detail, axis,
#'   legend and plot labels (if provided)
#' @param fontColours character text that denotes system font colours for AE category and
#'   AE detail (if provided)
#' @param panelColours character text that denotes panel background colours for AE category,
#'   AE detail and plot area (if provided)
#' @param attribColours character text that denotes colours for attributions, supports up to 10
#'   distinct colours (if provided)
#' @param attribSymbols text that denotes median plot symbols for attributions, supports up to 10
#'   distinct symbols (if provided)
#' @param columnWidths text that denotes character column widths for AE category and AE detail
#'   columns (if provided)
#' @keywords plot
#' @return ggplot object of AE timeline plot
#' @importFrom plyr join_all rbind.fill
#' @importFrom stats lm sd anova as.formula binomial median na.fail
#' @importFrom purrr modify_if
#' @importFrom dplyr select distinct mutate arrange summarise group_by filter across row_number n_distinct all_of right_join count ungroup coalesce
#' @importFrom stringr str_detect str_wrap str_split
#' @importFrom ggh4x strip_nested facet_nested elem_list_text elem_list_rect force_panelsizes
#' @importFrom forcats fct_rev
#' @importFrom cowplot get_legend
#' @importFrom ggstance geom_pointrangeh
#' @import ggplot2
#' @import lifecycle
#' @export
#' @examples
#' data("drug1_admin", "drug2_admin", "ae");
#' p <- ae_timeline_plot(subjID="Subject",subjID_ineligText=c("01","11"),
#'   baseline_datasets=list(drug1_admin, drug2_admin),
#'   ae_dataset=ae,
#'   ae_attribVars=c("CTC_AE_ATTR_SCALE","CTC_AE_ATTR_SCALE_1"),
#'   ae_attribVarsName=c("Drug 1","Drug 2"),
#'   ae_attribVarText=c("Definite", "Probable", "Possible"),
#'   startDtVars=c("TX1_DATE_INT","TX2_DATE_INT"),
#'   ae_detailVar="ae_detail",
#'   ae_categoryVar="ae_category",ae_severityVar="AE_SEV_GD",
#'   ae_onsetDtVar="AE_ONSET_DT_INT",time_unit="month",
#'   include_ae_detail=FALSE,
#'   fonts=c("Calibri","Albany AMT","Gadugi","French Script MT","Forte"),
#'   fontColours=c("#FFE135"),
#'   panelColours=c("#E52B50",NA,"#FFE4C4"),
#'   attribColours=c("#9AB973","#01796F","#FFA343","#CC7722"),
#'   attribSymbols=c(7,8,5,6),
#'   columnWidths=c(23))
ae_timeline_plot <- function(subjID,subjID_ineligText=NULL,baseline_datasets,ae_dataset,
                             ae_attribVars,ae_attribVarsName=NULL,ae_attribVarText=NULL,
                             startDtVars,ae_detailVar,ae_categoryVar,
                             ae_severityVar,ae_onsetDtVar,time_unit=c("day","week","month","year"),
                             include_ae_detail=TRUE,legendPerSpace=NULL,
                             fonts=NULL,fontColours=NULL,panelColours=NULL,
                             attribColours=NULL,attribSymbols=NULL,
                             columnWidths=NULL){

  options(dplyr.summarise.inform = FALSE)

  if (is.null(ae_attribVarText)) {
    ae_attribVarText <- c("Definite", "Probable", "Possible");
  }

  fontCategory <- "Bahnschrift";
  fontDetail <- "Bauhaus 93";
  fontAxis <- "Berlin Sans FB";
  fontLegend <- "Arial";
  fontPlotLabels <- "Arial";
  if (!is.null(fonts)) {
    tryCatch({ fontCategory <- na.fail(fonts[1]); }, error=function(e){})
    tryCatch({ fontDetail <- na.fail(fonts[2]); }, error=function(e){})
    tryCatch({ fontAxis <- na.fail(fonts[3]); }, error=function(e){})
    tryCatch({ fontLegend <- na.fail(fonts[4]); }, error=function(e){})
    tryCatch({ fontPlotLabels <- na.fail(fonts[5]); }, error=function(e){})
  }

  fontColoursCategory <- "black";
  fontColoursDetail <- "white";
  if (!is.null(fontColours)) {
    tryCatch({ fontColoursCategory <- na.fail(fontColours[1]); }, error=function(e){})
    tryCatch({
      fontColoursDetail <- na.fail(fontColours[2]);
    }, error=function(e){})
  }

  panelColoursCategory <- "#FFB347";
  panelColoursDetail <- "#C19A6B";
  panelColoursPlot <- "#FAF0E6";
  if (!is.null(panelColours)) {
    tryCatch({ panelColoursCategory <- na.fail(panelColours[1]); }, error=function(e){})
    tryCatch({ panelColoursDetail <- na.fail(panelColours[2]); }, error=function(e){})
    tryCatch({ panelColoursPlot <- na.fail(panelColours[3]); }, error=function(e){})
  }

  attribColours1 <- "#FF2800"; attribColours2 <- "#AE0C00"; attribColours3 <- "#08E8DE";
  attribColours4 <- "#1DACD6"; attribColours5 <- "#BF94E4"; attribColours6 <- "#702963";
  attribColours7 <- "#FFBF00"; attribColours8 <- "#FF7E00"; attribColours9 <- "#8DB600";
  attribColours10 <- "#008000";
  if (!is.null(attribColours)) {
    tryCatch({ attribColours1 <- na.fail(attribColours[1]); }, error=function(e){})
    tryCatch({ attribColours2 <- na.fail(attribColours[2]); }, error=function(e){})
    tryCatch({ attribColours3 <- na.fail(attribColours[3]); }, error=function(e){})
    tryCatch({ attribColours4 <- na.fail(attribColours[4]); }, error=function(e){})
    tryCatch({ attribColours5 <- na.fail(attribColours[5]); }, error=function(e){})
    tryCatch({ attribColours6 <- na.fail(attribColours[6]); }, error=function(e){})
    tryCatch({ attribColours7 <- na.fail(attribColours[7]); }, error=function(e){})
    tryCatch({ attribColours8 <- na.fail(attribColours[8]); }, error=function(e){})
    tryCatch({ attribColours9 <- na.fail(attribColours[9]); }, error=function(e){})
    tryCatch({ attribColours10 <- na.fail(attribColours[10]); }, error=function(e){})
  }

  attribSymbols1 <- 1; attribSymbols2 <- 2; attribSymbols3 <- 3; attribSymbols4 <- 4;
  attribSymbols5 <- 5; attribSymbols6 <- 6; attribSymbols7 <- 7; attribSymbols8 <- 8;
  attribSymbols9 <- 9; attribSymbols10 <- 10;
  if (!is.null(attribSymbols)) {
    tryCatch({ attribSymbols1 <- na.fail(attribSymbols[1]); }, error=function(e){})
    tryCatch({ attribSymbols2 <- na.fail(attribSymbols[2]); }, error=function(e){})
    tryCatch({ attribSymbols3 <- na.fail(attribSymbols[3]); }, error=function(e){})
    tryCatch({ attribSymbols4 <- na.fail(attribSymbols[4]); }, error=function(e){})
    tryCatch({ attribSymbols5 <- na.fail(attribSymbols[5]); }, error=function(e){})
    tryCatch({ attribSymbols6 <- na.fail(attribSymbols[6]); }, error=function(e){})
    tryCatch({ attribSymbols7 <- na.fail(attribSymbols[7]); }, error=function(e){})
    tryCatch({ attribSymbols8 <- na.fail(attribSymbols[8]); }, error=function(e){})
    tryCatch({ attribSymbols9 <- na.fail(attribSymbols[9]); }, error=function(e){})
    tryCatch({ attribSymbols10 <- na.fail(attribSymbols[10]); }, error=function(e){})
  }

  time_unit <- match.arg(time_unit) # resolve time_unit to a single valid choice ("day" when not supplied)
  divisionUnit = 1;
  plotTimeText = "Days";
  if (time_unit == "day") {
    divisionUnit = 1;
    plotTimeText = "Days"
  } else if (time_unit == "week") {
    divisionUnit = 7;
    plotTimeText = "Weeks"
  } else if (time_unit == "month") {
    divisionUnit = 30;
    plotTimeText = "Months"
  } else if (time_unit == "year") {
    divisionUnit = 365.25;
    plotTimeText = "Years"
  } else {
    divisionUnit = 30;
    plotTimeText = "Months"
  }

  mydata <- plyr::join_all(baseline_datasets, by = subjID, type = "full") |>
    dplyr::right_join(ae_dataset, by = subjID) |>
    dplyr::mutate(Subject = eval(parse(text=subjID)),
                  ae_detail = eval(parse(text=ae_detailVar)),
                  ae_category = eval(parse(text=ae_categoryVar)),
                  AE_SEV_GD = eval(parse(text=ae_severityVar)),
                  AE_ONSET_DT_INT = eval(parse(text=ae_onsetDtVar))) |>
    dplyr::select(Subject, ae_detail, ae_category, AE_SEV_GD,
                  dplyr::all_of(ae_attribVars), dplyr::all_of(startDtVars), AE_ONSET_DT_INT) |>
    dplyr::group_by(across(c(Subject, ae_detail,
                             ae_category, dplyr::all_of(ae_attribVars), AE_SEV_GD, AE_ONSET_DT_INT))) |>
    dplyr::summarise(dplyr::across(dplyr::all_of(startDtVars), ~dplyr::coalesce(x=.x))) |>
    dplyr::mutate(AE_ONSET_DT_INT = as.Date(AE_ONSET_DT_INT, tz = "UTC"),
                  dplyr::across(dplyr::all_of(startDtVars), ~as.Date(x=.x, tz = "UTC")),
                  AE_SEV_GD = as.numeric(AE_SEV_GD)) |>
    dplyr::filter(!Subject %in% subjID_ineligText) |>
    dplyr::arrange(Subject)

  if (include_ae_detail == T) {

    legendPerSpaceDetail = 0.05
    if (!is.null(legendPerSpace)) {
      tryCatch({ legendPerSpaceDetail = na.fail(legendPerSpace) }, error=function(e){})
    }
    columnWidth1 = 15;
    columnWidth2 = 25;
    if (!is.null(columnWidths)) {
      tryCatch({ columnWidth1 = na.fail(columnWidths[1]); }, error=function(e){})
      tryCatch({ columnWidth2 = na.fail(columnWidths[2]); }, error=function(e){})
    }

    #i <- 2;
    mydataPlot <- NA;
    mydataPlot <- as.data.frame(mydataPlot);
    for (i in 1:length(ae_attribVars)) {
      drugName <- paste("Attribution ", i, sep="");
      if (!is.null(ae_attribVarsName)) {
        tryCatch({ drugName <- na.fail(ae_attribVarsName[i]); }, error=function(e){})
      }
      selectedAttribVar <- ae_attribVars[i];
      if (!is.na(startDtVars[i])) {
        selectedStartVar <- startDtVars[i]
      } else {
        selectedStartVar <- startDtVars[1]
      }

      mydata_drug112 <- mydata |>
        dplyr::ungroup() |>
        dplyr::select(Subject, ae_detail, ae_category, AE_SEV_GD,
                      dplyr::all_of(selectedAttribVar), dplyr::all_of(selectedStartVar), AE_ONSET_DT_INT) |>
        dplyr::filter(get(selectedAttribVar) %in% ae_attribVarText & AE_SEV_GD %in% c(1:2) &
                      AE_ONSET_DT_INT >= get(selectedStartVar)) |>
        dplyr::group_by(Subject, ae_detail, ae_category, AE_ONSET_DT_INT, get(selectedStartVar)) |>
        dplyr::summarise(drug1_ae = AE_ONSET_DT_INT - get(selectedStartVar)) |>
        dplyr::ungroup() |>
        dplyr::group_by(Subject, ae_detail, ae_category) |>
        dplyr::arrange(AE_ONSET_DT_INT)
        #dplyr::filter(dplyr::row_number()==1) #takes the first grade 1-2 AE per subject by type
      mydata_drug1_sum12 <- mydata_drug112 |>
        dplyr::ungroup() |>
        dplyr::select(ae_detail, ae_category, drug1_ae) |>
        dplyr::group_by(ae_detail, ae_category) |>
        dplyr::summarise(time_median = median(drug1_ae), time_min = min(drug1_ae),
                         time_max = max(drug1_ae), group = paste(drugName, ": AE 1-2", sep=""))
      mydata_drug112 <- as.data.frame(mydata_drug112);
      mydata_drug1_sum12 <- as.data.frame(mydata_drug1_sum12);

      mydata_drug13p <- mydata |>
        dplyr::ungroup() |>
        dplyr::select(Subject, ae_detail, ae_category, AE_SEV_GD,
                      dplyr::all_of(selectedAttribVar), dplyr::all_of(selectedStartVar), AE_ONSET_DT_INT) |>
        dplyr::filter(get(selectedAttribVar) %in% ae_attribVarText & AE_SEV_GD %in% c(3:5) &
                      AE_ONSET_DT_INT >= get(selectedStartVar)) |>
        dplyr::group_by(Subject, ae_detail, ae_category, AE_ONSET_DT_INT, get(selectedStartVar)) |>
        dplyr::summarise(drug1_ae = AE_ONSET_DT_INT - get(selectedStartVar)) |>
        dplyr::ungroup() |>
        dplyr::group_by(Subject, ae_detail, ae_category) |>
        dplyr::arrange(AE_ONSET_DT_INT)
        #dplyr::filter(dplyr::row_number()==1) #takes the first grade 3+ AE per subject by type
      mydata_drug1_sum3p <- mydata_drug13p |>
        dplyr::ungroup() |>
        dplyr::select(ae_detail, ae_category, drug1_ae) |>
        dplyr::group_by(ae_detail, ae_category) |>
        dplyr::summarise(time_median = median(drug1_ae), time_min = min(drug1_ae),
                         time_max = max(drug1_ae), group = paste(drugName, ": AE 3+", sep=""))
      mydata_drug13p <- as.data.frame(mydata_drug13p);
      mydata_drug1_sum3p <- as.data.frame(mydata_drug1_sum3p);

      mydataPlot <- plyr::rbind.fill(mydataPlot, mydata_drug1_sum12, mydata_drug1_sum3p);
    }
    mydataPlot <- mydataPlot[-1,-1];

    ### Convert onset times from days to the requested time unit;
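    ## For example (illustrative arithmetic, not output from the package): with
    ## time_unit = "month", divisionUnit is 30, so a median onset of 45 days is
    ## plotted as round(45/30, 1) = 1.5 months.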
mydataPlot$time_median <- round((as.numeric(mydataPlot$time_median/divisionUnit)), 1); mydataPlot$time_max <- round((as.numeric(mydataPlot$time_max/divisionUnit)), 1); mydataPlot$time_min <- round((as.numeric(mydataPlot$time_min/divisionUnit)), 1); #-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#; #-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#; ### Make AE timeline plot including both SOC and LLT; plotData <- mydataPlot; plotData <- as.data.frame(plotData); plotData$label <- paste(plotData$time_median, " (", plotData$time_min, "-", plotData$time_max, ")", sep=""); plotData[which(plotData$time_min == plotData$time_max), ]$label <- paste(" ", plotData[which(plotData$time_min == plotData$time_max), ]$time_median, sep=""); plotData$ae_category <- stringr::str_wrap(plotData$ae_category, width = columnWidth1); plotData$ae_detail <- stringr::str_wrap(plotData$ae_detail, width = columnWidth2); ### Make correct order for AEs to be plotted; plotData <- plotData |> dplyr::arrange(ae_detail) plotData <- plotData |> purrr::modify_if(is.character, as.factor); plotData$ae_detail <- factor(plotData$ae_detail, levels=rev(levels(plotData$ae_detail))); #str(plotData); ### Below part sets correct facet_nested() panel spacing adjusting for LLT, SOC label size; plotPanelSpacing <- plotData |> dplyr::select("ae_category", "ae_detail") |> dplyr::mutate(span_soc = lengths(stringr::str_split(ae_category, '\n'))) |> dplyr::mutate(span_llt = lengths(stringr::str_split(ae_detail, '\n'))) span_txt <- plotData|> dplyr::group_by(ae_detail) |> dplyr::count(ae_detail, name="span_txt") plotPanelSpacing <- as.data.frame(plotPanelSpacing); span_txt <- as.data.frame(span_txt); plotData <- plyr::join_all(list(plotData, span_txt, plotPanelSpacing[, -which(colnames(plotPanelSpacing) %in% c("ae_category"))]), by=c("ae_detail"), type='left', match = "first"); #str(plotData); #92 obs. of 10 variables; #-#-#-# Control part for setting span size - this is complex, best leave alone; plotData$span <- plotData$span_txt; ### Below is how many treatment lines display okay per one line of LLT, manually tweak below; plotPerfectRowTxtAmt <- 2; plotData$span <- plotData$span_txt / plotPerfectRowTxtAmt; plotData$span[which(plotData$span_llt >= plotData$span_txt)] <- (plotData$span_llt[which(plotData$span_llt >= plotData$span_txt)] / plotData$span_txt[which(plotData$span_llt >= plotData$span_txt)]) * plotPerfectRowTxtAmt; plotData$span[which(plotData$span_txt == 1 & plotData$span_llt >= plotPerfectRowTxtAmt)] <- plotData$span_llt[which(plotData$span_txt == 1 & plotData$span_llt >= plotPerfectRowTxtAmt)]; plotData$span[which(plotData$span_txt == 1 & plotData$span_llt < plotPerfectRowTxtAmt)] <- 1; span_soc <- plotData|> dplyr::group_by(ae_category) |> dplyr::count(ae_category, name="span_soc_tot") span_soc <- as.data.frame(span_soc); plotData <- plyr::join_all(list(plotData, span_soc), by=c("ae_category"), type='left', match = "first"); #str(plotData); #194 obs. 
of 12 variables; ### Span part below for SOC may need future adjusting and error checking; plotData$span[which(plotData$span_soc_tot == plotPerfectRowTxtAmt)] <- (plotData$span_soc[which(plotData$span_soc_tot == plotPerfectRowTxtAmt)] / plotData$span_soc_tot[which(plotData$span_soc_tot == plotPerfectRowTxtAmt)]) * plotPerfectRowTxtAmt; plotData$span[which(plotData$span_soc_tot > plotData$span_soc & plotData$span_soc*plotPerfectRowTxtAmt > plotData$span_soc_tot)] <- plotData$span_soc[which(plotData$span_soc_tot > plotData$span_soc & plotData$span_soc*plotPerfectRowTxtAmt > plotData$span_soc_tot)] / plotPerfectRowTxtAmt; plotData$span[which(plotData$span_soc_tot < plotPerfectRowTxtAmt)] <- plotData$span_soc[which(plotData$span_soc_tot < plotPerfectRowTxtAmt)]; plotData$span[which(plotData$span_soc_tot < plotData$span_soc & plotData$span_soc_tot > plotPerfectRowTxtAmt)] <- plotData$span_soc[which(plotData$span_soc_tot < plotData$span_soc & plotData$span_soc_tot > plotPerfectRowTxtAmt)] / plotData$span_soc_tot[which(plotData$span_soc_tot < plotData$span_soc & plotData$span_soc_tot > plotPerfectRowTxtAmt)]; #-#-#-#; plotPanelSpacingCheck <- plotData |> dplyr::select("ae_category", "ae_detail", "span_soc", "span_llt", "span_txt", "span_soc_tot", "span") #write.xlsx(plotPanelSpacingCheck, file=paste("plotPanelSpacingCheck", ".xlsx", sep=""), sheetName="GGplot facet span", col.names=TRUE, row.names=FALSE, append=F, showNA=FALSE); ### This part sets order correct for ggplot facets; plotSpan <- plotData |> dplyr::select("ae_category", "ae_detail", "span") |> dplyr::group_by(ae_category, ae_detail) |> dplyr::arrange(ae_category, desc(ae_detail)) |> dplyr::filter(dplyr::row_number()==1) plotSpan <- as.data.frame(plotSpan); SOC_LLT_strips <- ggh4x::strip_nested( # Vertical strips size = "variable", background_y = ggh4x::elem_list_rect(fill = c(panelColoursCategory, panelColoursDetail)), text_y = ggh4x::elem_list_text(colour = c(fontColoursCategory, fontColoursDetail), family=c(fontCategory, fontDetail), hjust = c(1,0), vjust = c(1,0.5)), by_layer_y = TRUE ) #plotData; plotData$group <- as.factor(plotData$group); p1_with_legend <- ggplot(plotData, aes(xmin=time_min, xmax=time_max, y=forcats::fct_rev(group))) + ggstance::geom_pointrangeh(aes(x=time_median, shape=group, color=group), position=position_dodge2(width = 0, preserve = "single", padding = -1.5), fatten=2) + geom_text(aes(x=time_max, color=group, label=label), position=position_dodge2(width = 0, preserve = "single", padding = -1.5), size=2.6, hjust=-0.2, show.legend = FALSE, family=fontPlotLabels) + scale_color_manual(name=NULL, values=c(attribColours1, attribColours2, attribColours3, attribColours4, attribColours5, attribColours6, attribColours7, attribColours8, attribColours9, attribColours10)) + scale_shape_manual(name=NULL, values=c(attribSymbols1, attribSymbols2, attribSymbols3, attribSymbols4, attribSymbols5, attribSymbols6, attribSymbols7, attribSymbols8, attribSymbols9, attribSymbols10)) + xlab(paste(plotTimeText, " [median onset time (range)]", sep="")) + theme(legend.position="bottom") + theme(legend.title = element_blank()) + guides(color=guide_legend(nrow=2,byrow=F)) + theme(strip.text.y.left = element_text(angle = 0), strip.text = element_text(family=fontCategory, hjust = 1, vjust = 1, margin = margin(5, 5, 5, 5, "pt")), strip.background = element_rect(fill=panelColoursCategory, color="white")) + theme(panel.spacing=unit(0, "cm")) + theme(axis.text.y = element_text(hjust = 1)) + scale_x_continuous(expand = expansion(add = c(8,30)), 
limits=c(0,max(plotData$time_max)*1.30), minor_breaks=NULL) + theme(panel.grid.minor.x=element_blank(), panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) + theme(axis.title.y=element_blank(), axis.ticks.y=element_blank(), panel.border=element_blank(), panel.background=element_blank(), plot.title=element_text(hjust = 0.5)) + ggh4x::facet_nested(ae_category + forcats::fct_rev(ae_detail) ~ ., scales = "free", space = "free", switch = "y", strip = SOC_LLT_strips) + scale_y_discrete(position = "right") + theme(legend.key = element_rect(fill = panelColoursPlot), text=element_text(family=fontLegend), legend.background = element_rect(fill = panelColoursPlot), plot.margin = margin(0,0,0,0, "cm")) + theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank(), axis.title.x = element_text(family=fontAxis)) + theme(panel.background = element_rect(fill = panelColoursPlot)) + ggh4x::force_panelsizes(rows = plotSpan$span) + theme(legend.title=element_blank(), legend.margin = margin(0, 0, 0, 0), legend.spacing.x = unit(0, "mm"), legend.spacing.y = unit(0, "mm")) p1_no_legend <- p1_with_legend + theme(legend.position = "none") #### This part makes the legend centered below both plot and panel area; gt <- ggplot_gtable(ggplot_build(p1_no_legend)) le1 <- cowplot::get_legend(p1_with_legend) #---!!!!!! note the second parameter in rel_heights below sets legend space area on plot !!!!!!---# pPlot <- cowplot::plot_grid(gt, le1, nrow = 2, rel_heights = c(1, legendPerSpaceDetail)) + theme(plot.background = element_rect(fill = "white", colour = NA)) return(pPlot) } else { legendPerSpaceCategory = 0.1 if (!is.null(legendPerSpace)) { tryCatch({ legendPerSpaceCategory = na.fail(legendPerSpace) }, error=function(e){}) } columnWidth1 = 25; columnWidth2 = 15; if (!is.null(columnWidths)) { tryCatch({ columnWidth1 = na.fail(columnWidths[1]); }, error=function(e){}) tryCatch({ columnWidth2 = na.fail(columnWidths[2]); }, error=function(e){}) } #i <- 1; mydataPlot <- NA; mydataPlot <- as.data.frame(mydataPlot); for (i in 1:length(ae_attribVars)) { drugName <- paste("Attribution ", i, sep=""); if (!is.null(ae_attribVarsName)) { tryCatch({ drugName <- na.fail(ae_attribVarsName[i]); }, error=function(e){}) } selectedAttribVar <- ae_attribVars[i]; if (!is.na(startDtVars[i])) { selectedStartVar <- startDtVars[i] } else { selectedStartVar <- startDtVars[1] } mydata_drug112 <- mydata |> dplyr::ungroup() |> dplyr::select(Subject, ae_category, AE_SEV_GD, dplyr::all_of(selectedAttribVar), dplyr::all_of(selectedStartVar), AE_ONSET_DT_INT) |> dplyr::filter(get(selectedAttribVar) %in% ae_attribVarText & AE_SEV_GD %in% c(1:2) & AE_ONSET_DT_INT >= get(selectedStartVar)) |> dplyr::group_by(Subject, ae_category, AE_ONSET_DT_INT, get(selectedStartVar)) |> dplyr::summarise(drug1_ae = AE_ONSET_DT_INT - get(selectedStartVar)) |> dplyr::ungroup() |> dplyr::group_by(Subject, ae_category) |> dplyr::arrange(AE_ONSET_DT_INT) #dplyr::filter(dplyr::row_number()==1) #takes the first grade 1-2 AE per subject by type mydata_drug1_sum12 <- mydata_drug112 |> dplyr::ungroup() |> dplyr::select(ae_category, drug1_ae) |> dplyr::group_by(ae_category) |> dplyr::summarise(time_median = median(drug1_ae), time_min = min(drug1_ae), time_max = max(drug1_ae), group = paste(drugName, ": AE 1-2", sep="")) mydata_drug112 <- as.data.frame(mydata_drug112); mydata_drug1_sum12 <- as.data.frame(mydata_drug1_sum12); mydata_drug13p <- mydata |> dplyr::ungroup() |> dplyr::select(Subject, ae_category, AE_SEV_GD, 
                      dplyr::all_of(selectedAttribVar), dplyr::all_of(selectedStartVar), AE_ONSET_DT_INT) |>
        dplyr::filter(get(selectedAttribVar) %in% ae_attribVarText & AE_SEV_GD %in% c(3:5) &
                      AE_ONSET_DT_INT >= get(selectedStartVar)) |>
        dplyr::group_by(Subject, ae_category, AE_ONSET_DT_INT, get(selectedStartVar)) |>
        dplyr::summarise(drug1_ae = AE_ONSET_DT_INT - get(selectedStartVar)) |>
        dplyr::ungroup() |>
        dplyr::group_by(Subject, ae_category) |>
        dplyr::arrange(AE_ONSET_DT_INT)
        #dplyr::filter(dplyr::row_number()==1) #takes the first grade 3+ AE per subject by type
      mydata_drug1_sum3p <- mydata_drug13p |>
        dplyr::ungroup() |>
        dplyr::select(ae_category, drug1_ae) |>
        dplyr::group_by(ae_category) |>
        dplyr::summarise(time_median = median(drug1_ae), time_min = min(drug1_ae),
                         time_max = max(drug1_ae), group = paste(drugName, ": AE 3+", sep=""))
      mydata_drug13p <- as.data.frame(mydata_drug13p);
      mydata_drug1_sum3p <- as.data.frame(mydata_drug1_sum3p);

      mydataPlot <- plyr::rbind.fill(mydataPlot, mydata_drug1_sum12, mydata_drug1_sum3p);
    }
    mydataPlot <- mydataPlot[-1,-1];

    ### Convert onset times from days to the requested time unit;
    mydataPlot$time_median <- round((as.numeric(mydataPlot$time_median/divisionUnit)), 1);
    mydataPlot$time_max <- round((as.numeric(mydataPlot$time_max/divisionUnit)), 1);
    mydataPlot$time_min <- round((as.numeric(mydataPlot$time_min/divisionUnit)), 1);

    #-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#;
    #-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#-~-#;
    ### Make AE timeline plot for SOC;
    plotData <- mydataPlot;
    plotData <- as.data.frame(plotData);
    plotData$label <- paste(plotData$time_median, " (", plotData$time_min, "-", plotData$time_max, ")", sep="");
    plotData[which(plotData$time_min == plotData$time_max), ]$label <- paste(" ", plotData[which(plotData$time_min == plotData$time_max), ]$time_median, sep="");
    plotData$ae_category <- stringr::str_wrap(plotData$ae_category, width = columnWidth1);

    ### Make correct order for AEs to be plotted;
    plotData <- plotData |>
      dplyr::arrange(ae_category)
    plotData <- plotData |> purrr::modify_if(is.character, as.factor);
    plotData$ae_category <- factor(plotData$ae_category, levels=rev(levels(plotData$ae_category)));

    ### Below part sets correct facet_nested() panel spacing adjusting for SOC label size;
    plotPanelSpacing <- plotData |>
      dplyr::select("ae_category") |>
      dplyr::mutate(span_soc = lengths(stringr::str_split(ae_category, '\n')))
    span_txt <- plotData |>
      dplyr::group_by(ae_category) |>
      dplyr::count(ae_category, name="span_txt")
    plotPanelSpacing <- as.data.frame(plotPanelSpacing);
    span_txt <- as.data.frame(span_txt);
    plotData <- plyr::join_all(list(plotData, span_txt, plotPanelSpacing), by=c("ae_category"), type='left', match = "first");
    #str(plotData); #49 obs.
of 8 variables; #-#-#-# Control part for setting span size; plotData$span <- plotData$span_txt; ### Below is how many treatment lines display okay per one line of SOC, manually tweak below; plotPerfectRowTxtAmt <- 2; plotData$span <- plotData$span_txt / plotPerfectRowTxtAmt; plotData$span[which(plotData$span_soc >= plotData$span_txt)] <- (plotData$span_soc[which(plotData$span_soc >= plotData$span_txt)] / plotData$span_txt[which(plotData$span_soc >= plotData$span_txt)]) * plotPerfectRowTxtAmt; plotData$span[which(plotData$span_txt == 1 & plotData$span_soc >= plotPerfectRowTxtAmt)] <- plotData$span_soc[which(plotData$span_txt == 1 & plotData$span_soc >= plotPerfectRowTxtAmt)]; plotData$span[which(plotData$span_txt == 1 & plotData$span_soc < plotPerfectRowTxtAmt)] <- 1; #-#-#-#; plotPanelSpacingCheck <- plotData |> dplyr::select("ae_category", "span_soc", "span_txt", "span") #write.xlsx(plotPanelSpacingCheck, file=paste("plotPanelSpacingCheck", ".xlsx", sep=""), sheetName="GGplot facet span", col.names=TRUE, row.names=FALSE, append=F, showNA=FALSE); ### This part sets order correct for ggplot facets; plotSpan <- plotData |> dplyr::select("ae_category", "span") |> dplyr::group_by(ae_category) |> dplyr::arrange(desc(ae_category)) |> dplyr::filter(dplyr::row_number()==1) plotSpan <- as.data.frame(plotSpan); SOC_LLT_strips <- ggh4x::strip_nested( # Vertical strips size = "variable", background_y = ggh4x::elem_list_rect(fill = c(panelColoursCategory, panelColoursDetail)), text_y = ggh4x::elem_list_text(colour = c(fontColoursCategory, fontColoursDetail), family=c(fontCategory, fontDetail), hjust = c(1,0), vjust = c(1,0.5)), by_layer_y = TRUE ) #plotData; plotData$group <- as.factor(plotData$group); p1_with_legend <- ggplot(plotData, aes(xmin=time_min, xmax=time_max, y=forcats::fct_rev(group))) + ggstance::geom_pointrangeh(aes(x=time_median, shape=group, color=group), position=position_dodge2(width = 0, preserve = "single", padding = -1.5), fatten=2) + geom_text(aes(x=time_max, color=group, label=label), position=position_dodge2(width = 0, preserve = "single", padding = -1.5), size=2.6, hjust=-0.2, show.legend = FALSE, family=fontPlotLabels) + scale_color_manual(name=NULL, values=c(attribColours1, attribColours2, attribColours3, attribColours4, attribColours5, attribColours6, attribColours7, attribColours8, attribColours9, attribColours10)) + scale_shape_manual(name=NULL, values=c(attribSymbols1, attribSymbols2, attribSymbols3, attribSymbols4, attribSymbols5, attribSymbols6, attribSymbols7, attribSymbols8, attribSymbols9, attribSymbols10)) + xlab(paste(plotTimeText, " [median onset time (range)]", sep="")) + theme(legend.position="bottom") + theme(legend.title = element_blank()) + guides(color=guide_legend(nrow=2,byrow=F)) + theme(strip.text.y.left = element_text(angle = 0), strip.text = element_text(family=fontCategory, hjust = 1, margin = margin(5, 5, 5, 5, "pt")), strip.background = element_rect(fill=panelColoursCategory, color="white")) + theme(panel.spacing=unit(0, "cm")) + theme(axis.text.y = element_text(hjust = 1)) + scale_x_continuous(expand = expansion(add = c(8,30)), limits=c(0,max(plotData$time_max)*1.30), minor_breaks=NULL) + theme(panel.grid.minor.x=element_blank(), panel.grid.major.y=element_blank(), panel.grid.minor.y=element_blank()) + theme(axis.title.y=element_blank(), axis.ticks.y=element_blank(), panel.border=element_blank(), panel.background=element_blank(), plot.title=element_text(hjust = 0.5)) + ggh4x::facet_nested(forcats::fct_rev(ae_category) ~ ., scales = "free", space = 
"free", switch = "y", strip = SOC_LLT_strips) + scale_y_discrete(position = "right") + theme(legend.key = element_rect(fill = panelColoursPlot), text=element_text(family=fontLegend), legend.background = element_rect(fill = panelColoursPlot), plot.margin = margin(0,0,0,0, "cm")) + theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank(), axis.title.x = element_text(family=fontAxis)) + theme(panel.background = element_rect(fill = panelColoursPlot)) + ggh4x::force_panelsizes(rows = plotSpan$span) + theme(strip.text = element_text (margin = margin (2, 1, 2, 15))) + theme(legend.title=element_blank(), legend.margin = margin(0, 0, 0, 0), legend.spacing.x = unit(0, "mm"), legend.spacing.y = unit(0, "mm")) p1_no_legend <- p1_with_legend + theme(legend.position = "none") #### This part makes the legend centered below both plot and panel area; gt <- ggplot_gtable(ggplot_build(p1_no_legend)) le1 <- cowplot::get_legend(p1_with_legend) pPlot <- cowplot::plot_grid(gt, le1, nrow = 2, rel_heights = c(1, legendPerSpaceCategory)) + theme(plot.background = element_rect(fill = "white", colour = NA)) return(pPlot) } }
/scratch/gouwar.j/cran-all/cranData/BiostatsUHNplus/R/ae_timeline_plot.R
#' Modification of the as.numeric function that prints entries that
#' fail to parse as a message
#'
#' @param x string or vector to coerce to numeric
#' @keywords as.numeric
#' @export
#' @examples
#' z <- as_numeric_parse(c(1:5, "String1",6:10,"String2"))
#' z
#' @returns the input coerced to a numeric vector, with any entries that failed
#'   to parse converted to NA (and reported via message())
as_numeric_parse <- function(x){
  # Arguments:
  # x = string or vector to coerce to numeric
  # check that x is one of the supported types
  if (!inherits(x, c("logical","numeric","double","integer","character","Date"))) {
    stop("Invalid data type")
  }
  y <- suppressWarnings(as.numeric(x))
  noparse <- x[!is.na(x) & is.na(y)]
  if (length(noparse) > 0) {
    noparse_warn <- paste0("Entry ", which(!is.na(x) & is.na(y)), ", '", noparse, "'")
    message("The following entries were converted to NA values:")
    for (i in 1:length(noparse_warn)){
      message(noparse_warn[i])
    }
  }
  return(y)
}
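## Illustrative behaviour of the @examples call above, derived by tracing the
## function body (shown as comments only):
# z <- as_numeric_parse(c(1:5, "String1", 6:10, "String2"))
#> The following entries were converted to NA values:
#> Entry 6, 'String1'
#> Entry 12, 'String2'
# z now holds c(1, 2, 3, 4, 5, NA, 6, 7, 8, 9, 10, NA)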
/scratch/gouwar.j/cran-all/cranData/BiostatsUHNplus/R/as_numeric_parse.R
#' Caterpillar plot. Useful for plotting random effects from hierarchical models,
#' such as MCMCglmm::MCMCglmm() object, that have a binary outcome.
#'
#' @param subjID key identifier field for participant ID in data sets
#' @param remove.text.subjID boolean indicating if non-numeric text should be
#'   removed from subjID in plot label. Note that this can only be used if there
#'   are non-duplicate participant IDs when non-numeric text is removed. Default
#'   is FALSE (if provided)
#' @param mcmcglmm_object MCMCglmm model output
#' @param orig_dataset data frame supplied to MCMCglmm function
#' @param binaryOutcomeVar name of binary variable (0,1) that denotes outcome
#'   in MCMCglmm model
#' @param prob probability for highest posterior density interval, similar to a
#'   confidence interval. Default is 0.95 (if provided)
#' @param title title of the plot. Overrides default title (if provided)
#' @param subtitle subtitle of the plot. Overrides default subtitle (if provided)
#' @param ncol number of columns in plot. Default is 2 (if provided)
#' @param no.title boolean that denotes if the title should be suppressed from the
#'   plot. Default is FALSE (if provided)
#' @param fonts character text that denotes font for title, subtitle, category labels,
#'   x-axis plot labels (if provided)
#' @param columnTextWidth numeric that denotes character width for label text before
#'   breaking to start new line. Default is 20 characters (if provided)
#' @param break.label.summary boolean to indicate if new line should start in label
#'   before (n, event) summary. Default is FALSE
#' @keywords plot
#' @return ggplot object of caterpillar plot
#' @importFrom plyr join_all rbind.fill
#' @importFrom MCMCglmm posterior.mode
#' @importFrom coda HPDinterval
#' @importFrom purrr modify_if
#' @importFrom dplyr select rename group_by arrange filter row_number n
#' @importFrom stringr str_wrap
#' @import ggplot2
#' @import lifecycle
#' @export
#' @examples
#' data("ae");
#'
#' ae$G3Plus <- 0;
#' ae$G3Plus[ae$AE_SEV_GD %in% c("3", "4", "5")] <- 1;
#' ae$Drug_1_Attribution <- 0;
#' ae$Drug_1_Attribution[ae$CTC_AE_ATTR_SCALE %in% c("Definite", "Probable", "Possible")] <- 1;
#' ae$Drug_2_Attribution <- 0;
#' ae$Drug_2_Attribution[ae$CTC_AE_ATTR_SCALE_1 %in% c("Definite", "Probable", "Possible")] <- 1;
#'
#' prior2RE <- list(R = list(V = diag(1), fix = 1),
#'   G=list(G1=list(V=1, nu=0.02), G2=list(V=1, nu=0.02)));
#'
#' model1 <- MCMCglmm::MCMCglmm(G3Plus ~ Drug_1_Attribution + Drug_2_Attribution,
#'   random=~Subject + ae_category, family="categorical", data=ae, saveX=TRUE,
#'   verbose=FALSE, burnin=2000, nitt=10000, thin=10, pr=TRUE, prior=prior2RE);
#'
#' p <- caterpillar_plot(subjID = "Subject",
#'   mcmcglmm_object = model1,
#'   prob = 0.99,
#'   orig_dataset = ae,
#'   binaryOutcomeVar = "G3Plus")
#'
#' p <- caterpillar_plot(subjID = "ae_category",
#'   mcmcglmm_object = model1,
#'   prob = 0.95,
#'   orig_dataset = ae,
#'   remove.text.subjID = FALSE,
#'   ncol = 4,
#'   binaryOutcomeVar = "G3Plus",
#'   subtitle = "System organ class (n, event)",
#'   title = "Odds Ratio for G3+ Severity with 95% Highest Posterior Density Interval",
#'   fonts = c("Arial", "Arial", "Arial", "Arial"),
#'   break.label.summary = TRUE)
caterpillar_plot <- function(subjID, remove.text.subjID=FALSE, mcmcglmm_object, orig_dataset,
                             binaryOutcomeVar, prob=0.95, title=NULL, no.title=FALSE,
                             subtitle=NULL, ncol=NULL,
                             fonts=NULL, columnTextWidth=NULL,
                             break.label.summary=FALSE
                             ){

  font.title <- "Bahnschrift";
  font.subtitle <- "Gungsuh";
  font.labels <- "Berlin Sans FB";
  font.axis <- "Gadugi";
  if (!is.null(fonts)) {
    tryCatch({ font.title <- na.fail(fonts[1]); }, error=function(e){})
    tryCatch({ font.subtitle <- na.fail(fonts[2]); }, error=function(e){})
    tryCatch({ font.labels <- na.fail(fonts[3]); }, error=function(e){})
    tryCatch({ font.axis <- na.fail(fonts[4]); }, error=function(e){})
  }
  if (is.null(ncol)) {
    ncol <- 2;
  }
  if (is.null(columnTextWidth)) {
    columnTextWidth <- 20;
  }
  if (is.null(title) && no.title == FALSE) {
    tryCatch({ title <- paste("Odds Ratio with ", round(prob*100, 2), "% Highest Posterior Density Interval", sep=""); }, error=function(e){})
  }
  if (no.title == TRUE) {
    title <- NULL;
  }
  if (is.null(subtitle)) {
    tryCatch({ subtitle <- paste(subjID, " (n, events)", sep=""); }, error=function(e){})
  }

  intSubjs <- mcmcglmm_object$Sol[, which(grepl(paste(subjID, '.*?', sep=""), colnames(mcmcglmm_object$Sol)))];
  ranefSubjs <- cbind(est = MCMCglmm::posterior.mode(intSubjs), CI = coda::HPDinterval(intSubjs, prob=prob));
  rownames(ranefSubjs) <- sub(paste(subjID, ".", sep=""), '', rownames(t(intSubjs)));
  ranefSubjs <- as.data.frame(ranefSubjs);
  ranefSubjs$est <- exp(ranefSubjs$est);
  ranefSubjs$lower <- exp(ranefSubjs$lower);
  ranefSubjs$upper <- exp(ranefSubjs$upper);
  if (remove.text.subjID == TRUE) {
    rownames(ranefSubjs) <- gsub("[^0-9]", "", rownames(ranefSubjs)); #This removes non-numeric text from the participant identifier; avoid if IDs become duplicates once text is removed;
  }
  ranefSubjs$ID <- rownames(ranefSubjs);
  ranefSubjs$term <- reorder(factor(rownames(ranefSubjs)), ranefSubjs$est);

  instSubj <- orig_dataset |>
    dplyr::rename(ID = subjID) |>
    dplyr::select(ID) |>
    dplyr::group_by(ID, .drop = FALSE) |>
    dplyr::count(name="instances", .drop=FALSE)
  hp_instSubj <- orig_dataset |>
    dplyr::rename(ID = subjID) |>
    dplyr::select("ID", binaryOutcomeVar) |>
    dplyr::group_by(ID, .drop = FALSE) |>
    dplyr::filter(get(binaryOutcomeVar) == 1) |>
    dplyr::count(get(binaryOutcomeVar), name="hp_instances") |>
    dplyr::select(-"get(binaryOutcomeVar)")
  ranefSubjs <- plyr::join_all(list(ranefSubjs, instSubj[, c("ID", "instances")], hp_instSubj[, c("ID", "hp_instances")]), by=c("ID"), type='left', match = "first");
  ranefSubjs$instances[which(is.na(ranefSubjs$instances))] <- 0;
  ranefSubjs$hp_instances[which(is.na(ranefSubjs$hp_instances))] <- 0;

  if (break.label.summary == TRUE) {
    ranefSubjs$term <- stringr::str_wrap(ranefSubjs$term, width = columnTextWidth);
    ranefSubjs$term <- paste(ranefSubjs$term, "\n(", ranefSubjs$instances, ", ", ranefSubjs$hp_instances, ")", sep="");
  } else {
    ranefSubjs$term <- paste(ranefSubjs$term, " (", ranefSubjs$instances, ", ", ranefSubjs$hp_instances, ")", sep="");
    ranefSubjs$term <- stringr::str_wrap(ranefSubjs$term, width = columnTextWidth);
  }
  ranefSubjs$term <- reorder(factor(ranefSubjs$term), ranefSubjs$est);

  num_groups <- ncol;
  ranefSubjs <- ranefSubjs |>
    dplyr::arrange(est) |>
    dplyr::group_by(facet=(row_number()-1) %/% (n()/num_groups)+1)
  ranefSubjs$facet <- as.factor(ranefSubjs$facet);
  ranefSubjs$facet <- ordered(ranefSubjs$facet, levels = c(as.character(rev(seq(1:ncol)))));
  ranefSubjs$significance <- "normal";
  ranefSubjs$est <- as.numeric(format(round(ranefSubjs$est, 2), nsmall=2));
  ranefSubjs$lower <- as.numeric(format(round(ranefSubjs$lower, 2), nsmall=2));
  ranefSubjs$upper <- as.numeric(format(round(ranefSubjs$upper, 2), nsmall=2));
  ranefSubjs$significance[(ranefSubjs$lower >= 1.00 & ranefSubjs$upper >= 1.00) | (ranefSubjs$lower <= 1.00 & ranefSubjs$upper <= 1.00)] <- "different";

  options(warn=-1); #suppress warning messages;
  pPlot <-
    ggplot(ranefSubjs, aes(term, est)) +
    geom_hline(yintercept=1, color="grey") +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.border = element_blank(), panel.background = element_blank()) +
    geom_pointrange(aes(ymin=lower, ymax=upper, color=significance)) +
    guides(color="none") +
    scale_color_manual(values=c("normal"="darkgrey", "different"="black")) +
    facet_wrap(~facet, dir="v", scales="free", ncol=ncol) +
    scale_y_continuous(limits = ~ c(0, ceiling(max(.x))), breaks = ~ c(0, floor(max(.x))), expand = c(0.12, 0.05)) +
    coord_flip() +
    theme(plot.title=element_text(family=font.title, size=14, hjust=0.5),
          plot.subtitle=element_text(family=font.subtitle, size=12),
          axis.text.y=element_text(family=font.labels, size=8),
          axis.text.x=element_text(family=font.axis),
          axis.title.x=element_blank(), axis.title.y=element_blank()) +
    theme(plot.title.position = "plot", plot.subtitle = element_text(hjust = 0.5),
          strip.background = element_blank(), strip.text.x = element_blank()) +
    theme(strip.text = element_blank(), panel.spacing.y = unit(0, "lines"), axis.ticks.y=element_blank()) +
    labs(title=paste(title), subtitle=paste(subtitle), caption="");

  return(pPlot)
}
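## Reading the plot: each point range is an exponentiated posterior mode with its
## HPD interval, i.e. an odds ratio per random-effect level. Intervals drawn in
## black ("different") lie entirely at or above, or at or below, an odds ratio of
## 1; grey ("normal") intervals cross 1. Labels append the (n, events) counts
## computed above for each level.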
/scratch/gouwar.j/cran-all/cranData/BiostatsUHNplus/R/caterpillar_plot.R
covsum <- utils::getFromNamespace("covsum", "reportRmd")
csep <- utils::getFromNamespace("csep", "reportRmd")
formatp <- utils::getFromNamespace("formatp", "reportRmd")
lpvalue <- utils::getFromNamespace("lpvalue", "reportRmd")
niceNum <- utils::getFromNamespace("niceNum", "reportRmd")
nicename <- utils::getFromNamespace("nicename", "reportRmd")
replaceLbl <- utils::getFromNamespace("replaceLbl", "reportRmd")

#' Nested version of reportRmd covsum()
#'
#' @param data dataframe containing data
#' @param covs character vector with the names of columns to include in table
#' @param maincov covariate to stratify table by
#' @param id covariates to nest summary by
#' @param digits number of digits for summarizing mean data, does not affect
#'   p-values
#' @param numobs named list overriding the number of people you expect to have
#'   the covariate
#' @param markup boolean indicating if you want latex markup
#' @param sanitize boolean indicating if you want to sanitize all strings to not
#'   break LaTeX
#' @param nicenames boolean indicating if you want to replace . and _ in strings
#'   with a space
#' @param IQR boolean indicating if you want to display the inter quantile range
#'   (Q1,Q3) as opposed to (min,max) in the summary for continuous variables
#' @param all.stats boolean indicating if all summary statistics (Q1,Q3 +
#'   min,max on a separate line) should be displayed. Overrides IQR.
#' @param pvalue boolean indicating if you want p-values included in the table
#' @param effSize boolean indicating if you want effect sizes included in the
#'   table. Can only be obtained if pvalue is also requested.
#' @param show.tests boolean indicating if the type of statistical test used should
#'   be shown in a column beside the p-values. Ignored if pvalue=FALSE.
#' @param nCores if > 1, specifies number of cores to use for parallel processing
#'   for calculating the nested p-value (default: 1).
#' @param nested.test specifies test used for calculating nested p-value from
#'   afex::mixed function. Either \emph{parametric bootstrap} method
#'   or \emph{likelihood ratio test} method (default: "LRT"). Parametric bootstrap
#'   takes longer.
#' @param nsim specifies number of simulations to use for calculating nested p-value
#'   with \emph{parametric bootstrap} method used for nested.test (default: 1000).
#' @param dropLevels logical, indicating if empty factor levels should be dropped from
#'   the output, default is TRUE.
#' @param excludeLevels a named list of covariate levels to exclude from
#'   statistical tests in the form list(varname =c('level1','level2')). These
#'   levels will be excluded from association tests, but not the table. This can
#'   be useful for levels where there is a logical skip (i.e. not missing, but not
#'   presented). Ignored if pvalue=FALSE.
#' @param full boolean indicating if you want the full sample included in the
#'   table, ignored if maincov is NULL
#' @param digits.cat number of digits for the proportions when summarizing
#'   categorical data (default: 0)
#' @param testcont test of choice for continuous variables, one of
#'   \emph{rank-sum} (default) or \emph{ANOVA}
#' @param testcat test of choice for categorical variables, one of
#'   \emph{Chi-squared} (default) or \emph{Fisher}
#' @param include_missing Option to include NA values of maincov. NAs will not
#'   be included in statistical tests
#' @param percentage choice of how percentages are presented, one of
#'   \emph{column} (default) or \emph{row}
#' @importFrom stats lm sd anova as.formula binomial median na.fail
#' @importFrom rstatix cramer_v eta_squared
#' @importFrom dplyr select reframe summarise group_by filter across row_number n
#' @importFrom purrr modify_if
#' @importFrom rlang syms
#' @importFrom modeest mlv
#' @importFrom utils getFromNamespace
#' @importFrom parallel detectCores makeCluster clusterExport parLapply
#' @importFrom afex mixed
#' @seealso \code{\link{fisher.test}},\code{\link{chisq.test}},
#'   \code{\link{wilcox.test}},\code{\link{kruskal.test}},
#'   \code{\link{anova}} and \code{\link{mixed}}
covsum_nested <- function (data, covs, maincov = NULL, id = NULL, digits = 1,
                           numobs = NULL, markup = TRUE, sanitize = TRUE, nicenames = TRUE,
                           IQR = FALSE, all.stats = FALSE, pvalue = TRUE, effSize = TRUE,
                           show.tests = TRUE, nCores = NULL, nested.test = NULL, nsim = NULL,
                           excludeLevels = NULL, dropLevels = TRUE, full = TRUE, digits.cat = 0,
                           testcont = c("rank-sum test", "ANOVA"),
                           testcat = c("Chi-squared", "Fisher"),
                           include_missing = FALSE, percentage = c("column", "row"))
{
  #-#-#-#-#-#-#-#-#-#-#-#-#
  if (missing(id))
    stop("id is a required argument. If id is not required, please use reportRmd::rm_covsum instead.\n")
  warning("Use this function at your own risk. Please check output.\nOrder of nested ids matter. For example, in c('id1','id2') id1 should be nested within id2, etc.\n")
  nested.pvalue=FALSE
  if (pvalue) {
    nested.pvalue=TRUE
    nc <- parallel::detectCores() # number of cores
    if (is.numeric(nCores) && nCores <= nc) {
      nc <- nCores
    } else if (is.numeric(nCores) && nCores > nc) {
      warning(paste("Number of core(s) requested exceeds that of system.\nUsing ", nc, " cores for parallel processing.\n", sep=""))
    } else {
      nc <- 1
    }
  }
  options(dplyr.summarise.inform = FALSE)
  is.date <- function(x) inherits(x, 'Date')
  covsIdData1 <- function(covs = covs, id = id, data = data, excludeLevels = excludeLevels){
    id <- c(id, NULL)
    tto <- data |>
      purrr::modify_if(is.character, as.factor) |>
      dplyr::select(!!!(rlang::syms(covs)), !!!(rlang::syms(id))) |>
      dplyr::group_by(!!!(rlang::syms(id))) |>
      dplyr::reframe(dplyr::across(where(is.numeric), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.date), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.factor), ~ modeest::mlv(.x, method = "mfv"))) |>
      dplyr::group_by(!!!(rlang::syms(id)), .drop=FALSE) |>
      #dplyr::filter(dplyr::row_number() == ceiling(n()/2))
      dplyr::filter(dplyr::row_number() == 1)
    tto <- as.data.frame(tto)
    tto
  }
  covsIdData2 <- function(covs = covs, id = id, data = data, excludeLevels = excludeLevels){
    tto <- data |>
      purrr::modify_if(is.character, as.factor) |>
      dplyr::select(!!!(rlang::syms(covs)), !!!(rlang::syms(id))) |>
      dplyr::group_by(!!!(rlang::syms(id))) |>
      dplyr::reframe(dplyr::across(where(is.numeric), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.date), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.factor), ~ modeest::mlv(.x, method = "mfv"))) |>
      dplyr::group_by(!!!(rlang::syms(id)), .drop=FALSE) |>
      #dplyr::filter(dplyr::row_number() == ceiling(n()/2))
      dplyr::filter(dplyr::row_number() == 1)
    tto <- as.data.frame(tto)
    tto
  }
  maincovCovsIdData1 <- function(maincov = maincov, covs = covs, id = id, data = data, excludeLevels = excludeLevels){
    id <- c(id, maincov)
    tto <- data |>
      purrr::modify_if(is.character, as.factor) |>
      dplyr::select(!!!(rlang::syms(maincov)), !!!(rlang::syms(covs)), !!!(rlang::syms(id))) |>
      dplyr::group_by(!!!(rlang::syms(id))) |>
      dplyr::reframe(dplyr::across(where(is.numeric), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.date), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.factor), ~ modeest::mlv(.x, method = "mfv"))) |>
      dplyr::group_by(!!!(rlang::syms(id)), .drop=FALSE) |>
      #dplyr::filter(dplyr::row_number() == ceiling(n()/2))
      dplyr::filter(dplyr::row_number() == 1)
    tto <- as.data.frame(tto)
    tto
  }
  maincovCovsIdData2 <- function(maincov = maincov, covs = covs, id = id, data = data, excludeLevels = excludeLevels){
    tto <- data |>
      purrr::modify_if(is.character, as.factor) |>
      dplyr::select(!!!(rlang::syms(maincov)), !!!(rlang::syms(covs)), !!!(rlang::syms(id))) |>
      dplyr::group_by(!!!(rlang::syms(id))) |>
      dplyr::reframe(dplyr::across(where(is.numeric), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.date), ~ mean(.x, na.rm = TRUE)),
                     dplyr::across(where(is.factor), ~ modeest::mlv(.x, method = "mfv"))) |>
      dplyr::group_by(!!!(rlang::syms(id)), .drop=FALSE) |>
      #dplyr::filter(dplyr::row_number() == ceiling(n()/2))
      dplyr::filter(dplyr::row_number() == 1)
    tto <- as.data.frame(tto)
    tto
  }
  if (is.null(maincov) & !is.null(id)) {
    data1 <- covsIdData1(covs, id, data)
    data2 <- covsIdData2(covs, id, data)
  } else if (!is.null(maincov) & !is.null(id)) {
    data1 <- maincovCovsIdData1(maincov, covs, id, data)
    data2 <- maincovCovsIdData2(maincov, covs, id, data)
    dataWithoutMaincov1 <- covsIdData1(covs, id, data)
    dataWithoutMaincov2 <- covsIdData2(covs, id, data)
  } else {
    data <- data
  }
  #-#-#-#-#-#-#-#-#-#-#-#-#
  #obj1 <- reportRmd:::covsum(data = data1, covs = covs, maincov = maincov, dropLevels = FALSE, full = T)
  #obj2 <- reportRmd:::covsum(data = data2, covs = covs, maincov = NULL, dropLevels = FALSE, full = T)
  obj1 <- covsum(data = data1, covs = covs, dropLevels = FALSE, maincov = maincov, digits=digits, numobs=numobs,
                 markup=markup, sanitize=sanitize, nicenames=nicenames, IQR=IQR, all.stats=all.stats, pvalue=pvalue,
                 effSize=effSize, show.tests=show.tests, excludeLevels=excludeLevels, full=full, digits.cat=digits.cat,
                 testcont=testcont, testcat=testcat, include_missing=include_missing, percentage=percentage)
  obj2 <- covsum(data = data2, covs = covs, maincov = NULL, dropLevels = FALSE, digits=digits, numobs=numobs,
                 markup=markup, sanitize=sanitize, nicenames=nicenames, IQR=IQR, all.stats=all.stats, pvalue=pvalue,
                 effSize=effSize, show.tests=show.tests, excludeLevels=excludeLevels, full=full, digits.cat=digits.cat,
                 testcont=testcont, testcat=testcat, include_missing=include_missing, percentage=percentage)
  objComb <- cbind(obj2, obj1[,-1]);
  if (!is.null(maincov)) {
    if (length(unique(eval(parse(text=paste("data1$", maincov, sep=""))))) < 2) {
      objComb <- objComb[,-3];
      full <- F;
    }
    if (full == T) {
      objComb <- objComb[,-3];
    }
    if (length(unique(eval(parse(text=paste("data1$", maincov, sep=""))))) > 1 ) {
      colnames(objComb)[2] <- paste("Full Sample (", colnames(objComb)[2], ")", sep="");
    } else {
      colnames(objComb)[2] <- paste(unique(eval(parse(text=paste("data1$", maincov, sep=""))))[1], " (", colnames(objComb)[2], ")", sep="");
    }
    if (full == F && (length(unique(eval(parse(text=paste("data1$", maincov, sep=""))))) > 1)) {
      objComb <- objComb[,-2];
    }
  }
  if (is.null(maincov)) {
    if (full == T) {
      objComb <- objComb[,-3];
      colnames(objComb)[2] <- paste("Full Sample (", colnames(objComb)[2], ")", sep="");
    }
    if (full == F) {
      objComb <- objComb[,-3];
      colnames(objComb)[2] <- paste("Full Sample (", colnames(objComb)[2], ")", sep="");
    }
  }

  #------------# LRT glmer nested p-values #------------#;
###https://search.r-project.org/CRAN/refmans/afex/html/mixed.html if (nested.pvalue == TRUE & !is.null(maincov) & !is.null(id)) { objComb$cov <- ""; objComb$cov[which(objComb[2] == "")] <- covs; objComb$'Nested p-value' <- ""; if (is.null(nsim)) { nsim <- 1000; } if (is.null(nested.test)) { nested.test <- "LRT"; } if (nested.test == "PB") { warning(paste("Unnested p-value and statistical test is incorrect for nested data, but is kept for comparison to nested p-value.\nNested p-value derived from anova(afex::mixed(maincov ~ cov + (1|id1:id2:...idn), family=binomial, data, method='PB')).\nProcessing will take LONGER for parametric bootstrapping.", sep="")) } if (nested.test == "LRT") { warning(paste("Unnested p-value and statistical test is incorrect for nested data, but is kept for comparison to nested p-value.\nNested p-value derived from anova(afex::mixed(maincov ~ cov + (1|id1:id2:...idn), family=binomial, data, method='LRT')).", sep="")) } suppressWarnings({ tryCatch({ cl <- parallel::makeCluster(nc, type="PSOCK") # make cluster parallel::clusterExport(cl, list("maincov", "cov", "objComb", "id", "data", "cl", "nsim"), envir=environment()) # send data and functions to cluster suppressWarnings({tryCatch({ if (nested.test == "PB") { if (length(unique(data[[maincov]])) == 2) { out_glmer <- parallel::parLapply(cl, objComb$cov[which(objComb$cov != "")], function(x) tryCatch({as.numeric(stats::anova(afex::mixed(stats::as.formula(paste(maincov, '~', x, '+(', 1, '|', paste(id, collapse=':'), ')', sep='')), family=binomial, data=data, expand_re=TRUE, cl=NULL, method="PB", args_test=list(nsim=nsim,cl=cl)))[4])}, error=function(e){NA})) } else { #modify different family here in future for categorical outcome with more than 2 levels, but is ANOVA Type III test; out_glmer <- parallel::parLapply(cl, objComb$cov[which(objComb$cov != "")], function(x) tryCatch({as.numeric(stats::anova(afex::mixed(stats::as.formula(paste(maincov, '~', x, '+(', 1, '|', paste(id, collapse=':'), ')', sep='')), family=binomial, data=data, expand_re=TRUE, cl=NULL, method="PB", args_test=list(nsim=nsim,cl=cl)))[4])}, error=function(e){NA})) } } if (nested.test == "LRT") { if (length(unique(data[[maincov]])) == 2) { out_glmer <- parallel::parLapply(cl, objComb$cov[which(objComb$cov != "")], function(x) tryCatch({as.numeric(stats::anova(afex::mixed(stats::as.formula(paste(maincov, '~', x, '+(', 1, '|', paste(id, collapse=':'), ')', sep='')), family=binomial, data=data, expand_re=TRUE, cl=NULL, method="LRT"))[4])}, error=function(e){NA})) } else { #modify different family here in future for categorical outcome with more than 2 levels, but is ANOVA Type III test; #out_glmer <- lapply(objComb$cov[which(objComb$cov != "")], function(x) try(as.numeric(stats::anova(afex::mixed(stats::as.formula(paste(maincov, '~', x, '+(', 1, '|', paste(id, collapse=':'), ')', sep='')), family=binomial, data=data, expand_re=TRUE, cl=NULL, method="LRT"))[4]), silent=TRUE)) out_glmer <- parallel::parLapply(cl, objComb$cov[which(objComb$cov != "")], function(x) tryCatch({as.numeric(stats::anova(afex::mixed(stats::as.formula(paste(maincov, '~', x, '+(', 1, '|', paste(id, collapse=':'), ')', sep='')), family=binomial, data=data, expand_re=TRUE, cl=NULL, method="LRT"))[4])}, error=function(e){NA})) } } }, error=function(e){})}) try(parallel::stopCluster(cl), silent=TRUE) }, error=function(e){}) suppressWarnings({ tryCatch({ try(parallel::stopCluster(cl), silent=TRUE) }, error=function(e){}) }) out_glmer <- as.numeric(unlist(out_glmer)); objComb$'Nested 
    objComb$'Nested p-value'[which(objComb$cov != "")] <- unlist(out_glmer);
    objComb <- objComb[, which(names(objComb) != "cov")];
    objComb$'Nested p-value'[which(objComb$'p-value' %in% c("", NA, "NaN"))] <- NA;
  })
  objComb;
} else {
  objComb;
}
}

#' Outputs a nested version of reportRmd::rm_covsum()
#'
#' @param data dataframe containing data
#' @param covs character vector with the names of columns to include in table
#' @param maincov covariate to stratify table by
#' @param id covariates to nest summary by
#' @param caption character containing table caption (default is no caption)
#' @param tableOnly Logical, if TRUE then a dataframe is returned, otherwise a
#'   formatted printed object is returned (default).
#' @param covTitle character with the names of the covariate (predictor) column.
#'   The default is to leave this empty for output or, for table only output to
#'   use the column name 'Covariate'.
#' @param digits number of digits for summarizing mean data
#' @param digits.cat number of digits for the proportions when summarizing
#'   categorical data (default: 0)
#' @param nicenames boolean indicating if you want to replace . and _ in strings
#'   with a space
#' @param IQR boolean indicating if you want to display the inter quantile range
#'   (Q1,Q3) as opposed to (min,max) in the summary for continuous variables
#' @param all.stats boolean indicating if all summary statistics (Q1,Q3 +
#'   min,max on a separate line) should be displayed. Overrides IQR.
#' @param pvalue boolean indicating if you want p-values included in the table
#' @param effSize boolean indicating if you want effect sizes included in the
#'   table. Can only be obtained if pvalue is also requested.
#' @param p.adjust p-adjustments to be performed
#' @param unformattedp boolean indicating if you would like the p-value to be
#'   returned unformatted (ie not rounded or prefixed with '<'). Best used with
#'   tableOnly = T and outTable function.
#' @param show.tests boolean indicating if the type of statistical test used should
#'   be shown in a column beside the p-values. Ignored if pvalue=FALSE.
#' @param nCores if > 1, specifies number of cores to use for parallel processing
#'   for calculating the nested p-value (default: 1).
#' @param nested.test specifies test used for calculating nested p-value from
#'   afex::mixed function. Either \emph{parametric bootstrap} method
#'   or \emph{likelihood ratio test} method (default: "LRT"). Parametric bootstrap
#'   takes longer.
#' @param nsim specifies number of simulations to use for calculating nested p-value
#'   with \emph{parametric bootstrap} method used for nested.test (default: 1000).
#' @param just.nested.pvalue boolean indicating if just the nested p-value
#'   should be shown in a column, and not the unnested p-value, unnested statistical
#'   tests and effect size. Overrides effSize and show.tests arguments.
#' @param testcont test of choice for continuous variables, one of
#'   \emph{rank-sum} (default) or \emph{ANOVA}
#' @param testcat test of choice for categorical variables, one of
#'   \emph{Chi-squared} (default) or \emph{Fisher}
#' @param full boolean indicating if you want the full sample included in the
#'   table, ignored if maincov is NULL
#' @param include_missing Option to include NA values of maincov. NAs will not
#'   be included in statistical tests
#' @param percentage choice of how percentages are presented, one of
#'   \emph{column} (default) or \emph{row}
#' @param dropLevels logical, indicating if empty factor levels should be dropped from
#'   the output, default is TRUE.
#' @param excludeLevels a named list of covariate levels to exclude from
#'   statistical tests in the form list(varname =c('level1','level2')). These
#'   levels will be excluded from association tests, but not the table. This can
#'   be useful for levels where there is a logical skip (ie not missing, but not
#'   presented). Ignored if pvalue=FALSE.
#' @param numobs named list overriding the number of people you expect to have
#'   the covariate
#' @param markup boolean indicating if you want latex markup
#' @param sanitize boolean indicating if you want to sanitize all strings to not
#'   break LaTeX
#' @param chunk_label only used if output is to Word to allow cross-referencing
#' @keywords dataframe
#' @return A character vector of the table source code, unless tableOnly=TRUE in
#'   which case a data frame is returned
#' @importFrom stats lm sd anova as.formula binomial median na.fail
#' @importFrom rstatix cramer_v eta_squared
#' @importFrom dplyr select reframe summarise group_by filter across row_number n
#' @importFrom purrr modify_if
#' @importFrom rlang syms
#' @importFrom modeest mlv
#' @importFrom afex mixed
#' @export
#' @seealso \code{\link{covsum}},\code{\link{fisher.test}},
#' \code{\link{chisq.test}}, \code{\link{wilcox.test}},
#' \code{\link{kruskal.test}}, \code{\link{anova}},
#' \code{\link{mixed}} and \code{\link{outTable}}
#' @examples
#' \dontrun{
#' # Example 1
#' data(ae)
#' rm_covsum_nested(data = ae, id = c("ae_detail", "Subject"), covs = c("AE_SEV_GD",
#'   "AE_ONSET_DT_INT"), maincov = "CTC_AE_ATTR_SCALE")
#'
#' # Example 2: set variable labels and other options, save output with markup
#' data("ae")
#' lbls <- data.frame(c1=c('AE_SEV_GD','AE_ONSET_DT_INT'),
#'   c2=c('Adverse event severity grade','Adverse event onset date'))
#' ae$AE_SEV_GD <- as.numeric(ae$AE_SEV_GD)
#' ae <- reportRmd::set_labels(ae, lbls)
#' output_tab <- rm_covsum_nested(data = ae, id = c("ae_detail", "Subject"),
#'   covs = c("AE_SEV_GD", "AE_ONSET_DT_INT"), maincov = "CTC_AE_ATTR_SCALE",
#'   testcat = "Fisher", percentage = c("col"), show.tests = FALSE, pvalue = TRUE,
#'   effSize = FALSE, full = TRUE, IQR = FALSE, nicenames = TRUE, sanitize = TRUE,
#'   markup = TRUE, include_missing = TRUE, just.nested.pvalue = TRUE,
#'   tableOnly = TRUE)
#' cat(reportRmd::outTable(tab=output_tab))
#' cat(reportRmd::outTable(output_tab, format="html"), file = paste("./man/tables/",
#'   "output_tab.html", sep=""))
#' cat(reportRmd::outTable(output_tab, format="latex"), file = paste("./man/tables/",
#'   "output_tab.tex", sep=""))
#' }
rm_covsum_nested <- function(data, covs, maincov=NULL, id=NULL, caption=NULL, tableOnly=FALSE, covTitle='',
                             digits=1, digits.cat=0, nicenames=TRUE, IQR=FALSE, all.stats=FALSE,
                             pvalue=TRUE, effSize=TRUE, p.adjust='none', unformattedp=FALSE, show.tests=TRUE,
                             just.nested.pvalue=FALSE, nCores=NULL, nested.test=NULL, nsim=NULL,
                             testcont=c('rank-sum test','ANOVA'),
                             testcat=c('Chi-squared','Fisher'), full=TRUE, include_missing=FALSE,
                             percentage=c('column','row'), dropLevels=TRUE, excludeLevels=NULL, numobs=NULL, markup=TRUE,
                             sanitize=TRUE, chunk_label){
  if (unformattedp | p.adjust != 'none') formatp <- function(x) { as.numeric(x) }
  argList <- as.list(match.call(expand.dots = TRUE)[-1])
  argsToPass <- intersect(names(formals(covsum_nested)), names(argList))
  covsumArgs <- argList[names(argList) %in% argsToPass]
  covsumArgs[["markup"]] <- FALSE
  covsumArgs[["sanitize"]] <- FALSE
  covsumArgs[["nicenames"]] <- FALSE
  tab <- do.call(covsum_nested, covsumArgs)
  #tab <- objComb;
  colnames(tab)[1] <- "Covariate"
  output_var_names <- covs
  Sys.sleep(1)
  to_indent <- which(!tab$Covariate %in% output_var_names)
  to_bold_name <- which(tab$Covariate %in% output_var_names)
  bold_cells <- arrayInd(to_bold_name, dim(tab))
  if (just.nested.pvalue == T) {
    if ('Nested p-value' %in% names(tab)) {
      tab <- tab[, -which(names(tab) %in% c('p-value', 'StatTest', 'Effect Size'))]
    }
  }
  if (nicenames) tab$Covariate <- replaceLbl(argList$data, tab$Covariate)
  names(tab)[1] <- covTitle
  if ("p-value" %in% names(tab)) {
    if (p.adjust != 'none') {
      tab[["p (unadjusted)"]] <- tab[["p-value"]]
      tab[["p-value"]] <- sapply(tab[["p-value"]], function(x) p.adjust(x, method=p.adjust))
    }
    to_bold_p <- which(as.numeric(tab[["p-value"]]) < 0.05)
    p_vals <- tab[["p-value"]]
    new_p <- sapply(p_vals, formatp)
    tab[["p-value"]] <- new_p
    if (length(to_bold_p) > 0) bold_cells <- rbind(bold_cells, matrix(cbind(to_bold_p, which(names(tab) == "p-value")), ncol = 2))
  }
  if ("Effect Size" %in% names(tab)) {
    e_vals <- tab[["Effect Size"]]
    new_e <- sapply(e_vals, formatp)
    tab[["Effect Size"]] <- new_e
  }
  if ('Nested p-value' %in% names(tab)) {
    # format p-values nicely
    to_bold_p <- which(!tab[["Nested p-value"]] == "" & as.numeric(tab[["Nested p-value"]]) < 0.05)
    p_vals <- tab[['Nested p-value']]
    new_p <- sapply(p_vals, formatp)
    tab[['Nested p-value']] <- new_p
    if (length(to_bold_p) > 0) bold_cells <- rbind(bold_cells, matrix(cbind(to_bold_p, which(names(tab) == 'Nested p-value')), ncol=2))
  }
  tryCatch({
    if (length(which(tab$'p-value' != '' & is.na(tab$'Nested p-value'))) > 0) {
      tab$'Nested p-value'[tab$'p-value' != '' & is.na(tab$'Nested p-value')] <- 'Did not converge;<br>quasi or complete<br>category separation';
    }
  }, error=function(e){})
  if ('p-value' %in% names(tab)) names(tab)[names(tab) == 'p-value'] <- 'Unnested p-value'
  if ('StatTest' %in% names(tab)) names(tab)[names(tab) == 'StatTest'] <- 'Unnested StatTest'
  if ('Effect Size' %in% names(tab)) names(tab)[names(tab) == 'Effect Size'] <- 'Unnested Effect Size'
  suppressWarnings({ tryCatch({ try(stopCluster(cl), silent=TRUE) }, error=function(e){}) })
  if (tableOnly) {
    if (names(tab)[1] == '') names(tab)[1] <- 'Covariate'
    attr(tab, "to_indent") <- to_indent
    attr(tab, "bold_cells") <- bold_cells
    attr(tab, "dimchk") <- dim(tab)
    return(tab)
  }
  reportRmd::outTable(tab=tab, to_indent=to_indent, bold_cells=bold_cells, caption=caption,
                      chunk_label=ifelse(missing(chunk_label), 'NOLABELTOADD', chunk_label))
}
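
## Illustration only, not package code: for each covariate, the nested p-value
## above comes from a mixed logistic model with a single random intercept for
## the combined nesting ids, read off anova() of afex::mixed(). A minimal
## sketch against the bundled `ae` data; `attr01` is a hypothetical binary
## recode created just for this sketch, and afex/lme4 must be installed.
# data(ae)
# ae$attr01 <- as.integer(ae$CTC_AE_ATTR_SCALE %in% c("Definite", "Probable", "Possible"))
# m <- afex::mixed(attr01 ~ AE_SEV_GD + (1 | ae_detail:Subject),
#                  family = binomial, data = ae, expand_re = TRUE, method = "LRT")
# anova(m)  # covsum_nested() extracts the p-value from column 4 of this table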
## ---- source file: R/covsum_nested.R (package BiostatsUHNplus) ----
#' Adverse event data
#'
#' Simulated adverse events for patients receiving two study agents.
#'
#' @format A data frame with 394 rows and 9 variables:
#' \describe{
#'   \item{Subject}{Patient ID}
#'   \item{ae_detail}{Adverse event detail, also known as lowest level term}
#'   \item{ae_category}{Adverse event category, also known as system organ class}
#'   \item{CTCAE5_LLT_NM}{Lowest level term per the Common Terminology Criteria for Adverse Events (CTCAE) version 5}
#'   \item{AE_VERBATIM_TRM_TXT}{Adverse event verbatim text entered by clinical registered nurse, for "Other, specify"}
#'   \item{AE_SEV_GD}{Adverse event severity grade, scale from 1 to 5}
#'   \item{AE_ONSET_DT_INT}{Adverse event onset date}
#'   \item{CTC_AE_ATTR_SCALE}{Attribution scale of adverse event to first study agent}
#'   \item{CTC_AE_ATTR_SCALE_1}{Attribution scale of adverse event to second study agent}
#' }
"ae"

#' Demography data
#'
#' Simulated demography for patients.
#'
#' @format A data frame with 12 rows and 2 variables:
#' \describe{
#'   \item{Subject}{Patient ID}
#'   \item{GENDER_CODE}{Patient gender}
#' }
"demography"

#' Enrollment data
#'
#' Simulated enrollment for patients.
#'
#' @format A data frame with 12 rows and 3 variables:
#' \describe{
#'   \item{Subject}{Patient ID}
#'   \item{COHORT}{Study cohort for patient}
#'   \item{ENROL_DATE_INT}{Enrollment date of patient to study}
#' }
"enrollment"

#' Ineligibility data
#'
#' Simulated ineligibility for patients.
#'
#' @format A data frame with 11 rows and 2 variables:
#' \describe{
#'   \item{Subject}{Patient ID}
#'   \item{INELIGIBILITY_STATUS}{Recorded ineligibility status of patient to study}
#' }
"ineligibility"

#' Study agent 1 administration data
#'
#' Simulated study agent 1 start dates for patients.
#'
#' @format A data frame with 12 rows and 2 variables:
#' \describe{
#'   \item{Subject}{Patient ID}
#'   \item{TX1_DATE_INT}{Study agent 1 start date of patient on study}
#' }
"drug1_admin"

#' Study agent 2 administration data
#'
#' Simulated study agent 2 start dates for patients.
#'
#' @format A data frame with 12 rows and 2 variables:
#' \describe{
#'   \item{Subject}{Patient ID}
#'   \item{TX2_DATE_INT}{Study agent 2 start date of patient on study}
#' }
"drug2_admin"
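
## Quick look at the bundled data sets (a sketch; run interactively):
# data(ae, package = "BiostatsUHNplus")
# str(ae)          # 394 adverse-event records, 9 variables
# data(enrollment, package = "BiostatsUHNplus")
# head(enrollment) # one row per patient, with cohort and enrollment date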
## ---- source file: R/data.R (package BiostatsUHNplus) ----
#' Outputs the three DSMB-CCRU AE summary tables in Excel format per UHN template
#'
#' @param protocol study protocol name (uppercase, no spaces permitted)
#' @param setwd directory to write Excel summary files to
#' @param title full character vector with name of study
#' @param comp baseline comparison group, for example, cohort (if provided)
#' @param pi character vector name of study principal investigator
#' @param presDate presentation date (i.e. 17NOV2023) for DSMB
#' @param cutDate recent cutoff date for AEs (i.e. 31AUG2023)
#' @param boundDate lower bound cutoff date for AEs (if provided)
#' @param subjID key identifier field for participant ID in data sets
#' @param subjID_ineligText character text that denotes participant IDs to exclude,
#'   for example, c("New Subject") (if provided)
#' @param baseline_datasets list of data frames that contain baseline participant characteristics,
#'   for example, list(enrollment_DF,demography_DF,ineligibility_DF)
#' @param ae_dataset data frame that contains subject AEs
#' @param ineligVar field that denotes participant ineligibility
#' @param ineligVarText character text that denotes participant ineligibility,
#'   for example, c("Yes", "Y") (if provided)
#' @param genderVar field that denotes participant gender
#' @param enrolDtVar field that denotes participant enrollment date (i.e. 10MAY2021)
#' @param ae_detailVar field that denotes participant AE detail (lowest level term)
#' @param ae_categoryVar field that denotes participant AE category (system organ class)
#' @param ae_severityVar field that denotes participant AE severity grade (numeric)
#' @param ae_onsetDtVar field that denotes participant AE onset date
#' @param ae_detailOtherText character text that denotes referencing verbatim AE field,
#'   for example, c("Other, specify", "OTHER") (if provided)
#' @param ae_detailOtherVar field that denotes participant AE detail other (if provided)
#' @param ae_verbatimVar field that denotes participant AE detail verbatim (if provided)
#' @param numSubj vector to override value for number of participants in summary (if provided)
#' @param fileNameUnderscore boolean that denotes if spaces should be replaced with underscores in the file name
#' @keywords dataframe
#' @return three Excel files containing DSMB-CCRU AE summary tables
#' @importFrom openxlsx createStyle createWorkbook addWorksheet writeData mergeCells addStyle setRowHeights setColWidths saveWorkbook
#' @importFrom plyr join_all
#' @importFrom dplyr select distinct mutate arrange summarise group_by filter across row_number n_distinct
#' @importFrom stringr str_detect
#' @export
#' @examples
#' data("enrollment", "demography", "ineligibility", "ae");
#' dsmb_ccru(protocol="EXAMPLE_STUDY",setwd="./man/tables/",
#'   title="Phase X Study to Evaluate Treatments A-D",
#'   comp="COHORT",pi="Dr. Principal Investigator",
Principal Investigator", #' presDate="30OCT2020",cutDate="31AUG2020", #' boundDate=NULL,subjID="Subject",subjID_ineligText=c("New Subject","Test"), #' baseline_datasets=list(enrollment,demography,ineligibility), #' ae_dataset=ae,ineligVar="INELIGIBILITY_STATUS",ineligVarText=c("Yes","Y"), #' genderVar="GENDER_CODE",enrolDtVar="ENROL_DATE_INT",ae_detailVar="ae_detail", #' ae_categoryVar="ae_category",ae_severityVar="AE_SEV_GD", #' ae_onsetDtVar="AE_ONSET_DT_INT",ae_detailOtherText="Other, specify", #' ae_detailOtherVar="CTCAE5_LLT_NM",ae_verbatimVar="AE_VERBATIM_TRM_TXT", #' numSubj=c(2,4,5,6)) dsmb_ccru <- function(protocol,setwd,title,comp=NULL,pi,presDate,cutDate,boundDate=NULL, subjID,subjID_ineligText=NULL,baseline_datasets,ae_dataset, ineligVar,ineligVarText=NULL, genderVar,enrolDtVar,ae_detailVar,ae_categoryVar, ae_severityVar,ae_onsetDtVar,ae_detailOtherText=NULL,ae_detailOtherVar=NULL, ae_verbatimVar=NULL,numSubj=NULL,fileNameUnderscore=TRUE){ #### Template style for tables; ##https://stackoverflow.com/questions/54322814/how-to-apply-thick-border-around-a-cell-range-using-the-openxlsx-package-in-r ; OutsideBorders <- function(wb_, sheet_, rows_, cols_, border_col = "#AAC1D9", border_thickness = "thick") { left_col = min(cols_) right_col = max(cols_) top_row = min(rows_) bottom_row = max(rows_) sub_rows <- list(c(bottom_row:top_row), c(bottom_row:top_row), top_row, bottom_row) sub_cols <- list(left_col, right_col, c(left_col:right_col), c(left_col:right_col)) directions <- list("Left", "Right", "Top", "Bottom") mapply(function(r_, c_, d) { temp_style <- createStyle(border = d, borderColour = border_col, borderStyle = border_thickness) addStyle( wb_, sheet_, style = temp_style, rows = r_, cols = c_, gridExpand = TRUE, stack = TRUE ) }, sub_rows, sub_cols, directions) } backgroundStyle <- createStyle(fontName = "Arial", fontSize = 9, fontColour = "black", halign = "center", valign = "center", fgFill = "white", border = NULL, borderColour = "#AAC1D9", textDecoration = "bold", wrapText = TRUE, borderStyle = NULL) headerStyle1 <- createStyle(fontName = "Arial", fontSize = 12, fontColour = "black", halign = "center", valign = "center", fgFill = "white", border = NULL, borderColour = "#AAC1D9", textDecoration = "bold", wrapText = TRUE, borderStyle = NULL) headerStyle2 <- createStyle(fontName = "Arial", fontSize = 9, fontColour = "black", halign = "center", valign = "center", fgFill = "#FAF3D4", border = "TopBottomLeftRight", borderColour = "#AAC1D9", textDecoration = "bold", wrapText = TRUE) contentStyleR <- createStyle(fontName = "Arial", fontSize = 8, fontColour = "black", halign = "right", valign = "center", fgFill = "white", border = "TopBottomLeftRight", borderColour = "#AAC1D9", wrapText = TRUE) contentStyleL <- createStyle(fontName = "Arial", fontSize = 8, fontColour = "black", halign = "left", valign = "center", fgFill = "white", border = "TopBottomLeftRight", borderColour = "#AAC1D9", wrapText = TRUE) #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#; #setwd(setwd); if (is.null(boundDate)) { boundDate <- "01JAN1990"; } if (is.null(ae_detailOtherVar)) { ae_detailOtherVar <- ae_detailVar; } if (is.null(ae_verbatimVar)) { ae_verbatimVar <- ae_detailVar; } if (is.null(ae_detailOtherText)) { ae_detailOtherText <- "Other, specify"; } if (is.null(comp)) { subjectsKeep_DF <- plyr::join_all(baseline_datasets, by = subjID, type = "full") |> #### --------------------------------------------- #### #### Just modify the below line for 
      dplyr::mutate(Subject = eval(parse(text=subjID)),
                    comp = "",
                    gender_code = eval(parse(text=genderVar)),
                    PT_ELIG_IND_3 = eval(parse(text=ineligVar)),
                    PARTIC_ENROL_DT_INT = eval(parse(text=enrolDtVar))) |>
      dplyr::select(Subject, comp, gender_code, PT_ELIG_IND_3, PARTIC_ENROL_DT_INT) |>
      dplyr::group_by(Subject) |>
      dplyr::summarise(comp = comp[which(!is.na(comp))[1]],
                       gender_code = gender_code[which(!is.na(gender_code))[1]],
                       PT_ELIG_IND_3 = PT_ELIG_IND_3[which(!is.na(PT_ELIG_IND_3))[1]],
                       PARTIC_ENROL_DT_INT = PARTIC_ENROL_DT_INT[which(!is.na(PARTIC_ENROL_DT_INT))[1]]) |>
      #### --------------------------------------------- ####
      dplyr::mutate(PARTIC_ENROL_DT_INT = toupper(format(as.Date(PARTIC_ENROL_DT_INT, tz = "UTC"), "%d%b%Y"))) |>
      dplyr::filter(!PT_ELIG_IND_3 %in% ineligVarText, !Subject %in% subjID_ineligText) |>
      dplyr::arrange(Subject)
  }
  if (!is.null(comp)) {
    subjectsKeep_DF <- plyr::join_all(baseline_datasets, by = subjID, type = "full") |>
      #### --------------------------------------------- ####
      #### Just modify the below line for variable names ####
      dplyr::mutate(Subject = eval(parse(text=subjID)),
                    comp = eval(parse(text=comp)),
                    gender_code = eval(parse(text=genderVar)),
                    PT_ELIG_IND_3 = eval(parse(text=ineligVar)),
                    PARTIC_ENROL_DT_INT = eval(parse(text=enrolDtVar))) |>
      dplyr::select(Subject, comp, gender_code, PT_ELIG_IND_3, PARTIC_ENROL_DT_INT) |>
      dplyr::group_by(Subject) |>
      dplyr::summarise(comp = comp[which(!is.na(comp))[1]],
                       gender_code = gender_code[which(!is.na(gender_code))[1]],
                       PT_ELIG_IND_3 = PT_ELIG_IND_3[which(!is.na(PT_ELIG_IND_3))[1]],
                       PARTIC_ENROL_DT_INT = PARTIC_ENROL_DT_INT[which(!is.na(PARTIC_ENROL_DT_INT))[1]]) |>
      #### --------------------------------------------- ####
      dplyr::mutate(PARTIC_ENROL_DT_INT = toupper(format(as.Date(PARTIC_ENROL_DT_INT, tz = "UTC"), "%d%b%Y"))) |>
      dplyr::filter(!PT_ELIG_IND_3 %in% ineligVarText, !Subject %in% subjID_ineligText) |>
      dplyr::arrange(Subject)
  }
  aeKeep_DF <- ae_dataset |>
    #### --------------------------------------------- ####
    #### Just modify the below line for variable names ####
    dplyr::mutate(Subject = eval(parse(text=subjID)),
                  ae_grade_code_dyn_std = eval(parse(text=ae_severityVar)),
                  CTCAE5_LLT_NM = eval(parse(text=ae_detailOtherVar)),
                  AE_VERBATIM_TRM_TXT = eval(parse(text=ae_verbatimVar)),
                  AE_ONSET_DT_INT = eval(parse(text=ae_onsetDtVar)),
                  ae_detail = eval(parse(text=ae_detailVar)),
                  ae_category = eval(parse(text=ae_categoryVar))) |>
    dplyr::select(Subject, ae_grade_code_dyn_std, CTCAE5_LLT_NM, AE_VERBATIM_TRM_TXT,
                  AE_ONSET_DT_INT, ae_detail, ae_category) |>
    #### --------------------------------------------- ####
    dplyr::mutate(ae_detail = toupper(ifelse(stringr::str_detect(ae_detail, ae_detailOtherText), trimws(AE_VERBATIM_TRM_TXT), ae_detail)),
                  AE_ONSET_DT_INT = toupper(format(as.Date(AE_ONSET_DT_INT, tz = "UTC"), "%d%b%Y")),
                  ae_category = toupper(ae_category)) |>
    dplyr::mutate(ae_detail = toupper(ifelse(is.na(ae_detail), CTCAE5_LLT_NM, ae_detail)))

  #i <- 1;
  for (i in 1:length(unique(subjectsKeep_DF[["comp"]]))) {
    comp <- unique(subjectsKeep_DF[["comp"]])[i]
    tryCatch({
      subjects_DF <- subjectsKeep_DF[which(subjectsKeep_DF$comp %in% c(comp)), ]
    }, error=function(e){})

    #### Do not need to modify below here;
    aes1_DF <- subjects_DF |>
      dplyr::left_join(aeKeep_DF, by = "Subject") |>
      dplyr::select(Subject, AE_ONSET_DT_INT, ae_detail, ae_category, ae_grade_code_dyn_std,
                    PARTIC_ENROL_DT_INT, CTCAE5_LLT_NM, AE_VERBATIM_TRM_TXT) |>
      dplyr::arrange(Subject) |>
      dplyr::filter(as.Date(PARTIC_ENROL_DT_INT, "%d%b%Y") <=
as.Date(cutDate, "%d%b%Y"), as.Date(AE_ONSET_DT_INT, "%d%b%Y") <= as.Date(cutDate, "%d%b%Y"), !is.na(ae_detail)) |> dplyr::distinct(Subject, AE_ONSET_DT_INT, ae_detail, ae_category, ae_grade_code_dyn_std, PARTIC_ENROL_DT_INT) #write.xlsx(aes1_DF, file=paste("aes1_DF", ".xlsx", sep=""), sheetName="AEs check", col.names=TRUE, row.names=FALSE, append=TRUE, showNA=FALSE); #unique(aes1_DF$ae_detail); aes2_DF <- aes1_DF |> dplyr::distinct(Subject, ae_category, ae_detail, ae_grade_code_dyn_std) |> dplyr::group_by(Subject, ae_detail) |> dplyr::filter(ae_grade_code_dyn_std == max(ae_grade_code_dyn_std)) |> dplyr::arrange(Subject) total_subj_count <- length(unique(subjects_DF$Subject)); if (!is.null(numSubj)) { total_subj_count <- numSubj[i]; } total_ae_count <- length(aes1_DF$ae_category); #### Table 1; table1_dfa <- aes2_DF |> dplyr::group_by(ae_category, ae_detail) |> dplyr::summarise(ind = n_distinct(Subject)) |> dplyr::mutate(ind_per = format(round((ind/total_subj_count)*100, 2), nsmall=2)) table1_dfb <- aes2_DF |> dplyr::group_by(ae_category, ae_detail) |> dplyr::filter(ae_grade_code_dyn_std %in% c(3:5)) |> dplyr::summarise(indH = n_distinct(Subject)) |> dplyr::mutate(indH_per = format(round((indH/total_subj_count)*100, 2), nsmall=2)) table1_df <- table1_dfa |> dplyr::left_join(table1_dfb, by = c("ae_category", "ae_detail")) |> dplyr::mutate(indH = ifelse(is.na(indH), 0, indH), indH_per = ifelse(is.na(indH_per), "0", indH_per)) colnames(table1_df) <- c("Category", "Adverse event", "# of subjects that have experienced the AE", paste("the % of subjects that this comprises of the total accrual (N=", total_subj_count, ")", sep=""), "# of subjects that experienced the event at a grade 3 to 5", paste("% of the subjects that this comprises of the total accrual (N=", total_subj_count, ")", sep="")); table1_df <- as.data.frame(table1_df); table1_sn <- paste("ae_detail ", protocol, sep=""); table1_sn <- substr(table1_sn, 1, 31); table1_fn <- paste("ae_detail ", protocol, " ", comp, " ", presDate, ".xlsx", sep=""); wb <- createWorkbook(); addWorksheet(wb, sheetName = table1_sn, gridLines = FALSE); writeData(wb, sheet = table1_sn, table1_df, colNames = TRUE, rowNames = FALSE, startCol = 1, startRow = 9); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 1); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 2); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 3); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 4); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 5); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 6); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 7); mergeCells(wb, sheet = table1_sn, cols = 1:6, rows = 8); writeData(wb, sheet = table1_sn, title, colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 1); writeData(wb, sheet = table1_sn, comp, colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 3); writeData(wb, sheet = table1_sn, paste("PI: ", pi, sep=""), colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 5); writeData(wb, sheet = table1_sn, paste("Report date: ", presDate, sep=""), colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 7); addStyle(wb, sheet = table1_sn, headerStyle1, rows = c(1,3,5,7), cols = 1, gridExpand = TRUE); addStyle(wb, sheet = table1_sn, headerStyle2, rows = 9, cols = 1:6, gridExpand = TRUE); addStyle(wb, sheet = table1_sn, contentStyleL, rows = 10:(length(table1_df[, 1])+9), cols = 1:2, gridExpand = TRUE); addStyle(wb, sheet = table1_sn, contentStyleR, rows = 10:(length(table1_df[, 1])+9), cols = 3:6, 
             gridExpand = TRUE);
    setRowHeights(wb, 1, rows = 1, heights = 50);
    setRowHeights(wb, 1, rows = 9, heights = 82);
    setColWidths(wb, 1, cols = c(1, 2, 3, 4, 5, 6), widths = c(34, 34, 15, 15, 15, 15));
    OutsideBorders(wb, sheet_ = 1, rows_ = 9:(length(table1_df[, 1])+9), cols_ = 1:6);
    if (fileNameUnderscore == TRUE) { table1_fn <- chartr(" ", "_", table1_fn); }
    saveWorkbook(wb, paste(setwd, table1_fn, sep=""), overwrite = TRUE);

    #### Table 2;
    table2_dfa <- aes2_DF |>
      dplyr::group_by(ae_category) |>
      dplyr::summarise(ind = n_distinct(Subject)) |>
      dplyr::mutate(ind_per = format(round((ind/total_subj_count)*100, 2), nsmall=2))
    table2_dfb <- aes2_DF |>
      dplyr::group_by(ae_category) |>
      dplyr::filter(ae_grade_code_dyn_std %in% c(3:5)) |>
      dplyr::summarise(indH = n_distinct(Subject)) |>
      dplyr::mutate(indH_per = format(round((indH/total_subj_count)*100, 2), nsmall=2))
    table2_df <- table2_dfa |>
      dplyr::left_join(table2_dfb, by = c("ae_category")) |>
      dplyr::mutate(indH = ifelse(is.na(indH), 0, indH),
                    indH_per = ifelse(is.na(indH_per), "0", indH_per))
    colnames(table2_df) <- c("Category",
                             "# of subjects that have experienced the AE",
                             paste("the % of subjects that this comprises of the total accrual (N=", total_subj_count, ")", sep=""),
                             "# of subjects that experienced the event at a grade 3 to 5",
                             paste("% of the subjects that this comprises of the total accrual (N=", total_subj_count, ")", sep=""));
    table2_df <- as.data.frame(table2_df);
    table2_sn <- paste("category BySubject ", protocol, sep="");
    table2_sn <- substr(table2_sn, 1, 31);
    table2_fn <- paste("category BySubject ", protocol, " ", comp, " ", presDate, ".xlsx", sep="");
    wb <- createWorkbook();
    addWorksheet(wb, sheetName = table2_sn, gridLines = FALSE);
    writeData(wb, sheet = table2_sn, table2_df, colNames = TRUE, rowNames = FALSE, startCol = 1, startRow = 9);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 1);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 2);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 3);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 4);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 5);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 6);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 7);
    mergeCells(wb, sheet = table2_sn, cols = 1:5, rows = 8);
    writeData(wb, sheet = table2_sn, title, colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 1);
    writeData(wb, sheet = table2_sn, comp, colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 3);
    writeData(wb, sheet = table2_sn, paste("PI: ", pi, sep=""), colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 5);
    writeData(wb, sheet = table2_sn, paste("Report date: ", presDate, sep=""), colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 7);
    addStyle(wb, sheet = table2_sn, headerStyle1, rows = c(1,3,5,7), cols = 1, gridExpand = TRUE);
    addStyle(wb, sheet = table2_sn, headerStyle2, rows = 9, cols = 1:5, gridExpand = TRUE);
    addStyle(wb, sheet = table2_sn, contentStyleL, rows = 10:(length(table2_df[, 1])+9), cols = 1, gridExpand = TRUE);
    addStyle(wb, sheet = table2_sn, contentStyleR, rows = 10:(length(table2_df[, 1])+9), cols = 2:5, gridExpand = TRUE);
    setRowHeights(wb, 1, rows = 1, heights = 50);
    setRowHeights(wb, 1, rows = 9, heights = 82);
    setColWidths(wb, 1, cols = c(1, 2, 3, 4, 5), widths = c(34, 15, 15, 15, 15));
    OutsideBorders(wb, sheet_ = 1, rows_ = 9:(length(table2_df[, 1])+9), cols_ = 1:5);
    if (fileNameUnderscore == TRUE) { table2_fn <- chartr(" ", "_", table2_fn); }
    saveWorkbook(wb, paste(setwd, table2_fn, sep=""),
                 overwrite = TRUE);

    #### Table 3;
    table3_dfa <- aes1_DF |>
      dplyr::group_by(ae_category) |>
      dplyr::summarise(ind = n()) |>
      dplyr::mutate(ind_per = format(round((ind/total_ae_count)*100, 2), nsmall=2))
    table3_dfb <- aes1_DF |>
      dplyr::group_by(ae_category) |>
      dplyr::filter(ae_grade_code_dyn_std %in% c(3:5)) |>
      dplyr::summarise(indH = n()) |>
      dplyr::mutate(indH_per = format(round((indH/total_ae_count)*100, 2), nsmall=2))
    table3_df <- table3_dfa |>
      dplyr::left_join(table3_dfb, by = c("ae_category")) |>
      dplyr::mutate(indH = ifelse(is.na(indH), 0, indH),
                    indH_per = ifelse(is.na(indH_per), "0", indH_per))
    colnames(table3_df) <- c("Category",
                             "# of events that have experienced the AE in this category",
                             paste("% of events in relation to the total events (N=", total_ae_count, ")", sep=""),
                             "# of events that were grade 3 to 5",
                             paste("% of events that this comprises of the total accrual (N=", total_ae_count, ")", sep=""));
    table3_df <- as.data.frame(table3_df);
    table3_sn <- paste("category ByEvent ", protocol, sep="");
    table3_sn <- substr(table3_sn, 1, 31);
    table3_fn <- paste("category ByEvent ", protocol, " ", comp, " ", presDate, ".xlsx", sep="");
    wb <- createWorkbook();
    addWorksheet(wb, sheetName = table3_sn, gridLines = FALSE);
    writeData(wb, sheet = table3_sn, table3_df, colNames = TRUE, rowNames = FALSE, startCol = 1, startRow = 9);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 1);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 2);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 3);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 4);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 5);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 6);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 7);
    mergeCells(wb, sheet = table3_sn, cols = 1:5, rows = 8);
    writeData(wb, sheet = table3_sn, title, colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 1);
    writeData(wb, sheet = table3_sn, comp, colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 3);
    writeData(wb, sheet = table3_sn, paste("PI: ", pi, sep=""), colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 5);
    writeData(wb, sheet = table3_sn, paste("Report date: ", presDate, sep=""), colNames = FALSE, rowNames = FALSE, startCol = 1, startRow = 7);
    addStyle(wb, sheet = table3_sn, headerStyle1, rows = c(1,3,5,7), cols = 1, gridExpand = TRUE);
    addStyle(wb, sheet = table3_sn, headerStyle2, rows = 9, cols = 1:5, gridExpand = TRUE);
    addStyle(wb, sheet = table3_sn, contentStyleL, rows = 10:(length(table3_df[, 1])+9), cols = 1, gridExpand = TRUE);
    addStyle(wb, sheet = table3_sn, contentStyleR, rows = 10:(length(table3_df[, 1])+9), cols = 2:5, gridExpand = TRUE);
    setRowHeights(wb, 1, rows = 1, heights = 50);
    setRowHeights(wb, 1, rows = 9, heights = 82);
    setColWidths(wb, 1, cols = c(1, 2, 3, 4, 5), widths = c(34, 15, 15, 15, 15));
    OutsideBorders(wb, sheet_ = 1, rows_ = 9:(length(table3_df[, 1])+9), cols_ = 1:5);
    if (fileNameUnderscore == TRUE) { table3_fn <- chartr(" ", "_", table3_fn); }
    saveWorkbook(wb, paste(setwd, table3_fn, sep=""), overwrite = TRUE);
    #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#;
  }
}
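
## Illustration only, not package code: dsmb_ccru() compares dates as
## "%d%b%Y" strings (e.g. "31AUG2020"), so cutDate and the date fields must
## parse with that format; month abbreviations are locale-dependent, so this
## sketch assumes an English locale.
# as.Date("31AUG2020", "%d%b%Y")  # "2020-08-31"; NA if the format does not match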
## ---- source file: R/dsmb_ccru.R (package BiostatsUHNplus) ----
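## The names below are referenced via non-standard evaluation (dplyr pipelines,
## eval(parse(...)) and roxygen examples) elsewhere in the package; declaring
## them with utils::globalVariables() silences the "no visible binding for
## global variable" notes from R CMD check.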
utils::globalVariables(c("AE_ONSET_DT_INT", "AE_SEV_GD", "AE_VERBATIM_TRM_TXT", "CTCAE5_LLT_NM", "OrigOrder", "Ovar", "PARTIC_ENROL_DT_INT", "PT_ELIG_IND_3", "Subject", "Variable", "ae", "ae_category", "ae_detail", "ae_grade_code_dyn_std", "anova", "as.formula", "binomial", "cl", "desc", "drug1_ae", "gender_code", "group", "ind", "indH", "indH_per", "instance", "join", "label", "median", "na.fail", "stopCluster", "time_max", "time_median", "time_min", "where", "ID", "est", "term", "lower", "upper", "significance", "model1", "reorder"))
## ---- source file: R/globals.R (package BiostatsUHNplus) ----
#' Nice table of model output from MCMCglmm::MCMCglmm()
#'
#' @param mcmcglmm_object returned output from MCMCglmm()
#' @param dataset dataframe containing data
#' @keywords dataframe
#' @returns grouped_df
#' @importFrom tibble rownames_to_column
#' @importFrom plyr join_all
#' @importFrom purrr modify_if
#' @importFrom dplyr select mutate arrange group_by
#' @importFrom MCMCglmm MCMCglmm
#' @export
#' @examples
#' \dontrun{
#' data(ae)
#'
#' ae$AE_SEV_GD <- as.numeric(ae$AE_SEV_GD);
#' ae$Drug_1_Attribution <- "No";
#' ae$Drug_1_Attribution[ae$CTC_AE_ATTR_SCALE %in% c("Definite", "Probable", "Possible")] <- "Yes";
#' ae$Drug_1_Attribution <- as.factor(ae$Drug_1_Attribution);
#' ae$Drug_2_Attribution <- "No";
#' ae$Drug_2_Attribution[ae$CTC_AE_ATTR_SCALE_1 %in% c("Definite", "Probable", "Possible")] <- "Yes";
#' ae$Drug_2_Attribution <- as.factor(ae$Drug_2_Attribution);
#'
#' prior2RE <- list(R = list(V = diag(1), fix = 1), G=list(G1=list(V=1, nu=0.02),
#'   G2=list(V=1, nu=0.02)));
#'
#' model1 <- MCMCglmm::MCMCglmm(Drug_1_Attribution ~ AE_SEV_GD + Drug_2_Attribution,
#'   random=~ae_detail + Subject, family="categorical", data=ae, saveX=TRUE,
#'   verbose=FALSE, burnin=2000, nitt=10000, thin=10, pr=TRUE, prior=prior2RE);
#'
#' mcmcglmm_mva <- nice_mcmcglmm(model1, ae);
#' }
nice_mcmcglmm <- function(mcmcglmm_object, dataset) {
  cc <- summary(mcmcglmm_object)$solutions
  citab <- with(as.data.frame(cc),
                cbind(OR_HPDI_95 = paste0(trimws(format(round(exp(cc[,1]), 2), nsmall=2)), " (",
                                          trimws(format(round(exp(cc[,2]), 2), nsmall=2)), ", ",
                                          trimws(format(round(exp(cc[,3]), 2), nsmall=2)), ")", sep=""),
                      MCMCp = trimws(format(round(cc[,5], 3), nsmall=3)),
                      eff_sample = trimws(format(round(cc[,4], 2), nsmall=2))))
  rownames(citab) <- rownames(cc)
  mcmcglmm_ci <- citab;
  mcmcglmm_ci <- as.data.frame(mcmcglmm_ci);
  mcmcglmm_ci <- tibble::rownames_to_column(mcmcglmm_ci, "Variable");
  mcmcglmm_ci <- mcmcglmm_ci[-1,];
  colnames(mcmcglmm_ci) <- c("Variable", "OR (95% HPDI)", "MCMCp", "eff.samp");
  mcmcglmm_ci$join <- mcmcglmm_ci$Variable;
  ## Have to do tryCatch() in this order for a combination of factor and numeric variables;
  tryCatch({
    varLevels <- do.call(rbind, lapply(sapply(dataset[, c(all.vars(mcmcglmm_object$Fixed$formula)[-1])], levels), data.frame));
  }, error=function(e){})
  tryCatch({
    dataset <- dataset |> purrr::modify_if(is.character, as.factor);
    t1 <- sapply(dataset[, c(all.vars(mcmcglmm_object$Fixed$formula)[-1])], levels);
    t1 <- as.data.frame(t1);
    varLevels <- do.call(rbind, lapply(t1, data.frame))
  }, error=function(e){})
  # tryCatch({
  #   dataset <- dataset |> purrr::modify_if(is.character, as.factor);
  #   varLevels <- do.call(rbind, lapply(sapply(dataset[, c(all.vars(mcmcglmm_object$Fixed$formula)[-1])], levels), data.frame));
  # }, error=function(e){})
  tryCatch({
    varLevels <- tibble::rownames_to_column(varLevels, "Variable");
    colnames(varLevels) <- c("Variable", "Levels");
    varLevels$Variable <- gsub("(.*)\\.(.*)", "\\1", varLevels$Variable);
    varLevels$join <- paste(varLevels$Variable, varLevels$Levels, sep="");
  }, error=function(e){return(printErr <- NA)})
  if (length(varLevels) == 1) {
    if (colSums(varLevels) == 0) {
      varLevels <- as.data.frame(cbind(mcmcglmm_ci$Variable, NA, mcmcglmm_ci$join))
    }
  }
  if (length(varLevels) == 0) {
    stop("Try converting indicator variables to factors and run again.")
  }
  colnames(varLevels) <- c("Variable", "Levels", "join");
  opd_mcmcglmm <- plyr::join_all(list(varLevels, mcmcglmm_ci), by=c("join"), type='full');
  opd_mcmcglmm <- opd_mcmcglmm |> purrr::modify_if(is.factor, as.character);
  origVar <- as.data.frame(all.vars(mcmcglmm_object$Fixed$formula)[-1]);
  colnames(origVar) <- "Variable";
  origVar <- origVar |> dplyr::mutate(OrigOrder = 1:dplyr::n())
  opd_mcmcglmm <- plyr::join_all(list(opd_mcmcglmm, origVar), by=c("Variable"), type='full');
  tryCatch({
    opd_mcmcglmm[which(is.na(opd_mcmcglmm$"OR (95% HPDI)")), ]$"OR (95% HPDI)" <- "reference";
  }, error=function(e){return(printErr <- NA)})
  opd_mcmcglmm <- opd_mcmcglmm |>
    dplyr::select(!join) |>
    dplyr::mutate(Ovar = match(Variable, unique(Variable))) |>
    dplyr::group_by(Variable) |>
    dplyr::mutate(instance = 1:dplyr::n())
  tryCatch({
    opd_mcmcglmm[which(opd_mcmcglmm$"OR (95% HPDI)" == "reference"), ]$instance <- 0;
  }, error=function(e){return(printErr <- NA)})
  opd_mcmcglmm <- opd_mcmcglmm |>
    dplyr::arrange(Variable, instance) |>
    dplyr::arrange(OrigOrder, instance) |>
    dplyr::select(!c(instance, Ovar, OrigOrder))
  opd_mcmcglmm$Variable <- gsub("_", " ", opd_mcmcglmm$Variable);
  opd_mcmcglmm$Variable[duplicated(opd_mcmcglmm$Variable)] <- NA;
  opd_mcmcglmm$MCMCp[opd_mcmcglmm$MCMCp == "0.000"] <- "<0.001";
  if (nrow(opd_mcmcglmm[which(is.na(opd_mcmcglmm$Levels) & opd_mcmcglmm$"OR (95% HPDI)" == "reference"), ]) >= 1) {
    stop("Try converting indicator variables to factors in dataset and run again.")
  }
  return(opd_mcmcglmm)
}
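
## Illustration only, not package code: the "OR (95% HPDI)" column is exp() of
## the posterior mean and HPD bounds of the fixed effects, and MCMCp/eff.samp
## come from the same summary, e.g. (using `model1` from the example above):
# sols <- summary(model1)$solutions  # post.mean, l-95% CI, u-95% CI, eff.samp, pMCMC
# exp(sols[, 1:3])                   # odds ratios with 95% HPD bounds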
## ---- source file: R/nice_mcmcglmm.R (package BiostatsUHNplus) ----
#' Nice table of intraclass correlation coefficients from MCMCglmm::MCMCglmm()
#' model output
#'
#' @param mcmcglmm_object returned output from MCMCglmm()
#' @param prob probability for highest posterior density interval, similar to a
#'   confidence interval. Default is 0.95 (if provided)
#' @param decimals number of decimal places to use in estimates
#' @keywords dataframe
#' @returns grouped_df
#' @importFrom MCMCglmm posterior.mode
#' @importFrom coda HPDinterval
#' @export
#' @examples
#' \dontrun{
#' data(ae)
#' ae$AE_SEV_GD <- as.numeric(ae$AE_SEV_GD);
#' ae$Drug_1_Attribution <- 0;
#' ae$Drug_1_Attribution[ae$CTC_AE_ATTR_SCALE %in% c("Definite", "Probable", "Possible")] <- 1;
#' ae$Drug_2_Attribution <- 0;
#' ae$Drug_2_Attribution[ae$CTC_AE_ATTR_SCALE_1 %in% c("Definite", "Probable", "Possible")] <- 1;
#' prior2RE <- list(R = list(V = diag(1), fix = 1), G=list(G1=list(V=1, nu=0.02),
#'   G2=list(V=1, nu=0.02)));
#' model1 <- MCMCglmm::MCMCglmm(Drug_1_Attribution ~ AE_SEV_GD + Drug_2_Attribution,
#'   random=~ae_detail + Subject, family="categorical", data=ae, saveX=TRUE,
#'   verbose=FALSE, burnin=2000, nitt=10000, thin=10, pr=TRUE, prior=prior2RE);
#' mcmcglmm_icc <- nice_mcmcglmm_icc(model1);
#' }
nice_mcmcglmm_icc <- function(mcmcglmm_object, prob=NULL, decimals=NULL) {
  if (is.null(prob)) { prob <- 0.95; }
  if (is.null(decimals)) { decimals <- 4; }
  # use the function argument, not a global model object, to find the variance components
  re <- colnames(mcmcglmm_object$VCV);
  ICC <- data.frame();
  for (i in 1:length(re)) {
    tmp <- mcmcglmm_object$VCV[, i] / rowSums(mcmcglmm_object$VCV);
    df_tmp <- cbind(ICC = MCMCglmm::posterior.mode(tmp), CI = coda::HPDinterval(tmp, prob=prob));
    ICC <- rbind(ICC, df_tmp);
  }
  # round once, after all components are collected, honouring the decimals argument
  ICC <- round(ICC, decimals);
  row.names(ICC) <- re;
  return(ICC)
}
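
## Illustration only, not package code: for a random effect with variance
## draws sigma_k^2 in VCV, the ICC is sigma_k^2 / sum_j sigma_j^2 computed per
## MCMC draw, then summarised by its posterior mode and HPD interval, e.g.
## (with `model1` from the example above):
# v <- model1$VCV                              # MCMC draws of the variance components
# icc_subject <- v[, "Subject"] / rowSums(v)
# MCMCglmm::posterior.mode(icc_subject)
# coda::HPDinterval(icc_subject, prob = 0.95)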
## ---- source file: R/nice_mcmcglmm_icc.R (package BiostatsUHNplus) ----